xref: /dpdk/drivers/net/mlx5/mlx5_flow_hw.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include <rte_flow.h>
6 #include <rte_flow_driver.h>
7 
8 #include <mlx5_malloc.h>
9 
10 #include "mlx5.h"
11 #include "mlx5_defs.h"
12 #include "mlx5_flow.h"
13 #include "mlx5_rx.h"
14 
15 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
16 #include "mlx5_hws_cnt.h"
17 
18 /** Fast path async flow API functions. */
19 static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops;
20 
21 /* The maximum number of actions supported per flow. */
22 #define MLX5_HW_MAX_ACTS 16
23 
24 /*
25  * The default ipool size threshold used to decide which per_core_cache
26  * value to set.
27  */
28 #define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
29 /* The default min local cache size. */
30 #define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
31 
32 /* Default push burst threshold. */
33 #define BURST_THR 32u
34 
35 /* Default queue to flush the flows. */
36 #define MLX5_DEFAULT_FLUSH_QUEUE 0
37 
38 /* Maximum number of rules in control flow tables. */
39 #define MLX5_HW_CTRL_FLOW_NB_RULES (4096)
40 
41 /* Lowest flow group usable by an application if group translation is done. */
42 #define MLX5_HW_LOWEST_USABLE_GROUP (1)
43 
44 /* Maximum group index usable by user applications for transfer flows. */
45 #define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)
46 
47 /* Maximum group index usable by user applications for egress flows. */
48 #define MLX5_HW_MAX_EGRESS_GROUP (UINT32_MAX - 1)
49 
50 /* Lowest priority for HW root table. */
51 #define MLX5_HW_LOWEST_PRIO_ROOT 15
52 
53 /* Lowest priority for HW non-root table. */
54 #define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)
55 
56 /* Priorities for Rx control flow rules. */
57 #define MLX5_HW_CTRL_RX_PRIO_L2 (MLX5_HW_LOWEST_PRIO_ROOT)
58 #define MLX5_HW_CTRL_RX_PRIO_L3 (MLX5_HW_LOWEST_PRIO_ROOT - 1)
59 #define MLX5_HW_CTRL_RX_PRIO_L4 (MLX5_HW_LOWEST_PRIO_ROOT - 2)
60 
61 #define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
62 #define MLX5_HW_VLAN_PUSH_VID_IDX 1
63 #define MLX5_HW_VLAN_PUSH_PCP_IDX 2
64 
65 #define MLX5_MIRROR_MAX_CLONES_NUM 3
66 #define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
67 
68 #define MLX5_HW_PORT_IS_PROXY(priv) \
69 	(!!((priv)->sh->esw_mode && (priv)->master))
70 
71 
72 struct mlx5_indlst_legacy {
73 	struct mlx5_indirect_list indirect;
74 	struct rte_flow_action_handle *handle;
75 	enum rte_flow_action_type legacy_type;
76 };
77 
78 #define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
79 (((const struct encap_type *)(ptr))->definition)
80 
81 struct mlx5_multi_pattern_ctx {
82 	union {
83 		struct mlx5dr_action_reformat_header reformat_hdr;
84 		struct mlx5dr_action_mh_pattern mh_pattern;
85 	};
86 	union {
87 		/* action template auxiliary structures for object destruction */
88 		struct mlx5_hw_encap_decap_action *encap;
89 		struct mlx5_hw_modify_header_action *mhdr;
90 	};
91 	/* multi pattern action */
92 	struct mlx5dr_rule_action *rule_action;
93 };
94 
95 #define MLX5_MULTIPATTERN_ENCAP_NUM 4
96 
97 struct mlx5_tbl_multi_pattern_ctx {
98 	struct {
99 		uint32_t elements_num;
100 		struct mlx5_multi_pattern_ctx ctx[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
101 	} reformat[MLX5_MULTIPATTERN_ENCAP_NUM];
102 
103 	struct {
104 		uint32_t elements_num;
105 		struct mlx5_multi_pattern_ctx ctx[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
106 	} mh;
107 };
108 
109 #define MLX5_EMPTY_MULTI_PATTERN_CTX {{{0,}},}
110 
111 static int
112 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
113 			       struct rte_flow_template_table *tbl,
114 			       struct mlx5_tbl_multi_pattern_ctx *mpat,
115 			       struct rte_flow_error *error);
116 
117 static __rte_always_inline int
118 mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
119 {
120 	switch (type) {
121 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
122 		return 0;
123 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
124 		return 1;
125 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
126 		return 2;
127 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
128 		return 3;
129 	default:
130 		break;
131 	}
132 	return -1;
133 }
134 
135 static __rte_always_inline enum mlx5dr_action_type
136 mlx5_multi_pattern_reformat_index_to_type(uint32_t ix)
137 {
138 	switch (ix) {
139 	case 0:
140 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
141 	case 1:
142 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
143 	case 2:
144 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
145 	case 3:
146 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
147 	default:
148 		break;
149 	}
150 	return MLX5DR_ACTION_TYP_MAX;
151 }
152 
153 static inline enum mlx5dr_table_type
154 get_mlx5dr_table_type(const struct rte_flow_attr *attr)
155 {
156 	enum mlx5dr_table_type type;
157 
158 	if (attr->transfer)
159 		type = MLX5DR_TABLE_TYPE_FDB;
160 	else if (attr->egress)
161 		type = MLX5DR_TABLE_TYPE_NIC_TX;
162 	else
163 		type = MLX5DR_TABLE_TYPE_NIC_RX;
164 	return type;
165 }
166 
167 struct mlx5_mirror_clone {
168 	enum rte_flow_action_type type;
169 	void *action_ctx;
170 };
171 
172 struct mlx5_mirror {
173 	struct mlx5_indirect_list indirect;
174 	uint32_t clones_num;
175 	struct mlx5dr_action *mirror_action;
176 	struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
177 };
178 
179 static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
180 static int flow_hw_translate_group(struct rte_eth_dev *dev,
181 				   const struct mlx5_flow_template_table_cfg *cfg,
182 				   uint32_t group,
183 				   uint32_t *table_group,
184 				   struct rte_flow_error *error);
185 static __rte_always_inline int
186 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
187 			       struct mlx5_hw_q_job *job,
188 			       struct mlx5_action_construct_data *act_data,
189 			       const struct mlx5_hw_actions *hw_acts,
190 			       const struct rte_flow_action *action);
191 static void
192 flow_hw_construct_quota(struct mlx5_priv *priv,
193 			struct mlx5dr_rule_action *rule_act, uint32_t qid);
194 
195 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
196 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
197 
198 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
199 
200 /* DR action flags for root and non-root tables, per table type. */
201 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
202 				[MLX5DR_TABLE_TYPE_MAX] = {
203 	{
204 		MLX5DR_ACTION_FLAG_ROOT_RX,
205 		MLX5DR_ACTION_FLAG_ROOT_TX,
206 		MLX5DR_ACTION_FLAG_ROOT_FDB,
207 	},
208 	{
209 		MLX5DR_ACTION_FLAG_HWS_RX,
210 		MLX5DR_ACTION_FLAG_HWS_TX,
211 		MLX5DR_ACTION_FLAG_HWS_FDB,
212 	},
213 };
214 
215 /* Ethernet item spec for promiscuous mode. */
216 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_spec = {
217 	.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
218 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
219 	.hdr.ether_type = 0,
220 };
221 /* Ethernet item mask for promiscuous mode. */
222 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_mask = {
223 	.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
224 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
225 	.hdr.ether_type = 0,
226 };
227 
228 /* Ethernet item spec for all multicast mode. */
229 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_spec = {
230 	.hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
231 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
232 	.hdr.ether_type = 0,
233 };
234 /* Ethernet item mask for all multicast mode. */
235 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_mask = {
236 	.hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
237 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
238 	.hdr.ether_type = 0,
239 };
240 
241 /* Ethernet item spec for IPv4 multicast traffic. */
242 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_spec = {
243 	.hdr.dst_addr.addr_bytes = "\x01\x00\x5e\x00\x00\x00",
244 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
245 	.hdr.ether_type = 0,
246 };
247 /* Ethernet item mask for IPv4 multicast traffic. */
248 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_mask = {
249 	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\x00\x00\x00",
250 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
251 	.hdr.ether_type = 0,
252 };
253 
254 /* Ethernet item spec for IPv6 multicast traffic. */
255 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_spec = {
256 	.hdr.dst_addr.addr_bytes = "\x33\x33\x00\x00\x00\x00",
257 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
258 	.hdr.ether_type = 0,
259 };
260 /* Ethernet item mask for IPv6 multicast traffic. */
261 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_mask = {
262 	.hdr.dst_addr.addr_bytes = "\xff\xff\x00\x00\x00\x00",
263 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
264 	.hdr.ether_type = 0,
265 };
266 
267 /* Ethernet item mask for unicast traffic. */
268 static const struct rte_flow_item_eth ctrl_rx_eth_dmac_mask = {
269 	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
270 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
271 	.hdr.ether_type = 0,
272 };
273 
274 /* Ethernet item spec for broadcast. */
275 static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
276 	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
277 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
278 	.hdr.ether_type = 0,
279 };
280 
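/**
 * Pop a pre-allocated job from the LIFO of free jobs of the given queue.
 * Returns NULL when all jobs of the queue are already in use.
 *
 * Illustrative sketch of a typical get/put pairing in the async flow API
 * paths (not a verbatim excerpt from this file):
 *
 *	job = flow_hw_job_get(priv, queue);
 *	if (!job)
 *		goto error;	// all jobs in use - queue is full
 *	// ... fill the job and post the asynchronous operation ...
 *	// on completion (or on posting failure) the job is returned:
 *	flow_hw_job_put(priv, job, queue);
 */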
281 static __rte_always_inline struct mlx5_hw_q_job *
282 flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)
283 {
284 	MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size);
285 	return priv->hw_q[queue].job_idx ?
286 	       priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL;
287 }
288 
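/** Return a previously taken job to the queue's LIFO of free jobs. */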
289 static __rte_always_inline void
290 flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue)
291 {
292 	MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size);
293 	priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
294 }
295 
296 static inline enum mlx5dr_matcher_insert_mode
297 flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)
298 {
299 	if (insert_type == RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
300 		return MLX5DR_MATCHER_INSERT_BY_HASH;
301 	else
302 		return MLX5DR_MATCHER_INSERT_BY_INDEX;
303 }
304 
305 static inline enum mlx5dr_matcher_distribute_mode
306 flow_hw_matcher_distribute_mode_get(enum rte_flow_table_hash_func hash_func)
307 {
308 	if (hash_func == RTE_FLOW_TABLE_HASH_FUNC_LINEAR)
309 		return MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR;
310 	else
311 		return MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
312 }
313 
314 /**
315  * Set the hash fields according to the @p rss_desc information.
316  *
317  * @param[in] rss_desc
318  *   Pointer to the mlx5_flow_rss_desc.
319  * @param[out] hash_fields
320  *   Pointer to the RSS hash fields.
321  */
322 static void
323 flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
324 		       uint64_t *hash_fields)
325 {
326 	uint64_t fields = 0;
327 	int rss_inner = 0;
328 	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
329 
330 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
331 	if (rss_desc->level >= 2)
332 		rss_inner = 1;
333 #endif
334 	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
335 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
336 			fields |= IBV_RX_HASH_SRC_IPV4;
337 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
338 			fields |= IBV_RX_HASH_DST_IPV4;
339 		else
340 			fields |= MLX5_IPV4_IBV_RX_HASH;
341 	} else if (rss_types & MLX5_IPV6_LAYER_TYPES) {
342 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
343 			fields |= IBV_RX_HASH_SRC_IPV6;
344 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
345 			fields |= IBV_RX_HASH_DST_IPV6;
346 		else
347 			fields |= MLX5_IPV6_IBV_RX_HASH;
348 	}
349 	if (rss_types & RTE_ETH_RSS_UDP) {
350 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
351 			fields |= IBV_RX_HASH_SRC_PORT_UDP;
352 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
353 			fields |= IBV_RX_HASH_DST_PORT_UDP;
354 		else
355 			fields |= MLX5_UDP_IBV_RX_HASH;
356 	} else if (rss_types & RTE_ETH_RSS_TCP) {
357 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
358 			fields |= IBV_RX_HASH_SRC_PORT_TCP;
359 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
360 			fields |= IBV_RX_HASH_DST_PORT_TCP;
361 		else
362 			fields |= MLX5_TCP_IBV_RX_HASH;
363 	}
364 	if (rss_types & RTE_ETH_RSS_ESP)
365 		fields |= IBV_RX_HASH_IPSEC_SPI;
366 	if (rss_inner)
367 		fields |= IBV_RX_HASH_INNER;
368 	*hash_fields = fields;
369 }
370 
371 /**
372  * Generate the matching pattern item flags.
373  *
374  * @param[in] items
375  *   Pointer to the list of items.
376  *
377  * @return
378  *   Matching item flags. The RSS hash field function
379  *   silently ignores unsupported flags.
380  */
381 static uint64_t
382 flow_hw_matching_item_flags_get(const struct rte_flow_item items[])
383 {
384 	uint64_t item_flags = 0;
385 	uint64_t last_item = 0;
386 
387 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
388 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
389 		int item_type = items->type;
390 
391 		switch (item_type) {
392 		case RTE_FLOW_ITEM_TYPE_IPV4:
393 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
394 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
395 			break;
396 		case RTE_FLOW_ITEM_TYPE_IPV6:
397 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
398 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
399 			break;
400 		case RTE_FLOW_ITEM_TYPE_TCP:
401 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
402 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
403 			break;
404 		case RTE_FLOW_ITEM_TYPE_UDP:
405 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
406 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
407 			break;
408 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
409 			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
410 					     MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
411 			break;
412 		case RTE_FLOW_ITEM_TYPE_GRE:
413 			last_item = MLX5_FLOW_LAYER_GRE;
414 			break;
415 		case RTE_FLOW_ITEM_TYPE_NVGRE:
416 			last_item = MLX5_FLOW_LAYER_GRE;
417 			break;
418 		case RTE_FLOW_ITEM_TYPE_VXLAN:
419 			last_item = MLX5_FLOW_LAYER_VXLAN;
420 			break;
421 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
422 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
423 			break;
424 		case RTE_FLOW_ITEM_TYPE_GENEVE:
425 			last_item = MLX5_FLOW_LAYER_GENEVE;
426 			break;
427 		case RTE_FLOW_ITEM_TYPE_MPLS:
428 			last_item = MLX5_FLOW_LAYER_MPLS;
429 			break;
430 		case RTE_FLOW_ITEM_TYPE_GTP:
431 			last_item = MLX5_FLOW_LAYER_GTP;
432 			break;
433 		case RTE_FLOW_ITEM_TYPE_COMPARE:
434 			last_item = MLX5_FLOW_ITEM_COMPARE;
435 			break;
436 		default:
437 			break;
438 		}
439 		item_flags |= last_item;
440 	}
441 	return item_flags;
442 }
443 
444 /**
445  * Register destination table DR jump action.
446  *
447  * @param[in] dev
448  *   Pointer to the rte_eth_dev structure.
449  * @param[in] cfg
450  *   Pointer to the template table configuration.
451  * @param[in] dest_group
452  *   The destination group ID.
453  * @param[out] error
454  *   Pointer to error structure.
455  *
456  * @return
457  *    Jump action on success, NULL otherwise and rte_errno is set.
458  */
459 static struct mlx5_hw_jump_action *
460 flow_hw_jump_action_register(struct rte_eth_dev *dev,
461 			     const struct mlx5_flow_template_table_cfg *cfg,
462 			     uint32_t dest_group,
463 			     struct rte_flow_error *error)
464 {
465 	struct mlx5_priv *priv = dev->data->dev_private;
466 	struct rte_flow_attr jattr = cfg->attr.flow_attr;
467 	struct mlx5_flow_group *grp;
468 	struct mlx5_flow_cb_ctx ctx = {
469 		.dev = dev,
470 		.error = error,
471 		.data = &jattr,
472 	};
473 	struct mlx5_list_entry *ge;
474 	uint32_t target_group;
475 
476 	target_group = dest_group;
477 	if (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))
478 		return NULL;
479 	jattr.group = target_group;
480 	ge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);
481 	if (!ge)
482 		return NULL;
483 	grp = container_of(ge, struct mlx5_flow_group, entry);
484 	return &grp->jump;
485 }
486 
487 /**
488  * Release jump action.
489  *
490  * @param[in] dev
491  *   Pointer to the rte_eth_dev structure.
492  * @param[in] jump
493  *   Pointer to the jump action.
494  */
495 
496 static void
497 flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
498 {
499 	struct mlx5_priv *priv = dev->data->dev_private;
500 	struct mlx5_flow_group *grp;
501 
502 	grp = container_of
503 		(jump, struct mlx5_flow_group, jump);
504 	mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
505 }
506 
507 /**
508  * Register queue/RSS action.
509  *
510  * @param[in] dev
511  *   Pointer to the rte_eth_dev structure.
512  * @param[in] hws_flags
513  *   DR action flags.
514  * @param[in] action
515  *   rte flow action.
516  *
517  * @return
518  *    Rx hash queue (TIR) object on success, NULL otherwise and rte_errno is set.
519  */
520 static inline struct mlx5_hrxq*
521 flow_hw_tir_action_register(struct rte_eth_dev *dev,
522 			    uint32_t hws_flags,
523 			    const struct rte_flow_action *action)
524 {
525 	struct mlx5_flow_rss_desc rss_desc = {
526 		.hws_flags = hws_flags,
527 	};
528 	struct mlx5_hrxq *hrxq;
529 
530 	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
531 		const struct rte_flow_action_queue *queue = action->conf;
532 
533 		rss_desc.const_q = &queue->index;
534 		rss_desc.queue_num = 1;
535 	} else {
536 		const struct rte_flow_action_rss *rss = action->conf;
537 
538 		rss_desc.queue_num = rss->queue_num;
539 		rss_desc.const_q = rss->queue;
540 		memcpy(rss_desc.key,
541 		       !rss->key ? rss_hash_default_key : rss->key,
542 		       MLX5_RSS_HASH_KEY_LEN);
543 		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
544 		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
545 		rss_desc.symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
546 		flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
547 		flow_dv_action_rss_l34_hash_adjust(rss->types,
548 						   &rss_desc.hash_fields);
549 		if (rss->level > 1) {
550 			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
551 			rss_desc.tunnel = 1;
552 		}
553 	}
554 	hrxq = mlx5_hrxq_get(dev, &rss_desc);
555 	return hrxq;
556 }
557 
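/**
 * Fill a DR rule action from an ASO connection tracking object.
 *
 * The CT object is looked up by index; the call fails if the object does
 * not exist or is not yet available on the given queue, otherwise the CT
 * action offset and direction are set.
 */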
558 static __rte_always_inline int
559 flow_hw_ct_compile(struct rte_eth_dev *dev,
560 		   uint32_t queue, uint32_t idx,
561 		   struct mlx5dr_rule_action *rule_act)
562 {
563 	struct mlx5_priv *priv = dev->data->dev_private;
564 	struct mlx5_aso_ct_action *ct;
565 
566 	ct = mlx5_ipool_get(priv->hws_ctpool->cts, MLX5_ACTION_CTX_CT_GET_IDX(idx));
567 	if (!ct || mlx5_aso_ct_available(priv->sh, queue, ct))
568 		return -1;
569 	rule_act->action = priv->hws_ctpool->dr_action;
570 	rule_act->aso_ct.offset = ct->offset;
571 	rule_act->aso_ct.direction = ct->is_original ?
572 		MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
573 		MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
574 	return 0;
575 }
576 
577 static void
578 flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
579 {
580 	if (encap_decap->multi_pattern) {
581 		uint32_t refcnt = __atomic_sub_fetch(encap_decap->multi_pattern_refcnt,
582 						     1, __ATOMIC_RELAXED);
583 		if (refcnt)
584 			return;
585 		mlx5_free((void *)(uintptr_t)encap_decap->multi_pattern_refcnt);
586 	}
587 	if (encap_decap->action)
588 		mlx5dr_action_destroy(encap_decap->action);
589 }
590 
591 static void
592 flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
593 {
594 	if (mhdr->multi_pattern) {
595 		uint32_t refcnt = __atomic_sub_fetch(mhdr->multi_pattern_refcnt,
596 						     1, __ATOMIC_RELAXED);
597 		if (refcnt)
598 			return;
599 		mlx5_free((void *)(uintptr_t)mhdr->multi_pattern_refcnt);
600 	}
601 	if (mhdr->action)
602 		mlx5dr_action_destroy(mhdr->action);
603 }
604 
605 /**
606  * Destroy DR actions created by action template.
607  *
608  * DR actions are created during the table creation's action translation
609  * and must be destroyed when the table itself is destroyed.
610  *
611  * @param[in] dev
612  *   Pointer to the rte_eth_dev structure.
613  * @param[in] acts
614  *   Pointer to the template HW steering DR actions.
615  */
616 static void
617 __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
618 				 struct mlx5_hw_actions *acts)
619 {
620 	struct mlx5_priv *priv = dev->data->dev_private;
621 	struct mlx5_action_construct_data *data;
622 
623 	while (!LIST_EMPTY(&acts->act_list)) {
624 		data = LIST_FIRST(&acts->act_list);
625 		LIST_REMOVE(data, next);
626 		mlx5_ipool_free(priv->acts_ipool, data->idx);
627 	}
628 
629 	if (acts->mark)
630 		if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
631 			flow_hw_rxq_flag_set(dev, false);
632 
633 	if (acts->jump) {
634 		struct mlx5_flow_group *grp;
635 
636 		grp = container_of
637 			(acts->jump, struct mlx5_flow_group, jump);
638 		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
639 		acts->jump = NULL;
640 	}
641 	if (acts->tir) {
642 		mlx5_hrxq_release(dev, acts->tir->idx);
643 		acts->tir = NULL;
644 	}
645 	if (acts->encap_decap) {
646 		flow_hw_template_destroy_reformat_action(acts->encap_decap);
647 		mlx5_free(acts->encap_decap);
648 		acts->encap_decap = NULL;
649 	}
650 	if (acts->push_remove) {
651 		if (acts->push_remove->action)
652 			mlx5dr_action_destroy(acts->push_remove->action);
653 		mlx5_free(acts->push_remove);
654 		acts->push_remove = NULL;
655 	}
656 	if (acts->mhdr) {
657 		flow_hw_template_destroy_mhdr_action(acts->mhdr);
658 		mlx5_free(acts->mhdr);
659 		acts->mhdr = NULL;
660 	}
661 	if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
662 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
663 		acts->cnt_id = 0;
664 	}
665 	if (acts->mtr_id) {
666 		mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
667 		acts->mtr_id = 0;
668 	}
669 }
670 
671 /**
672  * Allocate action construct data for a dynamic action.
673  *
674  * @param[in] priv
675  *   Pointer to the port private data structure.
676  * @param[in] type
677  *   Action type.
678  * @param[in] action_src
679  *   Offset of source rte flow action.
680  * @param[in] action_dst
681  *   Offset of destination DR action.
682  *
683  * @return
684  *    Pointer to the allocated action construct data on success,
685  *    NULL otherwise. The entry is not linked into any list;
686  *    callers insert it into the action list themselves.
687  */
688 static __rte_always_inline struct mlx5_action_construct_data *
689 __flow_hw_act_data_alloc(struct mlx5_priv *priv,
690 			 enum rte_flow_action_type type,
691 			 uint16_t action_src,
692 			 uint16_t action_dst)
693 {
694 	struct mlx5_action_construct_data *act_data;
695 	uint32_t idx = 0;
696 
697 	act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
698 	if (!act_data)
699 		return NULL;
700 	act_data->idx = idx;
701 	act_data->type = type;
702 	act_data->action_src = action_src;
703 	act_data->action_dst = action_dst;
704 	return act_data;
705 }
706 
707 /**
708  * Append dynamic action to the dynamic action list.
709  *
710  * @param[in] priv
711  *   Pointer to the port private data structure.
712  * @param[in] acts
713  *   Pointer to the template HW steering DR actions.
714  * @param[in] type
715  *   Action type.
716  * @param[in] action_src
717  *   Offset of source rte flow action.
718  * @param[in] action_dst
719  *   Offset of destination DR action.
720  *
721  * @return
722  *    0 on success, negative value otherwise and rte_errno is set.
723  */
724 static __rte_always_inline int
725 __flow_hw_act_data_general_append(struct mlx5_priv *priv,
726 				  struct mlx5_hw_actions *acts,
727 				  enum rte_flow_action_type type,
728 				  uint16_t action_src,
729 				  uint16_t action_dst)
730 {
731 	struct mlx5_action_construct_data *act_data;
732 
733 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
734 	if (!act_data)
735 		return -1;
736 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
737 	return 0;
738 }
739 
740 static __rte_always_inline int
741 flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
742 				      struct mlx5_hw_actions *acts,
743 				      enum rte_flow_action_type type,
744 				      uint16_t action_src, uint16_t action_dst,
745 				      indirect_list_callback_t cb)
746 {
747 	struct mlx5_action_construct_data *act_data;
748 
749 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
750 	if (!act_data)
751 		return -1;
752 	act_data->indirect_list_cb = cb;
753 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
754 	return 0;
755 }
756 /**
757  * Append dynamic encap action to the dynamic action list.
758  *
759  * @param[in] priv
760  *   Pointer to the port private data structure.
761  * @param[in] acts
762  *   Pointer to the template HW steering DR actions.
763  * @param[in] type
764  *   Action type.
765  * @param[in] action_src
766  *   Offset of source rte flow action.
767  * @param[in] action_dst
768  *   Offset of destination DR action.
769  * @param[in] len
770  *   Length of the data to be updated.
771  *
772  * @return
773  *    0 on success, negative value otherwise and rte_errno is set.
774  */
775 static __rte_always_inline int
776 __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
777 				struct mlx5_hw_actions *acts,
778 				enum rte_flow_action_type type,
779 				uint16_t action_src,
780 				uint16_t action_dst,
781 				uint16_t len)
782 {
783 	struct mlx5_action_construct_data *act_data;
784 
785 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
786 	if (!act_data)
787 		return -1;
788 	act_data->encap.len = len;
789 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
790 	return 0;
791 }
792 
793 /**
794  * Append dynamic push action to the dynamic action list.
795  *
796  * @param[in] dev
797  *   Pointer to the port.
798  * @param[in] acts
799  *   Pointer to the template HW steering DR actions.
800  * @param[in] type
801  *   Action type.
802  * @param[in] action_src
803  *   Offset of source rte flow action.
804  * @param[in] action_dst
805  *   Offset of destination DR action.
806  * @param[in] len
807  *   Length of the data to be updated.
808  *
809  * @return
810  *    Data pointer on success, NULL otherwise and rte_errno is set.
811  */
812 static __rte_always_inline void *
813 __flow_hw_act_data_push_append(struct rte_eth_dev *dev,
814 			       struct mlx5_hw_actions *acts,
815 			       enum rte_flow_action_type type,
816 			       uint16_t action_src,
817 			       uint16_t action_dst,
818 			       uint16_t len)
819 {
820 	struct mlx5_action_construct_data *act_data;
821 	struct mlx5_priv *priv = dev->data->dev_private;
822 
823 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
824 	if (!act_data)
825 		return NULL;
826 	act_data->ipv6_ext.len = len;
827 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
828 	return act_data;
829 }
830 
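/**
 * Append modify header construct data to the dynamic action list.
 *
 * Records the modify-header command range [mhdr_cmds_off, mhdr_cmds_end)
 * together with the field, dcopy and mask descriptions needed to update
 * those commands when a flow rule is created.
 */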
831 static __rte_always_inline int
832 __flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
833 				     struct mlx5_hw_actions *acts,
834 				     enum rte_flow_action_type type,
835 				     uint16_t action_src,
836 				     uint16_t action_dst,
837 				     uint16_t mhdr_cmds_off,
838 				     uint16_t mhdr_cmds_end,
839 				     bool shared,
840 				     struct field_modify_info *field,
841 				     struct field_modify_info *dcopy,
842 				     uint32_t *mask)
843 {
844 	struct mlx5_action_construct_data *act_data;
845 
846 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
847 	if (!act_data)
848 		return -1;
849 	act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
850 	act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
851 	act_data->modify_header.shared = shared;
852 	rte_memcpy(act_data->modify_header.field, field,
853 		   sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
854 	rte_memcpy(act_data->modify_header.dcopy, dcopy,
855 		   sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
856 	rte_memcpy(act_data->modify_header.mask, mask,
857 		   sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
858 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
859 	return 0;
860 }
861 
862 /**
863  * Append shared RSS action to the dynamic action list.
864  *
865  * @param[in] priv
866  *   Pointer to the port private data structure.
867  * @param[in] acts
868  *   Pointer to the template HW steering DR actions.
869  * @param[in] type
870  *   Action type.
871  * @param[in] action_src
872  *   Offset of source rte flow action.
873  * @param[in] action_dst
874  *   Offset of destination DR action.
875  * @param[in] idx
876  *   Shared RSS index.
877  * @param[in] rss
878  *   Pointer to the shared RSS info.
879  *
880  * @return
881  *    0 on success, negative value otherwise and rte_errno is set.
882  */
883 static __rte_always_inline int
884 __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
885 				     struct mlx5_hw_actions *acts,
886 				     enum rte_flow_action_type type,
887 				     uint16_t action_src,
888 				     uint16_t action_dst,
889 				     uint32_t idx,
890 				     struct mlx5_shared_action_rss *rss)
891 {
892 	struct mlx5_action_construct_data *act_data;
893 
894 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
895 	if (!act_data)
896 		return -1;
897 	act_data->shared_rss.level = rss->origin.level;
898 	act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
899 				     rss->origin.types;
900 	act_data->shared_rss.idx = idx;
901 	act_data->shared_rss.symmetric_hash_function =
902 		MLX5_RSS_IS_SYMM(rss->origin.func);
903 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
904 	return 0;
905 }
906 
907 /**
908  * Append shared counter action to the dynamic action list.
909  *
910  * @param[in] priv
911  *   Pointer to the port private data structure.
912  * @param[in] acts
913  *   Pointer to the template HW steering DR actions.
914  * @param[in] type
915  *   Action type.
916  * @param[in] action_src
917  *   Offset of source rte flow action.
918  * @param[in] action_dst
919  *   Offset of destination DR action.
920  * @param[in] cnt_id
921  *   Shared counter id.
922  *
923  * @return
924  *    0 on success, negative value otherwise and rte_errno is set.
925  */
926 static __rte_always_inline int
927 __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
928 				     struct mlx5_hw_actions *acts,
929 				     enum rte_flow_action_type type,
930 				     uint16_t action_src,
931 				     uint16_t action_dst,
932 				     cnt_id_t cnt_id)
933 {
934 	struct mlx5_action_construct_data *act_data;
935 
936 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
937 	if (!act_data)
938 		return -1;
939 	act_data->type = type;
940 	act_data->shared_counter.id = cnt_id;
941 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
942 	return 0;
943 }
944 
945 /**
946  * Append shared meter_mark action to the dynamic action list.
947  *
948  * @param[in] priv
949  *   Pointer to the port private data structure.
950  * @param[in] acts
951  *   Pointer to the template HW steering DR actions.
952  * @param[in] type
953  *   Action type.
954  * @param[in] action_src
955  *   Offset of source rte flow action.
956  * @param[in] action_dst
957  *   Offset of destination DR action.
958  * @param[in] mtr_id
959  *   Shared meter id.
960  *
961  * @return
962  *    0 on success, negative value otherwise and rte_errno is set.
963  */
964 static __rte_always_inline int
965 __flow_hw_act_data_shared_mtr_append(struct mlx5_priv *priv,
966 				     struct mlx5_hw_actions *acts,
967 				     enum rte_flow_action_type type,
968 				     uint16_t action_src,
969 				     uint16_t action_dst,
970 				     cnt_id_t mtr_id)
971 {
972 	struct mlx5_action_construct_data *act_data;
973 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
974 	if (!act_data)
975 		return -1;
976 	act_data->type = type;
977 	act_data->shared_meter.id = mtr_id;
978 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
979 	return 0;
980 }
981 
982 /**
983  * Translate shared indirect action.
984  *
985  * @param[in] dev
986  *   Pointer to the rte_eth_dev data structure.
987  * @param[in] action
988  *   Pointer to the shared indirect rte_flow action.
989  * @param[in] acts
990  *   Pointer to the template HW steering DR actions.
991  * @param[in] action_src
992  *   Offset of source rte flow action.
993  * @param[in] action_dst
994  *   Offset of destination DR action.
995  *
996  * @return
997  *    0 on success, negative value otherwise and rte_errno is set.
998  */
999 static __rte_always_inline int
1000 flow_hw_shared_action_translate(struct rte_eth_dev *dev,
1001 				const struct rte_flow_action *action,
1002 				struct mlx5_hw_actions *acts,
1003 				uint16_t action_src,
1004 				uint16_t action_dst)
1005 {
1006 	struct mlx5_priv *priv = dev->data->dev_private;
1007 	struct mlx5_shared_action_rss *shared_rss;
1008 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
1009 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1010 	uint32_t idx = act_idx &
1011 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1012 
1013 	switch (type) {
1014 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
1015 		shared_rss = mlx5_ipool_get
1016 		  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
1017 		if (!shared_rss || __flow_hw_act_data_shared_rss_append
1018 		    (priv, acts,
1019 		    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
1020 		    action_src, action_dst, idx, shared_rss))
1021 			return -1;
1022 		break;
1023 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
1024 		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
1025 			(enum rte_flow_action_type)
1026 			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
1027 			action_src, action_dst, act_idx))
1028 			return -1;
1029 		break;
1030 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
1031 		/* Not supported, prevented by the validate function. */
1032 		MLX5_ASSERT(0);
1033 		break;
1034 	case MLX5_INDIRECT_ACTION_TYPE_CT:
1035 		if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
1036 				       idx, &acts->rule_acts[action_dst]))
1037 			return -1;
1038 		break;
1039 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1040 		if (__flow_hw_act_data_shared_mtr_append(priv, acts,
1041 			(enum rte_flow_action_type)
1042 			MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
1043 			action_src, action_dst, idx))
1044 			return -1;
1045 		break;
1046 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
1047 		flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
1048 		break;
1049 	default:
1050 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
1051 		break;
1052 	}
1053 	return 0;
1054 }
1055 
1056 static __rte_always_inline bool
1057 flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
1058 				      const struct rte_flow_action *mask)
1059 {
1060 	const struct rte_flow_action_modify_field *v = action->conf;
1061 	const struct rte_flow_action_modify_field *m = mask->conf;
1062 
1063 	if (v->src.field == RTE_FLOW_FIELD_VALUE) {
1064 		uint32_t j;
1065 
1066 		for (j = 0; j < RTE_DIM(m->src.value); ++j) {
1067 			/*
1068 			 * Immediate value is considered to be masked
1069 			 * (and thus shared by all flow rules), if mask
1070 			 * is non-zero. Partial mask over immediate value
1071 			 * is not allowed.
1072 			 */
1073 			if (m->src.value[j])
1074 				return true;
1075 		}
1076 		return false;
1077 	}
1078 	if (v->src.field == RTE_FLOW_FIELD_POINTER)
1079 		return m->src.pvalue != NULL;
1080 	/*
1081 	 * Source field types other than VALUE and
1082 	 * POINTER are always shared.
1083 	 */
1084 	return true;
1085 }
1086 
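/**
 * Check whether a NOP has to be inserted before appending a new modify
 * header command.
 *
 * Compares the new command with the last one already stored in @p mhdr and
 * returns true when the two commands touch the same packet field and
 * therefore must be separated by a NOP command.
 */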
1087 static __rte_always_inline bool
1088 flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
1089 			  const struct mlx5_modification_cmd *cmd)
1090 {
1091 	struct mlx5_modification_cmd last_cmd = { { 0 } };
1092 	struct mlx5_modification_cmd new_cmd = { { 0 } };
1093 	const uint32_t cmds_num = mhdr->mhdr_cmds_num;
1094 	unsigned int last_type;
1095 	bool should_insert = false;
1096 
1097 	if (cmds_num == 0)
1098 		return false;
1099 	last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
1100 	last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
1101 	last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
1102 	last_type = last_cmd.action_type;
1103 	new_cmd = *cmd;
1104 	new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
1105 	new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
1106 	switch (new_cmd.action_type) {
1107 	case MLX5_MODIFICATION_TYPE_SET:
1108 	case MLX5_MODIFICATION_TYPE_ADD:
1109 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1110 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1111 			should_insert = new_cmd.field == last_cmd.field;
1112 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1113 			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1114 			should_insert = new_cmd.field == last_cmd.dst_field;
1115 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1116 			should_insert = false;
1117 		else
1118 			MLX5_ASSERT(false); /* Other types are not supported. */
1119 		break;
1120 	case MLX5_MODIFICATION_TYPE_COPY:
1121 	case MLX5_MODIFICATION_TYPE_ADD_FIELD:
1122 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1123 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1124 			should_insert = (new_cmd.field == last_cmd.field ||
1125 					 new_cmd.dst_field == last_cmd.field);
1126 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1127 			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1128 			should_insert = (new_cmd.field == last_cmd.dst_field ||
1129 					 new_cmd.dst_field == last_cmd.dst_field);
1130 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1131 			should_insert = false;
1132 		else
1133 			MLX5_ASSERT(false); /* Other types are not supported. */
1134 		break;
1135 	default:
1136 		/* Other action types should be rejected on AT validation. */
1137 		MLX5_ASSERT(false);
1138 		break;
1139 	}
1140 	return should_insert;
1141 }
1142 
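/** Append a NOP command to the modify header command array of @p mhdr. */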
1143 static __rte_always_inline int
1144 flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
1145 {
1146 	struct mlx5_modification_cmd *nop;
1147 	uint32_t num = mhdr->mhdr_cmds_num;
1148 
1149 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1150 		return -ENOMEM;
1151 	nop = mhdr->mhdr_cmds + num;
1152 	nop->data0 = 0;
1153 	nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
1154 	nop->data0 = rte_cpu_to_be_32(nop->data0);
1155 	nop->data1 = 0;
1156 	mhdr->mhdr_cmds_num = num + 1;
1157 	return 0;
1158 }
1159 
1160 static __rte_always_inline int
1161 flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
1162 			struct mlx5_modification_cmd *cmd)
1163 {
1164 	uint32_t num = mhdr->mhdr_cmds_num;
1165 
1166 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1167 		return -ENOMEM;
1168 	mhdr->mhdr_cmds[num] = *cmd;
1169 	mhdr->mhdr_cmds_num = num + 1;
1170 	return 0;
1171 }
1172 
1173 static __rte_always_inline int
1174 flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
1175 				   struct mlx5_flow_dv_modify_hdr_resource *resource)
1176 {
1177 	uint32_t idx;
1178 	int ret;
1179 
1180 	for (idx = 0; idx < resource->actions_num; ++idx) {
1181 		struct mlx5_modification_cmd *src = &resource->actions[idx];
1182 
1183 		if (flow_hw_should_insert_nop(mhdr, src)) {
1184 			ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1185 			if (ret)
1186 				return ret;
1187 		}
1188 		ret = flow_hw_mhdr_cmd_append(mhdr, src);
1189 		if (ret)
1190 			return ret;
1191 	}
1192 	return 0;
1193 }
1194 
1195 static __rte_always_inline void
1196 flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
1197 			  struct rte_flow_actions_template *at)
1198 {
1199 	memset(mhdr, 0, sizeof(*mhdr));
1200 	/* Modify header action without any commands is shared by default. */
1201 	mhdr->shared = true;
1202 	mhdr->pos = at->mhdr_off;
1203 }
1204 
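/**
 * Compile a MODIFY_FIELD action from an action template.
 *
 * The action is converted into modify-header commands which are appended to
 * @p mhdr (with a NOP inserted when the new commands conflict with the last
 * appended one). For actions which are not fully masked, construct data is
 * recorded so that the commands can be updated on flow rule creation.
 */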
1205 static __rte_always_inline int
1206 flow_hw_modify_field_compile(struct rte_eth_dev *dev,
1207 			     const struct rte_flow_attr *attr,
1208 			     const struct rte_flow_action *action, /* Current action from AT. */
1209 			     const struct rte_flow_action *action_mask, /* Current mask from AT. */
1210 			     struct mlx5_hw_actions *acts,
1211 			     struct mlx5_hw_modify_header_action *mhdr,
1212 			     uint16_t src_pos,
1213 			     struct rte_flow_error *error)
1214 {
1215 	struct mlx5_priv *priv = dev->data->dev_private;
1216 	const struct rte_flow_action_modify_field *conf = action->conf;
1217 	union {
1218 		struct mlx5_flow_dv_modify_hdr_resource resource;
1219 		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
1220 			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
1221 	} dummy;
1222 	struct mlx5_flow_dv_modify_hdr_resource *resource;
1223 	struct rte_flow_item item = {
1224 		.spec = NULL,
1225 		.mask = NULL
1226 	};
1227 	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1228 						{0, 0, MLX5_MODI_OUT_NONE} };
1229 	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1230 						{0, 0, MLX5_MODI_OUT_NONE} };
1231 	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
1232 	uint32_t type, value = 0;
1233 	uint16_t cmds_start, cmds_end;
1234 	bool shared;
1235 	int ret;
1236 
1237 	/*
1238 	 * Modify header action is shared if previous modify_field actions
1239 	 * are shared and currently compiled action is shared.
1240 	 */
1241 	shared = flow_hw_action_modify_field_is_shared(action, action_mask);
1242 	mhdr->shared &= shared;
1243 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1244 	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
1245 		type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
1246 								MLX5_MODIFICATION_TYPE_ADD;
1247 		/* For SET/ADD fill the destination field (field) first. */
1248 		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1249 						  conf->width, dev,
1250 						  attr, error);
1251 		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1252 				(void *)(uintptr_t)conf->src.pvalue :
1253 				(void *)(uintptr_t)&conf->src.value;
1254 		if (conf->dst.field == RTE_FLOW_FIELD_META ||
1255 		    conf->dst.field == RTE_FLOW_FIELD_TAG ||
1256 		    conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
1257 		    conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
1258 			uint8_t tag_index = flow_tag_index_get(&conf->dst);
1259 
1260 			value = *(const unaligned_uint32_t *)item.spec;
1261 			if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
1262 			    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1263 				value = rte_cpu_to_be_32(value << 16);
1264 			else
1265 				value = rte_cpu_to_be_32(value);
1266 			item.spec = &value;
1267 		} else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
1268 			   conf->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
1269 			/*
1270 			 * Both QFI and Geneve option type are passed as a uint8_t integer,
1271 			 * but they are accessed through the 2nd least significant byte of a
1272 			 * 32-bit field in the modify header command.
1273 			 */
1274 			value = *(const uint8_t *)item.spec;
1275 			value = rte_cpu_to_be_32(value << 8);
1276 			item.spec = &value;
1277 		}
1278 	} else {
1279 		type = conf->operation == RTE_FLOW_MODIFY_SET ?
1280 		       MLX5_MODIFICATION_TYPE_COPY : MLX5_MODIFICATION_TYPE_ADD_FIELD;
1281 		/* For COPY fill the destination field (dcopy) without mask. */
1282 		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1283 						  conf->width, dev,
1284 						  attr, error);
1285 		/* Then construct the source field (field) with mask. */
1286 		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1287 						  conf->width, dev,
1288 						  attr, error);
1289 	}
1290 	item.mask = &mask;
1291 	memset(&dummy, 0, sizeof(dummy));
1292 	resource = &dummy.resource;
1293 	ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
1294 	if (ret)
1295 		return ret;
1296 	MLX5_ASSERT(resource->actions_num > 0);
1297 	/*
1298 	 * If the previous modify field action collides with this one, then insert a NOP command.
1299 	 * This NOP command will not be a part of action's command range used to update commands
1300 	 * on rule creation.
1301 	 */
1302 	if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
1303 		ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1304 		if (ret)
1305 			return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1306 						  NULL, "too many modify field operations specified");
1307 	}
1308 	cmds_start = mhdr->mhdr_cmds_num;
1309 	ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
1310 	if (ret)
1311 		return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1312 					  NULL, "too many modify field operations specified");
1313 
1314 	cmds_end = mhdr->mhdr_cmds_num;
1315 	if (shared)
1316 		return 0;
1317 	ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
1318 						   src_pos, mhdr->pos,
1319 						   cmds_start, cmds_end, shared,
1320 						   field, dcopy, mask);
1321 	if (ret)
1322 		return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1323 					  NULL, "not enough memory to store modify field metadata");
1324 	return 0;
1325 }
1326 
1327 static uint32_t
1328 flow_hw_count_nop_modify_field(struct mlx5_hw_modify_header_action *mhdr)
1329 {
1330 	uint32_t i;
1331 	uint32_t nops = 0;
1332 
1333 	for (i = 0; i < mhdr->mhdr_cmds_num; ++i) {
1334 		struct mlx5_modification_cmd cmd = mhdr->mhdr_cmds[i];
1335 
1336 		cmd.data0 = rte_be_to_cpu_32(cmd.data0);
1337 		if (cmd.action_type == MLX5_MODIFICATION_TYPE_NOP)
1338 			++nops;
1339 	}
1340 	return nops;
1341 }
1342 
1343 static int
1344 flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
1345 				       const struct mlx5_flow_template_table_cfg *cfg,
1346 				       struct mlx5_hw_modify_header_action *mhdr,
1347 				       struct rte_flow_error *error)
1348 {
1349 	struct mlx5_priv *priv = dev->data->dev_private;
1350 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
1351 
1352 	/*
1353 	 * Header modify pattern length limitation is only valid for HWS groups, i.e. groups > 0.
1354 	 * In group 0, MODIFY_FIELD actions are handled with header modify actions
1355 	 * managed by rdma-core.
1356 	 */
1357 	if (cfg->attr.flow_attr.group != 0 &&
1358 	    mhdr->mhdr_cmds_num > hca_attr->max_header_modify_pattern_length) {
1359 		uint32_t nops = flow_hw_count_nop_modify_field(mhdr);
1360 
1361 		DRV_LOG(ERR, "Too many modify header commands generated from "
1362 			     "MODIFY_FIELD actions. "
1363 			     "Generated HW commands = %u (amount of NOP commands = %u). "
1364 			     "Maximum supported = %u.",
1365 			     mhdr->mhdr_cmds_num, nops,
1366 			     hca_attr->max_header_modify_pattern_length);
1367 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1368 					  "Number of MODIFY_FIELD actions exceeds maximum "
1369 					  "supported limit of actions");
1370 	}
1371 	return 0;
1372 }
1373 
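/**
 * Validate and translate a REPRESENTED_PORT action.
 *
 * The action is only accepted on transfer rules in non-root groups created
 * on the E-Switch manager (proxy) port. If the port ID is masked, the fixed
 * per-vport DR action is used directly, otherwise construct data is appended
 * so the destination port can be resolved per flow rule.
 */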
1374 static int
1375 flow_hw_represented_port_compile(struct rte_eth_dev *dev,
1376 				 const struct rte_flow_attr *attr,
1377 				 const struct rte_flow_action *action,
1378 				 const struct rte_flow_action *action_mask,
1379 				 struct mlx5_hw_actions *acts,
1380 				 uint16_t action_src, uint16_t action_dst,
1381 				 struct rte_flow_error *error)
1382 {
1383 	struct mlx5_priv *priv = dev->data->dev_private;
1384 	const struct rte_flow_action_ethdev *v = action->conf;
1385 	const struct rte_flow_action_ethdev *m = action_mask->conf;
1386 	int ret;
1387 
1388 	if (!attr->group)
1389 		return rte_flow_error_set(error, EINVAL,
1390 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1391 					  "represented_port action cannot"
1392 					  " be used on group 0");
1393 	if (!attr->transfer)
1394 		return rte_flow_error_set(error, EINVAL,
1395 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1396 					  NULL,
1397 					  "represented_port action requires"
1398 					  " transfer attribute");
1399 	if (attr->ingress || attr->egress)
1400 		return rte_flow_error_set(error, EINVAL,
1401 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1402 					  "represented_port action cannot"
1403 					  " be used with direction attributes");
1404 	if (!priv->master)
1405 		return rte_flow_error_set(error, EINVAL,
1406 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1407 					  "represented_port action must"
1408 					  " be used on proxy port");
1409 	if (m && !!m->port_id) {
1410 		struct mlx5_priv *port_priv;
1411 
1412 		if (!v)
1413 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1414 						  action, "port index was not provided");
1415 		port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
1416 		if (port_priv == NULL)
1417 			return rte_flow_error_set
1418 					(error, EINVAL,
1419 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1420 					 "port does not exist or unable to"
1421 					 " obtain E-Switch info for port");
1422 		MLX5_ASSERT(priv->hw_vport != NULL);
1423 		if (priv->hw_vport[v->port_id]) {
1424 			acts->rule_acts[action_dst].action =
1425 					priv->hw_vport[v->port_id];
1426 		} else {
1427 			return rte_flow_error_set
1428 					(error, EINVAL,
1429 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1430 					 "cannot use represented_port action"
1431 					 " with this port");
1432 		}
1433 	} else {
1434 		ret = __flow_hw_act_data_general_append
1435 				(priv, acts, action->type,
1436 				 action_src, action_dst);
1437 		if (ret)
1438 			return rte_flow_error_set
1439 					(error, ENOMEM,
1440 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1441 					 "not enough memory to store"
1442 					 " vport action");
1443 	}
1444 	return 0;
1445 }
1446 
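/**
 * Translate a METER action from an action template.
 *
 * Fills the ASO meter DR action at @p aso_mtr_pos, registers a jump action
 * to the meter policy group at @p jump_pos and waits for the ASO meter
 * object to become ready.
 */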
1447 static __rte_always_inline int
1448 flow_hw_meter_compile(struct rte_eth_dev *dev,
1449 		      const struct mlx5_flow_template_table_cfg *cfg,
1450 		      uint16_t aso_mtr_pos,
1451 		      uint16_t jump_pos,
1452 		      const struct rte_flow_action *action,
1453 		      struct mlx5_hw_actions *acts,
1454 		      struct rte_flow_error *error)
1455 {
1456 	struct mlx5_priv *priv = dev->data->dev_private;
1457 	struct mlx5_aso_mtr *aso_mtr;
1458 	const struct rte_flow_action_meter *meter = action->conf;
1459 	uint32_t group = cfg->attr.flow_attr.group;
1460 
1461 	aso_mtr = mlx5_aso_meter_by_idx(priv, meter->mtr_id);
1462 	acts->rule_acts[aso_mtr_pos].action = priv->mtr_bulk.action;
1463 	acts->rule_acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1464 	acts->jump = flow_hw_jump_action_register
1465 		(dev, cfg, aso_mtr->fm.group, error);
1466 	if (!acts->jump)
1467 		return -ENOMEM;
1468 	acts->rule_acts[jump_pos].action = (!!group) ?
1469 				    acts->jump->hws_action :
1470 				    acts->jump->root_action;
1471 	if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
1472 		return -ENOMEM;
1473 	return 0;
1474 }
1475 
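/**
 * Take a counter from the shared HWS counter pool and fill the counter DR
 * action and offset at @p start_pos; the counter ID is kept in acts->cnt_id.
 */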
1476 static __rte_always_inline int
1477 flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t  start_pos,
1478 		      struct mlx5_hw_actions *acts)
1479 {
1480 	struct mlx5_priv *priv = dev->data->dev_private;
1481 	uint32_t pos = start_pos;
1482 	cnt_id_t cnt_id;
1483 	int ret;
1484 
1485 	ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
1486 	if (ret != 0)
1487 		return ret;
1488 	ret = mlx5_hws_cnt_pool_get_action_offset
1489 				(priv->hws_cpool,
1490 				 cnt_id,
1491 				 &acts->rule_acts[pos].action,
1492 				 &acts->rule_acts[pos].counter.offset);
1493 	if (ret != 0)
1494 		return ret;
1495 	acts->cnt_id = cnt_id;
1496 	return 0;
1497 }
1498 
1499 static __rte_always_inline bool
1500 is_of_vlan_pcp_present(const struct rte_flow_action *actions)
1501 {
1502 	/*
1503 	 * Order of RTE VLAN push actions is
1504 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
1505 	 */
1506 	return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
1507 		RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
1508 }
1509 
1510 static __rte_always_inline bool
1511 is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
1512 {
1513 	/*
1514 	 * In masked push VLAN template all RTE push actions are masked.
1515 	 */
1516 	return mask && mask->ethertype != 0;
1517 }
1518 
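/**
 * Build the 32-bit VLAN header to be pushed, in network byte order:
 * ethertype (TPID) in the upper 16 bits, followed by PCP (3 bits),
 * DEI (left as 0) and VID (12 bits), taken from the
 * OF_PUSH_VLAN / OF_SET_VLAN_VID [/ OF_SET_VLAN_PCP] action triplet.
 */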
1519 static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
1520 {
1521 /*
1522  * OpenFlow Switch Specification defines 802.1q VID as 12+1 bits.
1523  */
1524 	rte_be32_t type, vid, pcp;
1525 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1526 	rte_be32_t vid_lo, vid_hi;
1527 #endif
1528 
1529 	type = ((const struct rte_flow_action_of_push_vlan *)
1530 		actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
1531 	vid = ((const struct rte_flow_action_of_set_vlan_vid *)
1532 		actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
1533 	pcp = is_of_vlan_pcp_present(actions) ?
1534 	      ((const struct rte_flow_action_of_set_vlan_pcp *)
1535 		      actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
1536 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1537 	vid_hi = vid & 0xff;
1538 	vid_lo = vid >> 8;
1539 	return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type;
1540 #else
1541 	return (type << 16) | (pcp << 13) | vid;
1542 #endif
1543 }
1544 
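/**
 * Allocate and initialize an ASO meter object for a METER_MARK action.
 *
 * Takes a meter from the HWS meter pool, fills it from the action
 * configuration and posts the ASO update WQE. On the synchronous
 * (MLX5_HW_INV_QUEUE) path the call also waits for the WQE completion.
 * Returns NULL on any failure.
 */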
1545 static __rte_always_inline struct mlx5_aso_mtr *
1546 flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
1547 			 const struct rte_flow_action *action,
1548 			 void *user_data, bool push)
1549 {
1550 	struct mlx5_priv *priv = dev->data->dev_private;
1551 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1552 	const struct rte_flow_action_meter_mark *meter_mark = action->conf;
1553 	struct mlx5_aso_mtr *aso_mtr;
1554 	struct mlx5_flow_meter_info *fm;
1555 	uint32_t mtr_id;
1556 
1557 	if (meter_mark->profile == NULL)
1558 		return NULL;
1559 	aso_mtr = mlx5_ipool_malloc(priv->hws_mpool->idx_pool, &mtr_id);
1560 	if (!aso_mtr)
1561 		return NULL;
1562 	/* Fill the flow meter parameters. */
1563 	aso_mtr->type = ASO_METER_INDIRECT;
1564 	fm = &aso_mtr->fm;
1565 	fm->meter_id = mtr_id;
1566 	fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
1567 	fm->is_enable = meter_mark->state;
1568 	fm->color_aware = meter_mark->color_mode;
1569 	aso_mtr->pool = pool;
1570 	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
1571 			  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
1572 	aso_mtr->offset = mtr_id - 1;
1573 	aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;
1574 	/* Update ASO flow meter by wqe. */
1575 	if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,
1576 					 &priv->mtr_bulk, user_data, push)) {
1577 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1578 		return NULL;
1579 	}
1580 	/* Wait for ASO object completion. */
1581 	if (queue == MLX5_HW_INV_QUEUE &&
1582 	    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
1583 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1584 		return NULL;
1585 	}
1586 	return aso_mtr;
1587 }
1588 
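/**
 * Translate a METER_MARK action: allocate an ASO meter object, fill the DR
 * rule action at @p aso_mtr_pos and return the meter ID through @p index.
 */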
1589 static __rte_always_inline int
1590 flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
1591 			   uint16_t aso_mtr_pos,
1592 			   const struct rte_flow_action *action,
1593 			   struct mlx5dr_rule_action *acts,
1594 			   uint32_t *index,
1595 			   uint32_t queue)
1596 {
1597 	struct mlx5_priv *priv = dev->data->dev_private;
1598 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1599 	struct mlx5_aso_mtr *aso_mtr;
1600 
1601 	aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true);
1602 	if (!aso_mtr)
1603 		return -1;
1604 
1605 	/* Compile METER_MARK action */
1606 	acts[aso_mtr_pos].action = pool->action;
1607 	acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1608 	*index = aso_mtr->fm.meter_id;
1609 	return 0;
1610 }
1611 
1612 static int
1613 flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
1614 				  __rte_unused const struct mlx5_action_construct_data *act_data,
1615 				  const struct rte_flow_action *action,
1616 				  struct mlx5dr_rule_action *dr_rule)
1617 {
1618 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
1619 	const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
1620 
1621 	dr_rule->action = mirror->mirror_action;
1622 	return 0;
1623 }
1624 
1625 /**
1626  * HWS mirror implemented as FW island.
1627  * The action does not support indirect list flow configuration.
1628  * If the template handle was masked, use that handle's mirror action in all
1629  * flow rules. Otherwise, let each flow rule specify its own mirror handle.
1630  */
1631 static int
1632 hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
1633 					 const struct rte_flow_action *action,
1634 					 const struct rte_flow_action *mask,
1635 					 struct mlx5_hw_actions *acts,
1636 					 uint16_t action_src, uint16_t action_dst)
1637 {
1638 	int ret = 0;
1639 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1640 
1641 	if (mask_conf && mask_conf->handle) {
1642 		/*
1643 		 * If the mirror handle was masked, assign the fixed DR mirror action.
1644 		 */
1645 		flow_hw_translate_indirect_mirror(dev, NULL, action,
1646 						  &acts->rule_acts[action_dst]);
1647 	} else {
1648 		struct mlx5_priv *priv = dev->data->dev_private;
1649 		ret = flow_hw_act_data_indirect_list_append
1650 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1651 			 action_src, action_dst,
1652 			 flow_hw_translate_indirect_mirror);
1653 	}
1654 	return ret;
1655 }
1656 
1657 static int
1658 flow_hw_reformat_action(__rte_unused struct rte_eth_dev *dev,
1659 			__rte_unused const struct mlx5_action_construct_data *data,
1660 			const struct rte_flow_action *action,
1661 			struct mlx5dr_rule_action *dr_rule)
1662 {
1663 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
1664 
1665 	dr_rule->action = ((struct mlx5_hw_encap_decap_action *)
1666 			   (indlst_conf->handle))->action;
1667 	if (!dr_rule->action)
1668 		return -EINVAL;
1669 	return 0;
1670 }
1671 
1672 /**
1673  * Template conf must not be masked. If the handle is masked, use the one from
1674  * the template; otherwise, update it per flow rule.
1675  */
1676 static int
1677 hws_table_tmpl_translate_indirect_reformat(struct rte_eth_dev *dev,
1678 					   const struct rte_flow_action *action,
1679 					   const struct rte_flow_action *mask,
1680 					   struct mlx5_hw_actions *acts,
1681 					   uint16_t action_src, uint16_t action_dst)
1682 {
1683 	int ret = -1;
1684 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1685 	struct mlx5_priv *priv = dev->data->dev_private;
1686 
1687 	if (mask_conf && mask_conf->handle && !mask_conf->conf)
1688 		/**
1689 		 * If handle was masked, assign fixed DR action.
1690 		 */
1691 		ret = flow_hw_reformat_action(dev, NULL, action,
1692 					      &acts->rule_acts[action_dst]);
1693 	else if (mask_conf && !mask_conf->handle && !mask_conf->conf)
1694 		ret = flow_hw_act_data_indirect_list_append
1695 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1696 			 action_src, action_dst, flow_hw_reformat_action);
1697 	return ret;
1698 }
1699 
1700 static int
1701 flow_dr_set_meter(struct mlx5_priv *priv,
1702 		  struct mlx5dr_rule_action *dr_rule,
1703 		  const struct rte_flow_action_indirect_list *action_conf)
1704 {
1705 	const struct mlx5_indlst_legacy *legacy_obj =
1706 		(typeof(legacy_obj))action_conf->handle;
1707 	struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
1708 	uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
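	/*
	 * The indirect action type is kept in the upper bits of the handle;
	 * the remaining low bits are the meter pool index.
	 */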
1709 	uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1710 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
1711 
1712 	if (!aso_mtr)
1713 		return -EINVAL;
1714 	dr_rule->action = mtr_pool->action;
1715 	dr_rule->aso_meter.offset = aso_mtr->offset;
1716 	return 0;
1717 }
1718 
1719 __rte_always_inline static void
1720 flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
1721 {
1722 	dr_rule->aso_meter.init_color =
1723 		(enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
1724 }
1725 
1726 static int
1727 flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
1728 				 const struct mlx5_action_construct_data *act_data,
1729 				 const struct rte_flow_action *action,
1730 				 struct mlx5dr_rule_action *dr_rule)
1731 {
1732 	int ret;
1733 	struct mlx5_priv *priv = dev->data->dev_private;
1734 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
1735 	const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
1736 		(typeof(flow_conf))action_conf->conf;
1737 
1738 	/*
1739 	 * A masked indirect handle sets the DR5 action during template table
1740 	 * translation; resolve the action here only if that did not happen.
1741 	 */
1742 	if (!dr_rule->action) {
1743 		ret = flow_dr_set_meter(priv, dr_rule, action_conf);
1744 		if (ret)
1745 			return ret;
1746 	}
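	/*
	 * If the init color was not masked in the template, take it from the
	 * per-rule configuration when one is provided.
	 */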
1747 	if (!act_data->shared_meter.conf_masked) {
1748 		if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
1749 			flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
1750 	}
1751 	return 0;
1752 }
1753 
1754 static int
1755 hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
1756 					const struct rte_flow_action *action,
1757 					const struct rte_flow_action *mask,
1758 					struct mlx5_hw_actions *acts,
1759 					uint16_t action_src, uint16_t action_dst)
1760 {
1761 	int ret;
1762 	struct mlx5_priv *priv = dev->data->dev_private;
1763 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
1764 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1765 	bool is_handle_masked = mask_conf && mask_conf->handle;
1766 	bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
1767 	struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
1768 
1769 	if (is_handle_masked) {
1770 		ret = flow_dr_set_meter(priv, dr_rule, action->conf);
1771 		if (ret)
1772 			return ret;
1773 	}
1774 	if (is_conf_masked) {
1775 		const struct
1776 			rte_flow_indirect_update_flow_meter_mark **flow_conf =
1777 			(typeof(flow_conf))action_conf->conf;
1778 		flow_dr_mtr_flow_color(dr_rule,
1779 				       flow_conf[0]->init_color);
1780 	}
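	/*
	 * Something is still unmasked and must be resolved per flow rule;
	 * fetch the entry just appended (list head) to record whether the
	 * meter configuration was masked.
	 */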
1781 	if (!is_handle_masked || !is_conf_masked) {
1782 		struct mlx5_action_construct_data *act_data;
1783 
1784 		ret = flow_hw_act_data_indirect_list_append
1785 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1786 			 action_src, action_dst, flow_hw_translate_indirect_meter);
1787 		if (ret)
1788 			return ret;
1789 		act_data = LIST_FIRST(&acts->act_list);
1790 		act_data->shared_meter.conf_masked = is_conf_masked;
1791 	}
1792 	return 0;
1793 }
1794 
1795 static int
1796 hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
1797 					 const struct rte_flow_action *action,
1798 					 const struct rte_flow_action *mask,
1799 					 struct mlx5_hw_actions *acts,
1800 					 uint16_t action_src, uint16_t action_dst)
1801 {
1802 	int ret;
1803 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
1804 	struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
1805 	uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
1806 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1807 
1808 	switch (type) {
1809 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1810 		ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
1811 							      acts, action_src,
1812 							      action_dst);
1813 		break;
1814 	default:
1815 		ret = -EINVAL;
1816 		break;
1817 	}
1818 	return ret;
1819 }
1820 
1821 /*
1822  * template .. indirect_list handle Ht conf Ct ..
1823  * mask     .. indirect_list handle Hm conf Cm ..
1824  *
1825  * The PMD requires Ht != 0 to resolve the handle type.
1826  * If Ht was masked (Hm != 0), the DR5 action is set according to Ht and will
1827  * not change. Otherwise, the DR5 action is resolved during flow rule build.
1828  * If Ct was masked (Cm != 0), table template processing updates base
1829  * indirect action configuration with Ct parameters.
1830  */
1831 static int
1832 table_template_translate_indirect_list(struct rte_eth_dev *dev,
1833 				       const struct rte_flow_action *action,
1834 				       const struct rte_flow_action *mask,
1835 				       struct mlx5_hw_actions *acts,
1836 				       uint16_t action_src, uint16_t action_dst)
1837 {
1838 	int ret = 0;
1839 	enum mlx5_indirect_list_type type;
1840 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
1841 
1842 	if (!list_conf || !list_conf->handle)
1843 		return -EINVAL;
1844 	type = mlx5_get_indirect_list_type(list_conf->handle);
1845 	switch (type) {
1846 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
1847 		ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
1848 							       acts, action_src,
1849 							       action_dst);
1850 		break;
1851 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
1852 		ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
1853 							       acts, action_src,
1854 							       action_dst);
1855 		break;
1856 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
1857 		if (list_conf->conf)
1858 			return -EINVAL;
1859 		ret = hws_table_tmpl_translate_indirect_reformat(dev, action, mask,
1860 								 acts, action_src,
1861 								 action_dst);
1862 		break;
1863 	default:
1864 		return -EINVAL;
1865 	}
1866 	return ret;
1867 }
1868 
1869 static int
1870 mlx5_tbl_translate_reformat(struct mlx5_priv *priv,
1871 			    const struct rte_flow_template_table_attr *table_attr,
1872 			    struct mlx5_hw_actions *acts,
1873 			    struct rte_flow_actions_template *at,
1874 			    const struct rte_flow_item *enc_item,
1875 			    const struct rte_flow_item *enc_item_m,
1876 			    uint8_t *encap_data, uint8_t *encap_data_m,
1877 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
1878 			    size_t data_size, uint16_t reformat_src,
1879 			    enum mlx5dr_action_type refmt_type,
1880 			    struct rte_flow_error *error)
1881 {
1882 	int mp_reformat_ix = mlx5_multi_pattern_reformat_to_index(refmt_type);
1883 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
1884 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
1885 	struct mlx5dr_action_reformat_header hdr;
1886 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
1887 	bool shared_rfmt = false;
1888 	int ret;
1889 
1890 	MLX5_ASSERT(at->reformat_off != UINT16_MAX);
1891 	if (enc_item) {
1892 		MLX5_ASSERT(!encap_data);
1893 		ret = flow_dv_convert_encap_data(enc_item, buf, &data_size, error);
1894 		if (ret)
1895 			return ret;
1896 		encap_data = buf;
1897 		if (enc_item_m)
1898 			shared_rfmt = true;
1899 	} else if (encap_data && encap_data_m) {
1900 		shared_rfmt = true;
1901 	}
1902 	acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
1903 					sizeof(*acts->encap_decap) + data_size,
1904 					0, SOCKET_ID_ANY);
1905 	if (!acts->encap_decap)
1906 		return rte_flow_error_set(error, ENOMEM,
1907 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1908 					  NULL, "no memory for reformat context");
1909 	hdr.sz = data_size;
1910 	hdr.data = encap_data;
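	/*
	 * A shared (fully masked) reformat, or one that cannot join a
	 * multi-pattern action, gets a dedicated DR action right away;
	 * otherwise action creation is deferred to multi-pattern processing.
	 */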
1911 	if (shared_rfmt || mp_reformat_ix < 0) {
1912 		uint16_t reformat_ix = at->reformat_off;
1913 		uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
1914 				 MLX5DR_ACTION_FLAG_SHARED;
1915 
1916 		acts->encap_decap->action =
1917 			mlx5dr_action_create_reformat(priv->dr_ctx, refmt_type,
1918 						      1, &hdr, 0, flags);
1919 		if (!acts->encap_decap->action)
1920 			return -rte_errno;
1921 		acts->rule_acts[reformat_ix].action = acts->encap_decap->action;
1922 		acts->rule_acts[reformat_ix].reformat.data = acts->encap_decap->data;
1923 		acts->rule_acts[reformat_ix].reformat.offset = 0;
1924 		acts->encap_decap->shared = true;
1925 	} else {
1926 		uint32_t ix;
1927 		typeof(mp_ctx->reformat[0]) *reformat_ctx = mp_ctx->reformat +
1928 							    mp_reformat_ix;
1929 
1930 		ix = reformat_ctx->elements_num++;
1931 		reformat_ctx->ctx[ix].reformat_hdr = hdr;
1932 		reformat_ctx->ctx[ix].rule_action = &acts->rule_acts[at->reformat_off];
1933 		reformat_ctx->ctx[ix].encap = acts->encap_decap;
1934 		acts->rule_acts[at->reformat_off].reformat.hdr_idx = ix;
1935 		acts->encap_decap_pos = at->reformat_off;
1936 		acts->encap_decap->data_size = data_size;
1937 		ret = __flow_hw_act_data_encap_append
1938 			(priv, acts, (at->actions + reformat_src)->type,
1939 			 reformat_src, at->reformat_off, data_size);
1940 		if (ret)
1941 			return -rte_errno;
1942 	}
1943 	return 0;
1944 }
1945 
1946 static int
1947 mlx5_tbl_translate_modify_header(struct rte_eth_dev *dev,
1948 				 const struct mlx5_flow_template_table_cfg *cfg,
1949 				 struct mlx5_hw_actions *acts,
1950 				 struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
1951 				 struct mlx5_hw_modify_header_action *mhdr,
1952 				 struct rte_flow_error *error)
1953 {
1954 	struct mlx5_priv *priv = dev->data->dev_private;
1955 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
1956 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
1957 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
1958 	uint16_t mhdr_ix = mhdr->pos;
1959 	struct mlx5dr_action_mh_pattern pattern = {
1960 		.sz = sizeof(struct mlx5_modification_cmd) * mhdr->mhdr_cmds_num
1961 	};
1962 
1963 	if (flow_hw_validate_compiled_modify_field(dev, cfg, mhdr, error)) {
1964 		__flow_hw_action_template_destroy(dev, acts);
1965 		return -rte_errno;
1966 	}
1967 	acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
1968 				 0, SOCKET_ID_ANY);
1969 	if (!acts->mhdr)
1970 		return rte_flow_error_set(error, ENOMEM,
1971 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1972 					  NULL, "translate modify_header: no memory for modify header context");
1973 	rte_memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
1974 	pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
1975 	if (mhdr->shared) {
1976 		uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
1977 				 MLX5DR_ACTION_FLAG_SHARED;
1978 
1979 		acts->mhdr->action = mlx5dr_action_create_modify_header
1980 						(priv->dr_ctx, 1, &pattern, 0,
1981 						 flags);
1982 		if (!acts->mhdr->action)
1983 			return rte_flow_error_set(error, rte_errno,
1984 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1985 						  NULL, "translate modify_header: failed to create DR action");
1986 		acts->rule_acts[mhdr_ix].action = acts->mhdr->action;
1987 	} else {
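		/*
		 * Non-shared modify header: defer DR action creation to
		 * multi-pattern processing and record the rule action slot
		 * to patch later.
		 */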
1988 		typeof(mp_ctx->mh) *mh = &mp_ctx->mh;
1989 		uint32_t idx = mh->elements_num;
1990 		struct mlx5_multi_pattern_ctx *mh_ctx = mh->ctx + mh->elements_num++;
1991 
1992 		mh_ctx->mh_pattern = pattern;
1993 		mh_ctx->mhdr = acts->mhdr;
1994 		mh_ctx->rule_action = &acts->rule_acts[mhdr_ix];
1995 		acts->rule_acts[mhdr_ix].modify_header.pattern_idx = idx;
1996 	}
1997 	return 0;
1998 }
1999 
2000 
2001 static int
2002 mlx5_create_ipv6_ext_reformat(struct rte_eth_dev *dev,
2003 			      const struct mlx5_flow_template_table_cfg *cfg,
2004 			      struct mlx5_hw_actions *acts,
2005 			      struct rte_flow_actions_template *at,
2006 			      uint8_t *push_data, uint8_t *push_data_m,
2007 			      size_t push_size, uint16_t recom_src,
2008 			      enum mlx5dr_action_type recom_type)
2009 {
2010 	struct mlx5_priv *priv = dev->data->dev_private;
2011 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2012 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2013 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
2014 	struct mlx5_action_construct_data *act_data;
2015 	struct mlx5dr_action_reformat_header hdr = {0};
2016 	uint32_t flag, bulk = 0;
2017 
2018 	flag = mlx5_hw_act_flag[!!attr->group][type];
2019 	acts->push_remove = mlx5_malloc(MLX5_MEM_ZERO,
2020 					sizeof(*acts->push_remove) + push_size,
2021 					0, SOCKET_ID_ANY);
2022 	if (!acts->push_remove)
2023 		return -ENOMEM;
2024 
2025 	switch (recom_type) {
2026 	case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
2027 		if (!push_data || !push_size)
2028 			goto err1;
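		/*
		 * Push data not masked in the template: size the action bulk
		 * to the table so every rule can carry its own header.
		 * Masked data makes the action shared.
		 */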
2029 		if (!push_data_m) {
2030 			bulk = rte_log2_u32(table_attr->nb_flows);
2031 		} else {
2032 			flag |= MLX5DR_ACTION_FLAG_SHARED;
2033 			acts->push_remove->shared = 1;
2034 		}
2035 		acts->push_remove->data_size = push_size;
2036 		memcpy(acts->push_remove->data, push_data, push_size);
2037 		hdr.data = push_data;
2038 		hdr.sz = push_size;
2039 		break;
2040 	case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
2041 		flag |= MLX5DR_ACTION_FLAG_SHARED;
2042 		acts->push_remove->shared = 1;
2043 		break;
2044 	default:
2045 		break;
2046 	}
2047 
2048 	acts->push_remove->action =
2049 		mlx5dr_action_create_reformat_ipv6_ext(priv->dr_ctx,
2050 				recom_type, &hdr, bulk, flag);
2051 	if (!acts->push_remove->action)
2052 		goto err1;
2053 	acts->rule_acts[at->recom_off].action = acts->push_remove->action;
2054 	acts->rule_acts[at->recom_off].ipv6_ext.header = acts->push_remove->data;
2055 	acts->rule_acts[at->recom_off].ipv6_ext.offset = 0;
2056 	acts->push_remove_pos = at->recom_off;
2057 	if (!acts->push_remove->shared) {
2058 		act_data = __flow_hw_act_data_push_append(dev, acts,
2059 				RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH,
2060 				recom_src, at->recom_off, push_size);
2061 		if (!act_data)
2062 			goto err;
2063 	}
2064 	return 0;
2065 err:
2066 	if (acts->push_remove->action)
2067 		mlx5dr_action_destroy(acts->push_remove->action);
2068 err1:
2069 	if (acts->push_remove) {
2070 		mlx5_free(acts->push_remove);
2071 		acts->push_remove = NULL;
2072 	}
2073 	return -EINVAL;
2074 }
2075 
2076 /**
2077  * Translate rte_flow actions to DR action.
2078  *
2079  * As the action template has already indicated the actions. Translate
2080  * the rte_flow actions to DR action if possbile. So in flow create
2081  * stage we will save cycles from handing the actions' organizing.
2082  * For the actions with limited information, need to add these to a
2083  * list.
2084  *
2085  * @param[in] dev
2086  *   Pointer to the rte_eth_dev structure.
2087  * @param[in] cfg
2088  *   Pointer to the table configuration.
2089  * @param[in, out] acts
2090  *   Pointer to the template HW steering DR actions.
2091  * @param[in] at
2092  *   Action template.
2093  * @param[out] error
2094  *   Pointer to error structure.
2095  *
2096  * @return
2097  *   0 on success, a negative errno otherwise and rte_errno is set.
2098  */
2099 static int
2100 __flow_hw_actions_translate(struct rte_eth_dev *dev,
2101 			    const struct mlx5_flow_template_table_cfg *cfg,
2102 			    struct mlx5_hw_actions *acts,
2103 			    struct rte_flow_actions_template *at,
2104 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2105 			    struct rte_flow_error *error)
2106 {
2107 	struct mlx5_priv *priv = dev->data->dev_private;
2108 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2109 	struct mlx5_hca_flex_attr *hca_attr = &priv->sh->cdev->config.hca_attr.flex;
2110 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2111 	struct rte_flow_action *actions = at->actions;
2112 	struct rte_flow_action *masks = at->masks;
2113 	enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
2114 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
2115 	const struct rte_flow_action_raw_encap *raw_encap_data;
2116 	const struct rte_flow_action_ipv6_ext_push *ipv6_ext_data;
2117 	const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
2118 	uint16_t reformat_src = 0, recom_src = 0;
2119 	uint8_t *encap_data = NULL, *encap_data_m = NULL;
2120 	uint8_t *push_data = NULL, *push_data_m = NULL;
2121 	size_t data_size = 0, push_size = 0;
2122 	struct mlx5_hw_modify_header_action mhdr = { 0 };
2123 	bool actions_end = false;
2124 	uint32_t type;
2125 	bool reformat_used = false;
2126 	bool recom_used = false;
2127 	unsigned int of_vlan_offset;
2128 	uint16_t jump_pos;
2129 	uint32_t ct_idx;
2130 	int ret, err;
2131 	uint32_t target_grp = 0;
2132 	int table_type;
2133 
2134 	flow_hw_modify_field_init(&mhdr, at);
2135 	if (attr->transfer)
2136 		type = MLX5DR_TABLE_TYPE_FDB;
2137 	else if (attr->egress)
2138 		type = MLX5DR_TABLE_TYPE_NIC_TX;
2139 	else
2140 		type = MLX5DR_TABLE_TYPE_NIC_RX;
2141 	for (; !actions_end; actions++, masks++) {
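		/*
		 * For each template action: pos is its index in the expanded
		 * template, src_pos points back to the corresponding source
		 * action and dr_pos is its slot in the DR rule action array.
		 */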
2142 		uint64_t pos = actions - at->actions;
2143 		uint16_t src_pos = pos - at->src_off[pos];
2144 		uint16_t dr_pos = at->dr_off[pos];
2145 
2146 		switch ((int)actions->type) {
2147 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2148 			if (!attr->group) {
2149 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2150 				goto err;
2151 			}
2152 			ret = table_template_translate_indirect_list
2153 				(dev, actions, masks, acts, src_pos, dr_pos);
2154 			if (ret)
2155 				goto err;
2156 			break;
2157 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
2158 			if (!attr->group) {
2159 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2160 				goto err;
2161 			}
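			/*
			 * A fully masked indirect action is translated now;
			 * otherwise it is recorded and resolved per flow rule.
			 */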
2162 			if (actions->conf && masks->conf) {
2163 				if (flow_hw_shared_action_translate
2164 				(dev, actions, acts, src_pos, dr_pos))
2165 					goto err;
2166 			} else if (__flow_hw_act_data_general_append
2167 					(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
2168 					 src_pos, dr_pos)){
2169 				goto err;
2170 			}
2171 			break;
2172 		case RTE_FLOW_ACTION_TYPE_VOID:
2173 			break;
2174 		case RTE_FLOW_ACTION_TYPE_DROP:
2175 			acts->rule_acts[dr_pos].action =
2176 				priv->hw_drop[!!attr->group];
2177 			break;
2178 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
2179 			if (!attr->group) {
2180 				DRV_LOG(ERR, "Port representor is not supported in root table.");
2181 				goto err;
2182 			}
2183 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2184 			break;
2185 		case RTE_FLOW_ACTION_TYPE_MARK:
2186 			acts->mark = true;
2187 			if (masks->conf &&
2188 			    ((const struct rte_flow_action_mark *)
2189 			     masks->conf)->id)
2190 				acts->rule_acts[dr_pos].tag.value =
2191 					mlx5_flow_mark_set
2192 					(((const struct rte_flow_action_mark *)
2193 					(actions->conf))->id);
2194 			else if (__flow_hw_act_data_general_append(priv, acts,
2195 								   actions->type,
2196 								   src_pos, dr_pos))
2197 				goto err;
2198 			acts->rule_acts[dr_pos].action =
2199 				priv->hw_tag[!!attr->group];
2200 			__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
2201 			flow_hw_rxq_flag_set(dev, true);
2202 			break;
2203 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2204 			acts->rule_acts[dr_pos].action =
2205 				priv->hw_push_vlan[type];
2206 			if (is_template_masked_push_vlan(masks->conf))
2207 				acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
2208 					vlan_hdr_to_be32(actions);
2209 			else if (__flow_hw_act_data_general_append
2210 					(priv, acts, actions->type,
2211 					 src_pos, dr_pos))
2212 				goto err;
2213 			of_vlan_offset = is_of_vlan_pcp_present(actions) ?
2214 					MLX5_HW_VLAN_PUSH_PCP_IDX :
2215 					MLX5_HW_VLAN_PUSH_VID_IDX;
2216 			actions += of_vlan_offset;
2217 			masks += of_vlan_offset;
2218 			break;
2219 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2220 			acts->rule_acts[dr_pos].action =
2221 				priv->hw_pop_vlan[type];
2222 			break;
2223 		case RTE_FLOW_ACTION_TYPE_JUMP:
2224 			if (masks->conf &&
2225 			    ((const struct rte_flow_action_jump *)
2226 			     masks->conf)->group) {
2227 				uint32_t jump_group =
2228 					((const struct rte_flow_action_jump *)
2229 					actions->conf)->group;
2230 				acts->jump = flow_hw_jump_action_register
2231 						(dev, cfg, jump_group, error);
2232 				if (!acts->jump)
2233 					goto err;
2234 				acts->rule_acts[dr_pos].action = (!!attr->group) ?
2235 								 acts->jump->hws_action :
2236 								 acts->jump->root_action;
2237 			} else if (__flow_hw_act_data_general_append
2238 					(priv, acts, actions->type,
2239 					 src_pos, dr_pos)){
2240 				goto err;
2241 			}
2242 			break;
2243 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2244 			if (masks->conf &&
2245 			    ((const struct rte_flow_action_queue *)
2246 			     masks->conf)->index) {
2247 				acts->tir = flow_hw_tir_action_register
2248 				(dev,
2249 				 mlx5_hw_act_flag[!!attr->group][type],
2250 				 actions);
2251 				if (!acts->tir)
2252 					goto err;
2253 				acts->rule_acts[dr_pos].action =
2254 					acts->tir->action;
2255 			} else if (__flow_hw_act_data_general_append
2256 					(priv, acts, actions->type,
2257 					 src_pos, dr_pos)) {
2258 				goto err;
2259 			}
2260 			break;
2261 		case RTE_FLOW_ACTION_TYPE_RSS:
2262 			if (actions->conf && masks->conf) {
2263 				acts->tir = flow_hw_tir_action_register
2264 				(dev,
2265 				 mlx5_hw_act_flag[!!attr->group][type],
2266 				 actions);
2267 				if (!acts->tir)
2268 					goto err;
2269 				acts->rule_acts[dr_pos].action =
2270 					acts->tir->action;
2271 			} else if (__flow_hw_act_data_general_append
2272 					(priv, acts, actions->type,
2273 					 src_pos, dr_pos)) {
2274 				goto err;
2275 			}
2276 			break;
2277 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2278 			MLX5_ASSERT(!reformat_used);
2279 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2280 							 actions->conf);
2281 			if (masks->conf)
2282 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2283 								   masks->conf);
2284 			reformat_used = true;
2285 			reformat_src = src_pos;
2286 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2287 			break;
2288 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2289 			MLX5_ASSERT(!reformat_used);
2290 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2291 							 actions->conf);
2292 			if (masks->conf)
2293 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2294 								   masks->conf);
2295 			reformat_used = true;
2296 			reformat_src = src_pos;
2297 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2298 			break;
2299 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2300 			raw_encap_data =
2301 				(const struct rte_flow_action_raw_encap *)
2302 				 masks->conf;
2303 			if (raw_encap_data)
2304 				encap_data_m = raw_encap_data->data;
2305 			raw_encap_data =
2306 				(const struct rte_flow_action_raw_encap *)
2307 				 actions->conf;
2308 			encap_data = raw_encap_data->data;
2309 			data_size = raw_encap_data->size;
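			/*
			 * With a preceding decap this becomes a decap+encap pair:
			 * small data restores L2 (TNL_L3_TO_L2), larger data
			 * builds an L3 tunnel (L2_TO_TNL_L3).
			 */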
2310 			if (reformat_used) {
2311 				refmt_type = data_size <
2312 				MLX5_ENCAPSULATION_DECISION_SIZE ?
2313 				MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
2314 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
2315 			} else {
2316 				reformat_used = true;
2317 				refmt_type =
2318 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2319 			}
2320 			reformat_src = src_pos;
2321 			break;
2322 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2323 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2324 			MLX5_ASSERT(!reformat_used);
2325 			reformat_used = true;
2326 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2327 			break;
2328 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2329 			reformat_used = true;
2330 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2331 			break;
2332 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
2333 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2334 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2335 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2336 				goto err;
2337 			}
2338 			MLX5_ASSERT(!recom_used && !recom_type);
2339 			recom_used = true;
2340 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
2341 			ipv6_ext_data =
2342 				(const struct rte_flow_action_ipv6_ext_push *)masks->conf;
2343 			if (ipv6_ext_data)
2344 				push_data_m = ipv6_ext_data->data;
2345 			ipv6_ext_data =
2346 				(const struct rte_flow_action_ipv6_ext_push *)actions->conf;
2347 			if (ipv6_ext_data) {
2348 				push_data = ipv6_ext_data->data;
2349 				push_size = ipv6_ext_data->size;
2350 			}
2351 			recom_src = src_pos;
2352 			break;
2353 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
2354 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2355 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2356 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2357 				goto err;
2358 			}
2359 			recom_used = true;
2360 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
2361 			break;
2362 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
2363 			flow_hw_translate_group(dev, cfg, attr->group,
2364 						&target_grp, error);
2365 			if (target_grp == 0) {
2366 				__flow_hw_action_template_destroy(dev, acts);
2367 				return rte_flow_error_set(error, ENOTSUP,
2368 						RTE_FLOW_ERROR_TYPE_ACTION,
2369 						NULL,
2370 						"Send to kernel action on root table is not supported in HW steering mode");
2371 			}
2372 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
2373 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
2374 				      MLX5DR_TABLE_TYPE_FDB);
2375 			acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
2376 			break;
2377 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
2378 			err = flow_hw_modify_field_compile(dev, attr, actions,
2379 							   masks, acts, &mhdr,
2380 							   src_pos, error);
2381 			if (err)
2382 				goto err;
2383 			break;
2384 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2385 			if (flow_hw_represented_port_compile
2386 					(dev, attr, actions,
2387 					 masks, acts, src_pos, dr_pos, error))
2388 				goto err;
2389 			break;
2390 		case RTE_FLOW_ACTION_TYPE_METER:
2391 			/*
2392 			 * METER action is compiled to 2 DR actions - ASO_METER and FT.
2393 			 * Calculated DR offset is stored only for ASO_METER and FT
2394 			 * is assumed to be the next action.
2395 			 */
2396 			jump_pos = dr_pos + 1;
2397 			if (actions->conf && masks->conf &&
2398 			    ((const struct rte_flow_action_meter *)
2399 			     masks->conf)->mtr_id) {
2400 				err = flow_hw_meter_compile(dev, cfg,
2401 							    dr_pos, jump_pos, actions, acts, error);
2402 				if (err)
2403 					goto err;
2404 			} else if (__flow_hw_act_data_general_append(priv, acts,
2405 								     actions->type,
2406 								     src_pos,
2407 								     dr_pos))
2408 				goto err;
2409 			break;
2410 		case RTE_FLOW_ACTION_TYPE_AGE:
2411 			flow_hw_translate_group(dev, cfg, attr->group,
2412 						&target_grp, error);
2413 			if (target_grp == 0) {
2414 				__flow_hw_action_template_destroy(dev, acts);
2415 				return rte_flow_error_set(error, ENOTSUP,
2416 						RTE_FLOW_ERROR_TYPE_ACTION,
2417 						NULL,
2418 						"Age action on root table is not supported in HW steering mode");
2419 			}
2420 			if (__flow_hw_act_data_general_append(priv, acts,
2421 							      actions->type,
2422 							      src_pos,
2423 							      dr_pos))
2424 				goto err;
2425 			break;
2426 		case RTE_FLOW_ACTION_TYPE_COUNT:
2427 			flow_hw_translate_group(dev, cfg, attr->group,
2428 						&target_grp, error);
2429 			if (target_grp == 0) {
2430 				__flow_hw_action_template_destroy(dev, acts);
2431 				return rte_flow_error_set(error, ENOTSUP,
2432 						RTE_FLOW_ERROR_TYPE_ACTION,
2433 						NULL,
2434 						"Counter action on root table is not supported in HW steering mode");
2435 			}
2436 			if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
2437 			    (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
2438 				/*
2439 				 * When both COUNT and AGE are requested, it is
2440 				 * saved as an AGE action which also creates the
2441 				 * counter.
2442 				 */
2443 				break;
2444 			if (masks->conf &&
2445 			    ((const struct rte_flow_action_count *)
2446 			     masks->conf)->id) {
2447 				err = flow_hw_cnt_compile(dev, dr_pos, acts);
2448 				if (err)
2449 					goto err;
2450 			} else if (__flow_hw_act_data_general_append
2451 					(priv, acts, actions->type,
2452 					 src_pos, dr_pos)) {
2453 				goto err;
2454 			}
2455 			break;
2456 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2457 			if (masks->conf) {
2458 				ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
2459 					 ((uint32_t)(uintptr_t)actions->conf);
2460 				if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
2461 						       &acts->rule_acts[dr_pos]))
2462 					goto err;
2463 			} else if (__flow_hw_act_data_general_append
2464 					(priv, acts, actions->type,
2465 					 src_pos, dr_pos)) {
2466 				goto err;
2467 			}
2468 			break;
2469 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
2470 			if (actions->conf && masks->conf &&
2471 			    ((const struct rte_flow_action_meter_mark *)
2472 			     masks->conf)->profile) {
2473 				err = flow_hw_meter_mark_compile(dev,
2474 								 dr_pos, actions,
2475 								 acts->rule_acts,
2476 								 &acts->mtr_id,
2477 								 MLX5_HW_INV_QUEUE);
2478 				if (err)
2479 					goto err;
2480 			} else if (__flow_hw_act_data_general_append(priv, acts,
2481 								     actions->type,
2482 								     src_pos,
2483 								     dr_pos))
2484 				goto err;
2485 			break;
2486 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
2487 			/* Internal, can be skipped. */
2488 			if (!!attr->group) {
2489 				DRV_LOG(ERR, "DEFAULT MISS action is only"
2490 					" supported in root table.");
2491 				goto err;
2492 			}
2493 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2494 			break;
2495 		case RTE_FLOW_ACTION_TYPE_END:
2496 			actions_end = true;
2497 			break;
2498 		default:
2499 			break;
2500 		}
2501 	}
2502 	if (mhdr.pos != UINT16_MAX) {
2503 		ret = mlx5_tbl_translate_modify_header(dev, cfg, acts, mp_ctx,
2504 						       &mhdr, error);
2505 		if (ret)
2506 			goto err;
2507 	}
2508 	if (reformat_used) {
2509 		ret = mlx5_tbl_translate_reformat(priv, table_attr, acts, at,
2510 						  enc_item, enc_item_m,
2511 						  encap_data, encap_data_m,
2512 						  mp_ctx, data_size,
2513 						  reformat_src,
2514 						  refmt_type, error);
2515 		if (ret)
2516 			goto err;
2517 	}
2518 	if (recom_used) {
2519 		MLX5_ASSERT(at->recom_off != UINT16_MAX);
2520 		ret = mlx5_create_ipv6_ext_reformat(dev, cfg, acts, at, push_data,
2521 						    push_data_m, push_size, recom_src,
2522 						    recom_type);
2523 		if (ret)
2524 			goto err;
2525 	}
2526 	return 0;
2527 err:
2528 	err = rte_errno;
2529 	__flow_hw_action_template_destroy(dev, acts);
2530 	return rte_flow_error_set(error, err,
2531 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2532 				  "failed to create rte table");
2533 }
2534 
2535 /**
2536  * Translate rte_flow actions to DR actions for each action template of a table.
2537  *
2538  * @param[in] dev
2539  *   Pointer to the rte_eth_dev structure.
2540  * @param[in] tbl
2541  *   Pointer to the flow template table.
2542  * @param[out] error
2543  *   Pointer to error structure.
2544  *
2545  * @return
2546  *    0 on success, negative value otherwise and rte_errno is set.
2547  */
2548 static int
2549 flow_hw_actions_translate(struct rte_eth_dev *dev,
2550 			  struct rte_flow_template_table *tbl,
2551 			  struct rte_flow_error *error)
2552 {
2553 	int ret;
2554 	uint32_t i;
2555 	struct mlx5_tbl_multi_pattern_ctx mpat = MLX5_EMPTY_MULTI_PATTERN_CTX;
2556 
2557 	for (i = 0; i < tbl->nb_action_templates; i++) {
2558 		if (__flow_hw_actions_translate(dev, &tbl->cfg,
2559 						&tbl->ats[i].acts,
2560 						tbl->ats[i].action_template,
2561 						&mpat, error))
2562 			goto err;
2563 	}
2564 	ret = mlx5_tbl_multi_pattern_process(dev, tbl, &mpat, error);
2565 	if (ret)
2566 		goto err;
2567 	return 0;
2568 err:
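	/* Roll back the action templates already translated. */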
2569 	while (i--)
2570 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
2571 	return -1;
2572 }
2573 
2574 /**
2575  * Get shared indirect action.
2576  *
2577  * @param[in] dev
2578  *   Pointer to the rte_eth_dev data structure.
2579  * @param[in] act_data
2580  *   Pointer to the recorded action construct data.
2581  * @param[in] item_flags
2582  *   The matcher item_flags used for RSS lookup.
2583  * @param[in] rule_act
2584  *   Pointer to the shared action's destination rule DR action.
2585  *
2586  * @return
2587  *    0 on success, negative value otherwise and rte_errno is set.
2588  */
2589 static __rte_always_inline int
2590 flow_hw_shared_action_get(struct rte_eth_dev *dev,
2591 			  struct mlx5_action_construct_data *act_data,
2592 			  const uint64_t item_flags,
2593 			  struct mlx5dr_rule_action *rule_act)
2594 {
2595 	struct mlx5_priv *priv = dev->data->dev_private;
2596 	struct mlx5_flow_rss_desc rss_desc = { 0 };
2597 	uint64_t hash_fields = 0;
2598 	uint32_t hrxq_idx = 0;
2599 	struct mlx5_hrxq *hrxq = NULL;
2600 	int act_type = act_data->type;
2601 
2602 	switch (act_type) {
2603 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
2604 		rss_desc.level = act_data->shared_rss.level;
2605 		rss_desc.types = act_data->shared_rss.types;
2606 		rss_desc.symmetric_hash_function = act_data->shared_rss.symmetric_hash_function;
2607 		flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
2608 		hrxq_idx = flow_dv_action_rss_hrxq_lookup
2609 			(dev, act_data->shared_rss.idx, hash_fields);
2610 		if (hrxq_idx)
2611 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
2612 					      hrxq_idx);
2613 		if (hrxq) {
2614 			rule_act->action = hrxq->action;
2615 			return 0;
2616 		}
2617 		break;
2618 	default:
2619 		DRV_LOG(WARNING, "Unsupported shared action type:%d",
2620 			act_data->type);
2621 		break;
2622 	}
2623 	return -1;
2624 }
2625 
2626 static void
2627 flow_hw_construct_quota(struct mlx5_priv *priv,
2628 			struct mlx5dr_rule_action *rule_act, uint32_t qid)
2629 {
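	/* The quota index is 1-based; the ASO object offset is 0-based. */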
2630 	rule_act->action = priv->quota_ctx.dr_action;
2631 	rule_act->aso_meter.offset = qid - 1;
2632 	rule_act->aso_meter.init_color =
2633 		MLX5DR_ACTION_ASO_METER_COLOR_GREEN;
2634 }
2635 
2636 /**
2637  * Construct shared indirect action.
2638  *
2639  * @param[in] dev
2640  *   Pointer to the rte_eth_dev data structure.
2641  * @param[in] queue
2642  *   The flow creation queue index.
2643  * @param[in] action
2644  *   Pointer to the shared indirect rte_flow action.
2645  * @param[in] table
2646  *   Pointer to the flow table.
2647  * @param[in] it_idx
2648  *   Item template index the action template refers to.
2649  * @param[in] action_flags
2650  *   Actions bit-map detected in this template.
2651  * @param[in, out] flow
2652  *   Pointer to the flow containing the counter.
2653  * @param[in] rule_act
2654  *   Pointer to the shared action's destination rule DR action.
2655  *
2656  * @return
2657  *    0 on success, negative value otherwise and rte_errno is set.
2658  */
2659 static __rte_always_inline int
2660 flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
2661 				const struct rte_flow_action *action,
2662 				struct rte_flow_template_table *table,
2663 				const uint8_t it_idx, uint64_t action_flags,
2664 				struct rte_flow_hw *flow,
2665 				struct mlx5dr_rule_action *rule_act)
2666 {
2667 	struct mlx5_priv *priv = dev->data->dev_private;
2668 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
2669 	struct mlx5_action_construct_data act_data;
2670 	struct mlx5_shared_action_rss *shared_rss;
2671 	struct mlx5_aso_mtr *aso_mtr;
2672 	struct mlx5_age_info *age_info;
2673 	struct mlx5_hws_age_param *param;
2674 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
2675 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
2676 	uint32_t idx = act_idx &
2677 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
2678 	uint64_t item_flags;
2679 	cnt_id_t age_cnt;
2680 
2681 	memset(&act_data, 0, sizeof(act_data));
2682 	switch (type) {
2683 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
2684 		act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
2685 		shared_rss = mlx5_ipool_get
2686 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
2687 		if (!shared_rss)
2688 			return -1;
2689 		act_data.shared_rss.idx = idx;
2690 		act_data.shared_rss.level = shared_rss->origin.level;
2691 		act_data.shared_rss.types = !shared_rss->origin.types ?
2692 					    RTE_ETH_RSS_IP :
2693 					    shared_rss->origin.types;
2694 		act_data.shared_rss.symmetric_hash_function =
2695 			MLX5_RSS_IS_SYMM(shared_rss->origin.func);
2696 
2697 		item_flags = table->its[it_idx]->item_flags;
2698 		if (flow_hw_shared_action_get
2699 				(dev, &act_data, item_flags, rule_act))
2700 			return -1;
2701 		break;
2702 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
2703 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
2704 				act_idx,
2705 				&rule_act->action,
2706 				&rule_act->counter.offset))
2707 			return -1;
2708 		flow->cnt_id = act_idx;
2709 		break;
2710 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
2711 		/*
2712 		 * Save the index with the indirect type, to recognize
2713 		 * it in flow destroy.
2714 		 */
2715 		flow->age_idx = act_idx;
2716 		if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
2717 			/*
2718 			 * The mutual update for indirect AGE & COUNT will be
2719 			 * performed later, after we have IDs for both of them.
2720 			 */
2721 			break;
2722 		age_info = GET_PORT_AGE_INFO(priv);
2723 		param = mlx5_ipool_get(age_info->ages_ipool, idx);
2724 		if (param == NULL)
2725 			return -1;
2726 		if (action_flags & MLX5_FLOW_ACTION_COUNT) {
2727 			if (mlx5_hws_cnt_pool_get(priv->hws_cpool,
2728 						  &param->queue_id, &age_cnt,
2729 						  idx) < 0)
2730 				return -1;
2731 			flow->cnt_id = age_cnt;
2732 			param->nb_cnts++;
2733 		} else {
2734 			/*
2735 			 * Get the counter of this indirect AGE or create one
2736 			 * if it doesn't exist.
2737 			 */
2738 			age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
2739 			if (age_cnt == 0)
2740 				return -1;
2741 		}
2742 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
2743 						     age_cnt, &rule_act->action,
2744 						     &rule_act->counter.offset))
2745 			return -1;
2746 		break;
2747 	case MLX5_INDIRECT_ACTION_TYPE_CT:
2748 		if (flow_hw_ct_compile(dev, queue, idx, rule_act))
2749 			return -1;
2750 		break;
2751 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
2752 		/* Find ASO object. */
2753 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
2754 		if (!aso_mtr)
2755 			return -1;
2756 		rule_act->action = pool->action;
2757 		rule_act->aso_meter.offset = aso_mtr->offset;
2758 		break;
2759 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
2760 		flow_hw_construct_quota(priv, rule_act, idx);
2761 		break;
2762 	default:
2763 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
2764 		break;
2765 	}
2766 	return 0;
2767 }
2768 
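/* Check whether a pre-compiled modify header command is a NOP placeholder. */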
2769 static __rte_always_inline int
2770 flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
2771 {
2772 	struct mlx5_modification_cmd cmd_he = {
2773 		.data0 = rte_be_to_cpu_32(cmd->data0),
2774 		.data1 = 0,
2775 	};
2776 
2777 	return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
2778 }
2779 
2780 /**
2781  * Construct the modify header commands for a MODIFY_FIELD action.
2782  *
2783  * Patch the pre-compiled modify header commands in the job descriptor
2784  * with the value taken from the rte_flow MODIFY_FIELD action given at
2785  * flow creation time.
2786  *
2787  * @param[in] job
2788  *   Pointer to job descriptor.
2789  * @param[in] act_data
2790  *   Pointer to the recorded action construct data.
2791  * @param[in] hw_acts
2792  *   Pointer to translated actions from template.
2793  * @param[in] action
2794  *   Pointer to the rte_flow MODIFY_FIELD action to apply.
2795  *
2796  * @return
2797  *    0 on success, negative value otherwise.
2803  */
2804 static __rte_always_inline int
2805 flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
2806 			       struct mlx5_action_construct_data *act_data,
2807 			       const struct mlx5_hw_actions *hw_acts,
2808 			       const struct rte_flow_action *action)
2809 {
2810 	const struct rte_flow_action_modify_field *mhdr_action = action->conf;
2811 	uint8_t values[16] = { 0 };
2812 	unaligned_uint32_t *value_p;
2813 	uint32_t i;
2814 	struct field_modify_info *field;
2815 
2816 	if (!hw_acts->mhdr)
2817 		return -1;
2818 	if (hw_acts->mhdr->shared || act_data->modify_header.shared)
2819 		return 0;
2820 	MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
2821 		    mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
2822 	if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
2823 	    mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
2824 		return 0;
2825 	if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
2826 		rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
2827 	else
2828 		rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
2829 	if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
2830 	    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
2831 	    mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
2832 	    mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
2833 		uint8_t tag_index = flow_tag_index_get(&mhdr_action->dst);
2834 
2835 		value_p = (unaligned_uint32_t *)values;
2836 		if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
2837 		    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
2838 			*value_p = rte_cpu_to_be_32(*value_p << 16);
2839 		else
2840 			*value_p = rte_cpu_to_be_32(*value_p);
2841 	} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
2842 		   mhdr_action->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
2843 		uint32_t tmp;
2844 
2845 		/*
2846 		 * Both QFI and Geneve option type are passed as a uint8_t integer,
2847 		 * but they are accessed through the 2nd least significant byte of a
2848 		 * 32-bit field in the modify header command.
2849 		 */
2850 		tmp = values[0];
2851 		value_p = (unaligned_uint32_t *)values;
2852 		*value_p = rte_cpu_to_be_32(tmp << 8);
2853 	}
2854 	i = act_data->modify_header.mhdr_cmds_off;
2855 	field = act_data->modify_header.field;
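	/*
	 * Patch the pre-compiled commands with the run-time value, skipping
	 * NOP placeholders and fields with an empty mask.
	 */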
2856 	do {
2857 		uint32_t off_b;
2858 		uint32_t mask;
2859 		uint32_t data;
2860 		const uint8_t *mask_src;
2861 
2862 		if (i >= act_data->modify_header.mhdr_cmds_end)
2863 			return -1;
2864 		if (flow_hw_mhdr_cmd_is_nop(&job->mhdr_cmd[i])) {
2865 			++i;
2866 			continue;
2867 		}
2868 		mask_src = (const uint8_t *)act_data->modify_header.mask;
2869 		mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
2870 		if (!mask) {
2871 			++field;
2872 			continue;
2873 		}
2874 		off_b = rte_bsf32(mask);
2875 		data = flow_dv_fetch_field(values + field->offset, field->size);
2876 		/*
2877 		 * IPv6 DSCP uses OUT_IPV6_TRAFFIC_CLASS as its ID, but DSCP sits 2 bits
2878 		 * to the left of the ECN bits; shift the data left accordingly.
2879 		 */
2880 		if (field->id == MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS &&
2881 		    !(mask & MLX5_IPV6_HDR_ECN_MASK))
2882 			data <<= MLX5_IPV6_HDR_DSCP_SHIFT;
2883 		data = (data & mask) >> off_b;
2884 		job->mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
2885 		++field;
2886 	} while (field->size);
2887 	return 0;
2888 }
2889 
2890 /**
2891  * Construct flow action array.
2892  *
2893  * For action templates containing dynamic actions, these actions need to
2894  * be updated according to the rte_flow actions given at flow creation.
2895  *
2896  * @param[in] dev
2897  *   Pointer to the rte_eth_dev structure.
2898  * @param[in] job
2899  *   Pointer to job descriptor.
2900  * @param[in] hw_at
2901  *   Pointer to the HW action template with the translated actions.
2902  * @param[in] it_idx
2903  *   Item template index the action template refers to.
2904  * @param[in] actions
2905  *   Array of rte_flow actions to be checked.
2906  * @param[in] rule_acts
2907  *   Array of DR rule actions to be used during flow creation.
2908  * @param[in] queue
2909  *   The flow creation queue index.
2910  * @param[out] error
2911  *   Pointer to error structure.
2912  *
2913  * @return
2914  *    0 on success, negative value otherwise and rte_errno is set.
2913  */
2914 static __rte_always_inline int
2915 flow_hw_actions_construct(struct rte_eth_dev *dev,
2916 			  struct mlx5_hw_q_job *job,
2917 			  const struct mlx5_hw_action_template *hw_at,
2918 			  const uint8_t it_idx,
2919 			  const struct rte_flow_action actions[],
2920 			  struct mlx5dr_rule_action *rule_acts,
2921 			  uint32_t queue,
2922 			  struct rte_flow_error *error)
2923 {
2924 	struct mlx5_priv *priv = dev->data->dev_private;
2925 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
2926 	struct rte_flow_template_table *table = job->flow->table;
2927 	struct mlx5_action_construct_data *act_data;
2928 	const struct rte_flow_actions_template *at = hw_at->action_template;
2929 	const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
2930 	const struct rte_flow_action *action;
2931 	const struct rte_flow_action_raw_encap *raw_encap_data;
2932 	const struct rte_flow_action_ipv6_ext_push *ipv6_push;
2933 	const struct rte_flow_item *enc_item = NULL;
2934 	const struct rte_flow_action_ethdev *port_action = NULL;
2935 	const struct rte_flow_action_meter *meter = NULL;
2936 	const struct rte_flow_action_age *age = NULL;
2937 	uint8_t *buf = job->encap_data;
2938 	uint8_t *push_buf = job->push_data;
2939 	struct rte_flow_attr attr = {
2940 			.ingress = 1,
2941 	};
2942 	uint32_t ft_flag;
2943 	size_t encap_len = 0;
2944 	int ret;
2945 	uint32_t age_idx = 0;
2946 	struct mlx5_aso_mtr *aso_mtr;
2947 
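	/*
	 * Start from the statically translated DR actions; the dynamic ones
	 * are patched below from the construct-data list.
	 */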
2948 	rte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);
2949 	attr.group = table->grp->group_id;
2950 	ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
2951 	if (table->type == MLX5DR_TABLE_TYPE_FDB) {
2952 		attr.transfer = 1;
2953 		attr.ingress = 1;
2954 	} else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
2955 		attr.egress = 1;
2956 		attr.ingress = 0;
2957 	} else {
2958 		attr.ingress = 1;
2959 	}
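	/*
	 * Preload the non-shared modify header commands from the template;
	 * the rule's resource index selects its offset in the bulk action.
	 */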
2960 	if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0) {
2961 		uint16_t pos = hw_acts->mhdr->pos;
2962 
2963 		if (!hw_acts->mhdr->shared) {
2964 			rule_acts[pos].modify_header.offset =
2965 						job->flow->res_idx - 1;
2966 			rule_acts[pos].modify_header.data =
2967 						(uint8_t *)job->mhdr_cmd;
2968 			rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
2969 				   sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
2970 		}
2971 	}
2972 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
2973 		uint32_t jump_group;
2974 		uint32_t tag;
2975 		uint64_t item_flags;
2976 		struct mlx5_hw_jump_action *jump;
2977 		struct mlx5_hrxq *hrxq;
2978 		uint32_t ct_idx;
2979 		cnt_id_t cnt_id;
2980 		uint32_t *cnt_queue;
2981 		uint32_t mtr_id;
2982 
2983 		action = &actions[act_data->action_src];
2984 		/*
2985 		 * Action template construction replaces
2986 		 * OF_SET_VLAN_VID with MODIFY_FIELD.
2987 		 */
2988 		if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
2989 			MLX5_ASSERT(act_data->type ==
2990 				    RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
2991 		else
2992 			MLX5_ASSERT(action->type ==
2993 				    RTE_FLOW_ACTION_TYPE_INDIRECT ||
2994 				    (int)action->type == act_data->type);
2995 		switch ((int)act_data->type) {
2996 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2997 			act_data->indirect_list_cb(dev, act_data, actions,
2998 						   &rule_acts[act_data->action_dst]);
2999 			break;
3000 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
3001 			if (flow_hw_shared_action_construct
3002 					(dev, queue, action, table, it_idx,
3003 					 at->action_flags, job->flow,
3004 					 &rule_acts[act_data->action_dst]))
3005 				return -1;
3006 			break;
3007 		case RTE_FLOW_ACTION_TYPE_VOID:
3008 			break;
3009 		case RTE_FLOW_ACTION_TYPE_MARK:
3010 			tag = mlx5_flow_mark_set
3011 			      (((const struct rte_flow_action_mark *)
3012 			      (action->conf))->id);
3013 			rule_acts[act_data->action_dst].tag.value = tag;
3014 			break;
3015 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3016 			rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
3017 				vlan_hdr_to_be32(action);
3018 			break;
3019 		case RTE_FLOW_ACTION_TYPE_JUMP:
3020 			jump_group = ((const struct rte_flow_action_jump *)
3021 						action->conf)->group;
3022 			jump = flow_hw_jump_action_register
3023 				(dev, &table->cfg, jump_group, NULL);
3024 			if (!jump)
3025 				return -1;
3026 			rule_acts[act_data->action_dst].action =
3027 			(!!attr.group) ? jump->hws_action : jump->root_action;
3028 			job->flow->jump = jump;
3029 			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
3030 			break;
3031 		case RTE_FLOW_ACTION_TYPE_RSS:
3032 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3033 			hrxq = flow_hw_tir_action_register(dev,
3034 					ft_flag,
3035 					action);
3036 			if (!hrxq)
3037 				return -1;
3038 			rule_acts[act_data->action_dst].action = hrxq->action;
3039 			job->flow->hrxq = hrxq;
3040 			job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
3041 			break;
3042 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3043 			item_flags = table->its[it_idx]->item_flags;
3044 			if (flow_hw_shared_action_get
3045 				(dev, act_data, item_flags,
3046 				 &rule_acts[act_data->action_dst]))
3047 				return -1;
3048 			break;
3049 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3050 			enc_item = ((const struct rte_flow_action_vxlan_encap *)
3051 				   action->conf)->definition;
3052 			if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL))
3053 				return -1;
3054 			break;
3055 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3056 			enc_item = ((const struct rte_flow_action_nvgre_encap *)
3057 				   action->conf)->definition;
3058 			if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL))
3059 				return -1;
3060 			break;
3061 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3062 			raw_encap_data =
3063 				(const struct rte_flow_action_raw_encap *)
3064 				 action->conf;
3065 			rte_memcpy((void *)buf, raw_encap_data->data, act_data->encap.len);
3066 			MLX5_ASSERT(raw_encap_data->size ==
3067 				    act_data->encap.len);
3068 			break;
3069 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
3070 			ipv6_push =
3071 				(const struct rte_flow_action_ipv6_ext_push *)action->conf;
3072 			rte_memcpy((void *)push_buf, ipv6_push->data,
3073 				   act_data->ipv6_ext.len);
3074 			MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
3075 			break;
3076 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3077 			if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3078 				ret = flow_hw_set_vlan_vid_construct(dev, job,
3079 								     act_data,
3080 								     hw_acts,
3081 								     action);
3082 			else
3083 				ret = flow_hw_modify_field_construct(job,
3084 								     act_data,
3085 								     hw_acts,
3086 								     action);
3087 			if (ret)
3088 				return -1;
3089 			break;
3090 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3091 			port_action = action->conf;
3092 			if (!priv->hw_vport[port_action->port_id])
3093 				return -1;
3094 			rule_acts[act_data->action_dst].action =
3095 					priv->hw_vport[port_action->port_id];
3096 			break;
3097 		case RTE_FLOW_ACTION_TYPE_QUOTA:
3098 			flow_hw_construct_quota(priv,
3099 						rule_acts + act_data->action_dst,
3100 						act_data->shared_meter.id);
3101 			break;
3102 		case RTE_FLOW_ACTION_TYPE_METER:
3103 			meter = action->conf;
3104 			mtr_id = meter->mtr_id;
3105 			aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
3106 			rule_acts[act_data->action_dst].action =
3107 				priv->mtr_bulk.action;
3108 			rule_acts[act_data->action_dst].aso_meter.offset =
3109 								aso_mtr->offset;
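			/*
			 * METER uses two DR slots: the ASO meter set above and
			 * the jump to the meter's group in the next slot.
			 */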
3110 			jump = flow_hw_jump_action_register
3111 				(dev, &table->cfg, aso_mtr->fm.group, NULL);
3112 			if (!jump)
3113 				return -1;
3114 			MLX5_ASSERT
3115 				(!rule_acts[act_data->action_dst + 1].action);
3116 			rule_acts[act_data->action_dst + 1].action =
3117 					(!!attr.group) ? jump->hws_action :
3118 							 jump->root_action;
3119 			job->flow->jump = jump;
3120 			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
3121 			if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
3122 				return -1;
3123 			break;
3124 		case RTE_FLOW_ACTION_TYPE_AGE:
3125 			age = action->conf;
3126 			/*
3127 			 * First, create the AGE parameter, then create its
3128 			 * counter later:
3129 			 * Regular counter - created in the next case.
3130 			 * Indirect counter - updated after the loop.
3131 			 */
3132 			age_idx = mlx5_hws_age_action_create(priv, queue, 0,
3133 							     age,
3134 							     job->flow->res_idx,
3135 							     error);
3136 			if (age_idx == 0)
3137 				return -rte_errno;
3138 			job->flow->age_idx = age_idx;
3139 			if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
3140 				/*
3141 				 * When AGE uses an indirect counter, there is no
3142 				 * need to create a counter here; it is updated with
3143 				 * the AGE parameter after the loop.
3144 				 */
3145 				break;
3146 			/* Fall-through. */
3147 		case RTE_FLOW_ACTION_TYPE_COUNT:
3148 			/* If the port is engaged in resource sharing, do not use queue cache. */
3149 			cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue;
3150 			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);
3151 			if (ret != 0)
3152 				return ret;
3153 			ret = mlx5_hws_cnt_pool_get_action_offset
3154 				(priv->hws_cpool,
3155 				 cnt_id,
3156 				 &rule_acts[act_data->action_dst].action,
3157 				 &rule_acts[act_data->action_dst].counter.offset
3158 				 );
3159 			if (ret != 0)
3160 				return ret;
3161 			job->flow->cnt_id = cnt_id;
3162 			break;
3163 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
3164 			ret = mlx5_hws_cnt_pool_get_action_offset
3165 				(priv->hws_cpool,
3166 				 act_data->shared_counter.id,
3167 				 &rule_acts[act_data->action_dst].action,
3168 				 &rule_acts[act_data->action_dst].counter.offset
3169 				 );
3170 			if (ret != 0)
3171 				return ret;
3172 			job->flow->cnt_id = act_data->shared_counter.id;
3173 			break;
3174 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3175 			ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
3176 				 ((uint32_t)(uintptr_t)action->conf);
3177 			if (flow_hw_ct_compile(dev, queue, ct_idx,
3178 					       &rule_acts[act_data->action_dst]))
3179 				return -1;
3180 			break;
3181 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
3182 			mtr_id = act_data->shared_meter.id &
3183 				((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3184 			/* Find ASO object. */
3185 			aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
3186 			if (!aso_mtr)
3187 				return -1;
3188 			rule_acts[act_data->action_dst].action =
3189 							pool->action;
3190 			rule_acts[act_data->action_dst].aso_meter.offset =
3191 							aso_mtr->offset;
3192 			break;
3193 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
3194 			/*
3195 			 * Allocating the meter directly here would slow down
3196 			 * the flow insertion rate.
3197 			 */
3198 			ret = flow_hw_meter_mark_compile(dev,
3199 				act_data->action_dst, action,
3200 				rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE);
3201 			if (ret != 0)
3202 				return ret;
3203 			break;
3204 		default:
3205 			break;
3206 		}
3207 	}
3208 	if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
3209 		if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
3210 			age_idx = job->flow->age_idx & MLX5_HWS_AGE_IDX_MASK;
3211 			if (mlx5_hws_cnt_age_get(priv->hws_cpool,
3212 						 job->flow->cnt_id) != age_idx)
3213 				/*
3214 				 * This is the first use of this indirect
3215 				 * counter for this indirect AGE; increase the
3216 				 * number of counters.
3217 				 */
3218 				mlx5_hws_age_nb_cnt_increase(priv, age_idx);
3219 		}
3220 		/*
3221 		 * Update this indirect counter with the indirect/direct AGE
3222 		 * which uses it.
3223 		 */
3224 		mlx5_hws_cnt_age_set(priv->hws_cpool, job->flow->cnt_id,
3225 				     age_idx);
3226 	}
3227 	if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
3228 		rule_acts[hw_acts->encap_decap_pos].reformat.offset =
3229 				job->flow->res_idx - 1;
3230 		rule_acts[hw_acts->encap_decap_pos].reformat.data = buf;
3231 	}
3232 	if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
3233 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.offset =
3234 				job->flow->res_idx - 1;
3235 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.header = push_buf;
3236 	}
3237 	if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id))
3238 		job->flow->cnt_id = hw_acts->cnt_id;
3239 	return 0;
3240 }
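
/*
 * Illustrative sketch (not part of the driver): the kind of per-rule action
 * list that the construction path above consumes at enqueue time, combining
 * AGE with COUNT so that the counter gets attached to the AGE parameter as
 * described in the comments above. All values are hypothetical.
 *
 *	const struct rte_flow_action_age age_conf = { .timeout = 10 };
 *	const struct rte_flow_action_queue queue_conf = { .index = 0 };
 *	const struct rte_flow_action rule_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */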
3241 
3242 static const struct rte_flow_item *
3243 flow_hw_get_rule_items(struct rte_eth_dev *dev,
3244 		       const struct rte_flow_template_table *table,
3245 		       const struct rte_flow_item items[],
3246 		       uint8_t pattern_template_index,
3247 		       struct mlx5_hw_q_job *job)
3248 {
3249 	struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
3250 
3251 	/* Only one implicit item can be added to flow rule pattern. */
3252 	MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
3253 	/* At least one item was allocated in job descriptor for items. */
3254 	MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
3255 	if (pt->implicit_port) {
3256 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3257 			rte_errno = ENOMEM;
3258 			return NULL;
3259 		}
3260 		/* Set up represented port item in job descriptor. */
3261 		job->port_spec = (struct rte_flow_item_ethdev){
3262 			.port_id = dev->data->port_id,
3263 		};
3264 		job->items[0] = (struct rte_flow_item){
3265 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
3266 			.spec = &job->port_spec,
3267 		};
3268 		rte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);
3269 		return job->items;
3270 	} else if (pt->implicit_tag) {
3271 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3272 			rte_errno = ENOMEM;
3273 			return NULL;
3274 		}
3275 		/* Set up tag item in job descriptor. */
3276 		job->tag_spec = (struct rte_flow_item_tag){
3277 			.data = flow_hw_tx_tag_regc_value(dev),
3278 		};
3279 		job->items[0] = (struct rte_flow_item){
3280 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3281 			.spec = &job->tag_spec,
3282 		};
3283 		rte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);
3284 		return job->items;
3285 	} else {
3286 		return items;
3287 	}
3288 }
3289 
3290 /**
3291  * Enqueue HW steering flow creation.
3292  *
3293  * The flow will be applied to the HW only if the postpone bit is not set or
3294  * the extra push function is called.
3295  * The flow creation status should be checked from dequeue result.
3296  *
3297  * @param[in] dev
3298  *   Pointer to the rte_eth_dev structure.
3299  * @param[in] queue
3300  *   The queue to create the flow.
3301  * @param[in] attr
3302  *   Pointer to the flow operation attributes.
3303  * @param[in] items
3304  *   Items with flow spec value.
3305  * @param[in] pattern_template_index
3306  *   The item pattern flow follows from the table.
3307  * @param[in] actions
3308  *   Action with flow spec value.
3309  * @param[in] action_template_index
3310  *   The action pattern flow follows from the table.
3311  * @param[in] user_data
3312  *   Pointer to the user_data.
3313  * @param[out] error
3314  *   Pointer to error structure.
3315  *
3316  * @return
3317  *    Flow pointer on success, NULL otherwise and rte_errno is set.
3318  */
3319 static struct rte_flow *
3320 flow_hw_async_flow_create(struct rte_eth_dev *dev,
3321 			  uint32_t queue,
3322 			  const struct rte_flow_op_attr *attr,
3323 			  struct rte_flow_template_table *table,
3324 			  const struct rte_flow_item items[],
3325 			  uint8_t pattern_template_index,
3326 			  const struct rte_flow_action actions[],
3327 			  uint8_t action_template_index,
3328 			  void *user_data,
3329 			  struct rte_flow_error *error)
3330 {
3331 	struct mlx5_priv *priv = dev->data->dev_private;
3332 	struct mlx5dr_rule_attr rule_attr = {
3333 		.queue_id = queue,
3334 		.user_data = user_data,
3335 		.burst = attr->postpone,
3336 	};
3337 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
3338 	struct rte_flow_hw *flow = NULL;
3339 	struct mlx5_hw_q_job *job = NULL;
3340 	const struct rte_flow_item *rule_items;
3341 	uint32_t flow_idx = 0;
3342 	uint32_t res_idx = 0;
3343 	int ret;
3344 
3345 	if (unlikely((!dev->data->dev_started))) {
3346 		rte_errno = EINVAL;
3347 		goto error;
3348 	}
3349 	job = flow_hw_job_get(priv, queue);
3350 	if (!job) {
3351 		rte_errno = ENOMEM;
3352 		goto error;
3353 	}
3354 	flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
3355 	if (!flow)
3356 		goto error;
3357 	mlx5_ipool_malloc(table->resource, &res_idx);
3358 	if (!res_idx)
3359 		goto error;
3360 	/*
3361 	 * Set the table here in order to know the destination table
3362 	 * when freeing the flow afterwards.
3363 	 */
3364 	flow->table = table;
3365 	flow->mt_idx = pattern_template_index;
3366 	flow->idx = flow_idx;
3367 	flow->res_idx = res_idx;
3368 	/*
3369 	 * Set the job type here in order to know if the flow memory
3370 	 * should be freed or not when getting the result from dequeue.
3371 	 */
3372 	job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
3373 	job->flow = flow;
3374 	job->user_data = user_data;
3375 	rule_attr.user_data = job;
3376 	/*
3377 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices for rule
3378 	 * insertion hints.
3379 	 */
3380 	MLX5_ASSERT(res_idx > 0);
3381 	flow->rule_idx = res_idx - 1;
3382 	rule_attr.rule_idx = flow->rule_idx;
3383 	/*
3384 	 * Construct the flow actions based on the input actions.
3385 	 * The implicitly appended action is always fixed, like metadata
3386 	 * copy action from FDB to NIC Rx.
3387 	 * No need to copy and construct a new "actions" list based on the
3388 	 * user's input, in order to save the cost.
3389 	 */
3390 	if (flow_hw_actions_construct(dev, job,
3391 				      &table->ats[action_template_index],
3392 				      pattern_template_index, actions,
3393 				      rule_acts, queue, error)) {
3394 		rte_errno = EINVAL;
3395 		goto error;
3396 	}
3397 	rule_items = flow_hw_get_rule_items(dev, table, items,
3398 					    pattern_template_index, job);
3399 	if (!rule_items)
3400 		goto error;
3401 	ret = mlx5dr_rule_create(table->matcher,
3402 				 pattern_template_index, rule_items,
3403 				 action_template_index, rule_acts,
3404 				 &rule_attr, (struct mlx5dr_rule *)flow->rule);
3405 	if (likely(!ret))
3406 		return (struct rte_flow *)flow;
3407 error:
3408 	if (job)
3409 		flow_hw_job_put(priv, job, queue);
3410 	if (flow_idx)
3411 		mlx5_ipool_free(table->flow, flow_idx);
3412 	if (res_idx)
3413 		mlx5_ipool_free(table->resource, res_idx);
3414 	rte_flow_error_set(error, rte_errno,
3415 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3416 			   "fail to create rte flow");
3417 	return NULL;
3418 }
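
/*
 * Illustrative application-side sketch (not part of the driver) of the async
 * create path implemented above, going through the generic rte_flow API.
 * The port id, queue id, table and template indices are hypothetical.
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_async_create(port_id, 0, &op_attr, table,
 *				  pattern, 0, actions, 0, NULL, &err);
 *
 * A NULL return means the enqueue itself failed; otherwise the creation
 * status is reported later by rte_flow_pull().
 */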
3419 
3420 /**
3421  * Enqueue HW steering flow creation by index.
3422  *
3423  * The flow will be applied to the HW only if the postpone bit is not set or
3424  * the extra push function is called.
3425  * The flow creation status should be checked from dequeue result.
3426  *
3427  * @param[in] dev
3428  *   Pointer to the rte_eth_dev structure.
3429  * @param[in] queue
3430  *   The queue to create the flow.
3431  * @param[in] attr
3432  *   Pointer to the flow operation attributes.
3433  * @param[in] rule_index
3434  *   The rule insertion index in the table.
3435  * @param[in] actions
3436  *   Action with flow spec value.
3437  * @param[in] action_template_index
3438  *   The action pattern flow follows from the table.
3439  * @param[in] user_data
3440  *   Pointer to the user_data.
3441  * @param[out] error
3442  *   Pointer to error structure.
3443  *
3444  * @return
3445  *    Flow pointer on success, NULL otherwise and rte_errno is set.
3446  */
3447 static struct rte_flow *
3448 flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
3449 			  uint32_t queue,
3450 			  const struct rte_flow_op_attr *attr,
3451 			  struct rte_flow_template_table *table,
3452 			  uint32_t rule_index,
3453 			  const struct rte_flow_action actions[],
3454 			  uint8_t action_template_index,
3455 			  void *user_data,
3456 			  struct rte_flow_error *error)
3457 {
3458 	struct rte_flow_item items[] = {{.type = RTE_FLOW_ITEM_TYPE_END,}};
3459 	struct mlx5_priv *priv = dev->data->dev_private;
3460 	struct mlx5dr_rule_attr rule_attr = {
3461 		.queue_id = queue,
3462 		.user_data = user_data,
3463 		.burst = attr->postpone,
3464 	};
3465 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
3466 	struct rte_flow_hw *flow = NULL;
3467 	struct mlx5_hw_q_job *job = NULL;
3468 	uint32_t flow_idx = 0;
3469 	uint32_t res_idx = 0;
3470 	int ret;
3471 
3472 	if (unlikely(rule_index >= table->cfg.attr.nb_flows)) {
3473 		rte_errno = EINVAL;
3474 		goto error;
3475 	}
3476 	job = flow_hw_job_get(priv, queue);
3477 	if (!job) {
3478 		rte_errno = ENOMEM;
3479 		goto error;
3480 	}
3481 	flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
3482 	if (!flow)
3483 		goto error;
3484 	mlx5_ipool_malloc(table->resource, &res_idx);
3485 	if (!res_idx)
3486 		goto error;
3487 	/*
3488 	 * Set the table here in order to know the destination table
3489 	 * when freeing the flow afterwards.
3490 	 */
3491 	flow->table = table;
3492 	flow->mt_idx = 0;
3493 	flow->idx = flow_idx;
3494 	flow->res_idx = res_idx;
3495 	/*
3496 	 * Set the job type here in order to know if the flow memory
3497 	 * should be freed or not when getting the result from dequeue.
3498 	 */
3499 	job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
3500 	job->flow = flow;
3501 	job->user_data = user_data;
3502 	rule_attr.user_data = job;
3503 	/*
3504 	 * Set the rule index.
3505 	 */
3506 	flow->rule_idx = rule_index;
3507 	rule_attr.rule_idx = flow->rule_idx;
3508 	/*
3509 	 * Construct the flow actions based on the input actions.
3510 	 * The implicitly appended action is always fixed, like metadata
3511 	 * copy action from FDB to NIC Rx.
3512 	 * No need to copy and construct a new "actions" list based on the
3513 	 * user's input, in order to save the cost.
3514 	 */
3515 	if (flow_hw_actions_construct(dev, job,
3516 				      &table->ats[action_template_index],
3517 				      0, actions, rule_acts, queue, error)) {
3518 		rte_errno = EINVAL;
3519 		goto error;
3520 	}
3521 	ret = mlx5dr_rule_create(table->matcher,
3522 				 0, items, action_template_index, rule_acts,
3523 				 &rule_attr, (struct mlx5dr_rule *)flow->rule);
3524 	if (likely(!ret))
3525 		return (struct rte_flow *)flow;
3526 error:
3527 	if (job)
3528 		flow_hw_job_put(priv, job, queue);
3529 	if (res_idx)
3530 		mlx5_ipool_free(table->resource, res_idx);
3531 	if (flow_idx)
3532 		mlx5_ipool_free(table->flow, flow_idx);
3533 	rte_flow_error_set(error, rte_errno,
3534 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3535 			   "fail to create rte flow");
3536 	return NULL;
3537 }
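
/*
 * Illustrative sketch (not part of the driver): inserting into an index-based
 * template table through the generic API, which lands in the function above.
 * The table is assumed to use by-index insertion; all values are hypothetical.
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_async_create_by_index(port_id, 0, &op_attr, table,
 *					   42, actions, 0, NULL, &err);
 */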
3538 
3539 /**
3540  * Enqueue HW steering flow update.
3541  *
3542  * The flow will be applied to the HW only if the postpone bit is not set or
3543  * the extra push function is called.
3544  * The flow update status should be checked from dequeue result.
3545  *
3546  * @param[in] dev
3547  *   Pointer to the rte_eth_dev structure.
3548  * @param[in] queue
3549  *   The queue to update the flow.
3550  * @param[in] attr
3551  *   Pointer to the flow operation attributes.
3552  * @param[in] flow
3553  *   Pointer to the flow to be updated.
3554  * @param[in] actions
3555  *   Action with flow spec value.
3556  * @param[in] action_template_index
3557  *   The action pattern flow follows from the table.
3558  * @param[in] user_data
3559  *   Pointer to the user_data.
3560  * @param[out] error
3561  *   Pointer to error structure.
3562  *
3563  * @return
3564  *    0 on success, negative value otherwise and rte_errno is set.
3565  */
3566 static int
3567 flow_hw_async_flow_update(struct rte_eth_dev *dev,
3568 			   uint32_t queue,
3569 			   const struct rte_flow_op_attr *attr,
3570 			   struct rte_flow *flow,
3571 			   const struct rte_flow_action actions[],
3572 			   uint8_t action_template_index,
3573 			   void *user_data,
3574 			   struct rte_flow_error *error)
3575 {
3576 	struct mlx5_priv *priv = dev->data->dev_private;
3577 	struct mlx5dr_rule_attr rule_attr = {
3578 		.queue_id = queue,
3579 		.user_data = user_data,
3580 		.burst = attr->postpone,
3581 	};
3582 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
3583 	struct rte_flow_hw *of = (struct rte_flow_hw *)flow;
3584 	struct rte_flow_hw *nf;
3585 	struct rte_flow_template_table *table = of->table;
3586 	struct mlx5_hw_q_job *job = NULL;
3587 	uint32_t res_idx = 0;
3588 	int ret;
3589 
3590 	job = flow_hw_job_get(priv, queue);
3591 	if (!job) {
3592 		rte_errno = ENOMEM;
3593 		goto error;
3594 	}
3595 	mlx5_ipool_malloc(table->resource, &res_idx);
3596 	if (!res_idx)
3597 		goto error;
3598 	nf = job->upd_flow;
3599 	memset(nf, 0, sizeof(struct rte_flow_hw));
3600 	/*
3601 	 * Set the table here in order to know the destination table
3602 	 * when freeing the flow afterwards.
3603 	 */
3604 	nf->table = table;
3605 	nf->mt_idx = of->mt_idx;
3606 	nf->idx = of->idx;
3607 	nf->res_idx = res_idx;
3608 	/*
3609 	 * Set the job type here in order to know if the flow memory
3610 	 * should be freed or not when getting the result from dequeue.
3611 	 */
3612 	job->type = MLX5_HW_Q_JOB_TYPE_UPDATE;
3613 	job->flow = nf;
3614 	job->user_data = user_data;
3615 	rule_attr.user_data = job;
3616 	/*
3617 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices for rule
3618 	 * insertion hints.
3619 	 */
3620 	MLX5_ASSERT(res_idx > 0);
3621 	nf->rule_idx = res_idx - 1;
3622 	rule_attr.rule_idx = nf->rule_idx;
3623 	/*
3624 	 * Construct the flow actions based on the input actions.
3625 	 * The implicitly appended action is always fixed, like metadata
3626 	 * copy action from FDB to NIC Rx.
3627 	 * No need to copy and construct a new "actions" list based on the
3628 	 * user's input, in order to save the cost.
3629 	 */
3630 	if (flow_hw_actions_construct(dev, job,
3631 				      &table->ats[action_template_index],
3632 				      nf->mt_idx, actions,
3633 				      rule_acts, queue, error)) {
3634 		rte_errno = EINVAL;
3635 		goto error;
3636 	}
3637 	/*
3638 	 * Switch the old flow and the new flow.
3639 	 */
3640 	job->flow = of;
3641 	job->upd_flow = nf;
3642 	ret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,
3643 					action_template_index, rule_acts, &rule_attr);
3644 	if (likely(!ret))
3645 		return 0;
3646 error:
3647 	/* Flow update failed; put the job descriptor and resource index back. */
3648 	if (job)
3649 		flow_hw_job_put(priv, job, queue);
3650 	if (res_idx)
3651 		mlx5_ipool_free(table->resource, res_idx);
3652 	return rte_flow_error_set(error, rte_errno,
3653 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3654 			"fail to update rte flow");
3655 }
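
/*
 * Illustrative sketch (not part of the driver): updating the actions of an
 * existing rule through the generic API, served by the function above.
 * "flow" is a rule previously created on the same queue; values are
 * hypothetical and the final status comes from rte_flow_pull().
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow_error err;
 *	int rc;
 *
 *	rc = rte_flow_async_actions_update(port_id, 0, &op_attr, flow,
 *					   new_actions, 0, NULL, &err);
 */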
3656 
3657 /**
3658  * Enqueue HW steering flow destruction.
3659  *
3660  * The flow will be applied to the HW only if the postpone bit is not set or
3661  * the extra push function is called.
3662  * The flow destruction status should be checked from dequeue result.
3663  *
3664  * @param[in] dev
3665  *   Pointer to the rte_eth_dev structure.
3666  * @param[in] queue
3667  *   The queue to destroy the flow.
3668  * @param[in] attr
3669  *   Pointer to the flow operation attributes.
3670  * @param[in] flow
3671  *   Pointer to the flow to be destroyed.
3672  * @param[in] user_data
3673  *   Pointer to the user_data.
3674  * @param[out] error
3675  *   Pointer to error structure.
3676  *
3677  * @return
3678  *    0 on success, negative value otherwise and rte_errno is set.
3679  */
3680 static int
3681 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
3682 			   uint32_t queue,
3683 			   const struct rte_flow_op_attr *attr,
3684 			   struct rte_flow *flow,
3685 			   void *user_data,
3686 			   struct rte_flow_error *error)
3687 {
3688 	struct mlx5_priv *priv = dev->data->dev_private;
3689 	struct mlx5dr_rule_attr rule_attr = {
3690 		.queue_id = queue,
3691 		.user_data = user_data,
3692 		.burst = attr->postpone,
3693 	};
3694 	struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
3695 	struct mlx5_hw_q_job *job;
3696 	int ret;
3697 
3698 	job = flow_hw_job_get(priv, queue);
3699 	if (!job)
3700 		return rte_flow_error_set(error, ENOMEM,
3701 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3702 					  "fail to destroy rte flow: flow queue full");
3703 	job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
3704 	job->user_data = user_data;
3705 	job->flow = fh;
3706 	rule_attr.user_data = job;
3707 	rule_attr.rule_idx = fh->rule_idx;
3708 	ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
3709 	if (ret) {
3710 		flow_hw_job_put(priv, job, queue);
3711 		return rte_flow_error_set(error, rte_errno,
3712 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3713 					  "fail to destroy rte flow");
3714 	}
3715 	return 0;
3716 }
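
/*
 * Illustrative sketch (not part of the driver): enqueueing a rule destruction
 * through the generic API, served by the function above. Values are
 * hypothetical; the result must still be fetched with rte_flow_pull().
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow_error err;
 *	int rc;
 *
 *	rc = rte_flow_async_destroy(port_id, 0, &op_attr, flow, NULL, &err);
 */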
3717 
3718 /**
3719  * Release the AGE and counter for given flow.
3720  *
3721  * @param[in] priv
3722  *   Pointer to the port private data structure.
3723  * @param[in] queue
3724  *   The queue to release the counter.
3725  * @param[in, out] flow
3726  *   Pointer to the flow containing the counter.
3727  * @param[out] error
3728  *   Pointer to error structure.
3729  */
3730 static void
3731 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
3732 			  struct rte_flow_hw *flow,
3733 			  struct rte_flow_error *error)
3734 {
3735 	uint32_t *cnt_queue;
3736 
3737 	if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
3738 		if (flow->age_idx && !mlx5_hws_age_is_indirect(flow->age_idx)) {
3739 			/* Remove this AGE parameter from indirect counter. */
3740 			mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
3741 			/* Release the AGE parameter. */
3742 			mlx5_hws_age_action_destroy(priv, flow->age_idx, error);
3743 			flow->age_idx = 0;
3744 		}
3745 		return;
3746 	}
3747 	/* If the port is engaged in resource sharing, do not use queue cache. */
3748 	cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue;
3749 	/* Put the counter first to reduce the race risk in BG thread. */
3750 	mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);
3751 	flow->cnt_id = 0;
3752 	if (flow->age_idx) {
3753 		if (mlx5_hws_age_is_indirect(flow->age_idx)) {
3754 			uint32_t idx = flow->age_idx & MLX5_HWS_AGE_IDX_MASK;
3755 
3756 			mlx5_hws_age_nb_cnt_decrease(priv, idx);
3757 		} else {
3758 			/* Release the AGE parameter. */
3759 			mlx5_hws_age_action_destroy(priv, flow->age_idx, error);
3760 		}
3761 		flow->age_idx = 0;
3762 	}
3763 }
3764 
3765 static __rte_always_inline void
3766 flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job *job,
3767 				  uint32_t queue)
3768 {
3769 	struct mlx5_priv *priv = dev->data->dev_private;
3770 	struct mlx5_aso_ct_action *aso_ct;
3771 	struct mlx5_aso_mtr *aso_mtr;
3772 	uint32_t type, idx;
3773 
3774 	if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
3775 	    MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
3776 		mlx5_quota_async_completion(dev, queue, job);
3777 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
3778 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
3779 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
3780 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
3781 			mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
3782 		}
3783 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
3784 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
3785 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
3786 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
3787 			aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
3788 			aso_mtr->state = ASO_METER_READY;
3789 		} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
3790 			idx = MLX5_ACTION_CTX_CT_GET_IDX
3791 			((uint32_t)(uintptr_t)job->action);
3792 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
3793 			aso_ct->state = ASO_CONNTRACK_READY;
3794 		}
3795 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
3796 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
3797 		if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
3798 			idx = MLX5_ACTION_CTX_CT_GET_IDX
3799 			((uint32_t)(uintptr_t)job->action);
3800 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
3801 			mlx5_aso_ct_obj_analyze(job->query.user,
3802 						job->query.hw);
3803 			aso_ct->state = ASO_CONNTRACK_READY;
3804 		}
3805 	} else {
3806 		/*
3807 		 * rte_flow_op_result::user_data can point to
3808 		 * a struct mlx5_aso_mtr object as well.
3809 		 */
3810 		if (queue != CTRL_QUEUE_ID(priv))
3811 			MLX5_ASSERT(false);
3812 	}
3813 }
3814 
3815 static inline int
3816 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
3817 				 uint32_t queue,
3818 				 struct rte_flow_op_result res[],
3819 				 uint16_t n_res)
3820 
3821 {
3822 	struct mlx5_priv *priv = dev->data->dev_private;
3823 	struct rte_ring *r = priv->hw_q[queue].indir_cq;
3824 	void *user_data = NULL;
3825 	int ret_comp, i;
3826 
3827 	ret_comp = (int)rte_ring_count(r);
3828 	if (ret_comp > n_res)
3829 		ret_comp = n_res;
3830 	for (i = 0; i < ret_comp; i++) {
3831 		rte_ring_dequeue(r, &user_data);
3832 		res[i].user_data = user_data;
3833 		res[i].status = RTE_FLOW_OP_SUCCESS;
3834 	}
3835 	if (ret_comp < n_res && priv->hws_mpool)
3836 		ret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],
3837 				&res[ret_comp], n_res - ret_comp);
3838 	if (ret_comp < n_res && priv->hws_ctpool)
3839 		ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
3840 				&res[ret_comp], n_res - ret_comp);
3841 	if (ret_comp < n_res && priv->quota_ctx.sq)
3842 		ret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],
3843 						     &res[ret_comp],
3844 						     n_res - ret_comp);
3845 	for (i = 0; i <  ret_comp; i++) {
3846 		struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)res[i].user_data;
3847 
3848 		/* Restore user data. */
3849 		res[i].user_data = job->user_data;
3850 		if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
3851 			flow_hw_pull_legacy_indirect_comp(dev, job, queue);
3852 		/*
3853 		 * Current PMD supports 2 indirect action list types - MIRROR and REFORMAT.
3854 		 * These indirect list types do not post WQE to create action.
3855 		 * Future indirect list types that do post WQE will add
3856 		 * completion handlers here.
3857 		 */
3858 		flow_hw_job_put(priv, job, queue);
3859 	}
3860 	return ret_comp;
3861 }
3862 
3863 /**
3864  * Pull the enqueued flows.
3865  *
3866  * For flows enqueued from creation/destruction, the status should be
3867  * checked from the dequeue result.
3868  *
3869  * @param[in] dev
3870  *   Pointer to the rte_eth_dev structure.
3871  * @param[in] queue
3872  *   The queue to pull the result.
3873  * @param[in, out] res
3874  *   Array to save the results.
3875  * @param[in] n_res
3876  *   Number of result slots available in the array.
3877  * @param[out] error
3878  *   Pointer to error structure.
3879  *
3880  * @return
3881  *    Result number on success, negative value otherwise and rte_errno is set.
3882  */
3883 static int
3884 flow_hw_pull(struct rte_eth_dev *dev,
3885 	     uint32_t queue,
3886 	     struct rte_flow_op_result res[],
3887 	     uint16_t n_res,
3888 	     struct rte_flow_error *error)
3889 {
3890 	struct mlx5_priv *priv = dev->data->dev_private;
3891 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3892 	struct mlx5_hw_q_job *job;
3893 	uint32_t res_idx;
3894 	int ret, i;
3895 
3896 	/* 1. Pull the flow completion. */
3897 	ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
3898 	if (ret < 0)
3899 		return rte_flow_error_set(error, rte_errno,
3900 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3901 				"fail to query flow queue");
3902 	for (i = 0; i <  ret; i++) {
3903 		job = (struct mlx5_hw_q_job *)res[i].user_data;
3904 		/* Release the original resource index in case of update. */
3905 		res_idx = job->flow->res_idx;
3906 		/* Restore user data. */
3907 		res[i].user_data = job->user_data;
3908 		if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY ||
3909 		    job->type == MLX5_HW_Q_JOB_TYPE_UPDATE) {
3910 			if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
3911 				flow_hw_jump_release(dev, job->flow->jump);
3912 			else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
3913 				mlx5_hrxq_obj_release(dev, job->flow->hrxq);
3914 			if (mlx5_hws_cnt_id_valid(job->flow->cnt_id))
3915 				flow_hw_age_count_release(priv, queue,
3916 							  job->flow, error);
3917 			if (job->flow->mtr_id) {
3918 				mlx5_ipool_free(pool->idx_pool, job->flow->mtr_id);
3919 				job->flow->mtr_id = 0;
3920 			}
3921 			if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
3922 				mlx5_ipool_free(job->flow->table->resource, res_idx);
3923 				mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
3924 			} else {
3925 				rte_memcpy(job->flow, job->upd_flow,
3926 					offsetof(struct rte_flow_hw, rule));
3927 				mlx5_ipool_free(job->flow->table->resource, res_idx);
3928 			}
3929 		}
3930 		flow_hw_job_put(priv, job, queue);
3931 	}
3932 	/* 2. Pull indirect action comp. */
3933 	if (ret < n_res)
3934 		ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
3935 							n_res - ret);
3936 	return ret;
3937 }
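
/*
 * Illustrative sketch (not part of the driver): draining completions from an
 * application thread, which maps to the flow_hw_pull() callback above.
 * Values are hypothetical.
 *
 *	struct rte_flow_op_result res[32];
 *	struct rte_flow_error err;
 *	int n, i;
 *
 *	n = rte_flow_pull(port_id, 0, res, RTE_DIM(res), &err);
 *	for (i = 0; i < n; i++)
 *		if (res[i].status == RTE_FLOW_OP_ERROR)
 *			handle_failure(res[i].user_data);
 *
 * handle_failure() is a hypothetical application callback.
 */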
3938 
3939 static inline uint32_t
3940 __flow_hw_push_action(struct rte_eth_dev *dev,
3941 		    uint32_t queue)
3942 {
3943 	struct mlx5_priv *priv = dev->data->dev_private;
3944 	struct rte_ring *iq = priv->hw_q[queue].indir_iq;
3945 	struct rte_ring *cq = priv->hw_q[queue].indir_cq;
3946 	void *job = NULL;
3947 	uint32_t ret, i;
3948 
3949 	ret = rte_ring_count(iq);
3950 	for (i = 0; i < ret; i++) {
3951 		rte_ring_dequeue(iq, &job);
3952 		rte_ring_enqueue(cq, job);
3953 	}
3954 	if (!priv->shared_host) {
3955 		if (priv->hws_ctpool)
3956 			mlx5_aso_push_wqe(priv->sh,
3957 					  &priv->ct_mng->aso_sqs[queue]);
3958 		if (priv->hws_mpool)
3959 			mlx5_aso_push_wqe(priv->sh,
3960 					  &priv->hws_mpool->sq[queue]);
3961 	}
3962 	return priv->hw_q[queue].size - priv->hw_q[queue].job_idx;
3963 }
3964 
3965 static int
3966 __flow_hw_push(struct rte_eth_dev *dev,
3967 	       uint32_t queue,
3968 	       struct rte_flow_error *error)
3969 {
3970 	struct mlx5_priv *priv = dev->data->dev_private;
3971 	int ret, num;
3972 
3973 	num = __flow_hw_push_action(dev, queue);
3974 	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
3975 				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
3976 	if (ret) {
3977 		rte_flow_error_set(error, rte_errno,
3978 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3979 				   "fail to push flows");
3980 		return ret;
3981 	}
3982 	return num;
3983 }
3984 
3985 /**
3986  * Push the enqueued flows to HW.
3987  *
3988  * Force apply all the enqueued flows to the HW.
3989  *
3990  * @param[in] dev
3991  *   Pointer to the rte_eth_dev structure.
3992  * @param[in] queue
3993  *   The queue to push the flow.
3994  * @param[out] error
3995  *   Pointer to error structure.
3996  *
3997  * @return
3998  *    0 on success, negative value otherwise and rte_errno is set.
3999  */
4000 static int
4001 flow_hw_push(struct rte_eth_dev *dev,
4002 	     uint32_t queue, struct rte_flow_error *error)
4003 {
4004 	int ret = __flow_hw_push(dev, queue, error);
4005 
4006 	return ret >= 0 ? 0 : ret;
4007 }
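
/*
 * Illustrative sketch (not part of the driver): pushing postponed operations,
 * served by flow_hw_push() above. Typically called after enqueueing a burst
 * of operations with op_attr.postpone set; values are hypothetical.
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_push(port_id, 0, &err))
 *		handle_push_error(&err);
 *
 * handle_push_error() is a hypothetical application callback.
 */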
4008 
4009 /**
4010  * Drain the enqueued flows' completion.
4011  *
4012  * @param[in] dev
4013  *   Pointer to the rte_eth_dev structure.
4014  * @param[in] queue
4015  *   The queue to pull the flow.
4016  * @param[out] error
4017  *   Pointer to error structure.
4018  *
4019  * @return
4020  *    0 on success, negative value otherwise and rte_errno is set.
4021  */
4022 static int
4023 __flow_hw_pull_comp(struct rte_eth_dev *dev,
4024 		    uint32_t queue, struct rte_flow_error *error)
4025 {
4026 	struct rte_flow_op_result comp[BURST_THR];
4027 	int ret, i, empty_loop = 0;
4028 	uint32_t pending_rules;
4029 
4030 	ret = __flow_hw_push(dev, queue, error);
4031 	if (ret < 0)
4032 		return ret;
4033 	pending_rules = ret;
4034 	while (pending_rules) {
4035 		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
4036 		if (ret < 0)
4037 			return -1;
4038 		if (!ret) {
4039 			rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
4040 			if (++empty_loop > 5) {
4041 				DRV_LOG(WARNING, "No available dequeue %u, quit.", pending_rules);
4042 				break;
4043 			}
4044 			continue;
4045 		}
4046 		for (i = 0; i < ret; i++) {
4047 			if (comp[i].status == RTE_FLOW_OP_ERROR)
4048 				DRV_LOG(WARNING, "Flow flush get error CQE.");
4049 		}
4050 		/*
4051 		 * Indirect **SYNC** METER_MARK and CT actions do not
4052 		 * remove completion after WQE post.
4053 		 * That implementation avoids HW timeout.
4054 		 * The completion is removed before the following WQE post.
4055 		 * However, HWS queue updates do not reflect that behaviour.
4056 		 * Therefore, during port destruction the sync queue may have
4057 		 * pending completions.
4058 		 */
4059 		pending_rules -= RTE_MIN(pending_rules, (uint32_t)ret);
4060 		empty_loop = 0;
4061 	}
4062 	return 0;
4063 }
4064 
4065 /**
4066  * Flush created flows.
4067  *
4068  * @param[in] dev
4069  *   Pointer to the rte_eth_dev structure.
4070  * @param[out] error
4071  *   Pointer to error structure.
4072  *
4073  * @return
4074  *    0 on success, negative value otherwise and rte_errno is set.
4075  */
4076 int
4077 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
4078 		     struct rte_flow_error *error)
4079 {
4080 	struct mlx5_priv *priv = dev->data->dev_private;
4081 	struct mlx5_hw_q *hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
4082 	struct rte_flow_template_table *tbl;
4083 	struct rte_flow_hw *flow;
4084 	struct rte_flow_op_attr attr = {
4085 		.postpone = 0,
4086 	};
4087 	uint32_t pending_rules = 0;
4088 	uint32_t queue;
4089 	uint32_t fidx;
4090 
4091 	/*
4092 	 * Make sure to push and dequeue all the enqueued flow
4093 	 * creation/destruction jobs in case the user forgot to
4094 	 * dequeue; otherwise the enqueued created flows would be
4095 	 * leaked. Forgotten dequeues would also make the flow
4096 	 * flush receive more CQEs than expected and drive
4097 	 * pending_rules negative.
4098 	 */
4099 	for (queue = 0; queue < priv->nb_queue; queue++) {
4100 		if (__flow_hw_pull_comp(dev, queue, error))
4101 			return -1;
4102 	}
4103 	/* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
4104 	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
4105 		if (!tbl->cfg.external)
4106 			continue;
4107 		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
4108 			if (flow_hw_async_flow_destroy(dev,
4109 						MLX5_DEFAULT_FLUSH_QUEUE,
4110 						&attr,
4111 						(struct rte_flow *)flow,
4112 						NULL,
4113 						error))
4114 				return -1;
4115 			pending_rules++;
4116 			/* Drain completion with queue size. */
4117 			/* Drain completions once the queue size is reached. */
4118 				if (__flow_hw_pull_comp(dev,
4119 							MLX5_DEFAULT_FLUSH_QUEUE,
4120 							error))
4121 					return -1;
4122 				pending_rules = 0;
4123 			}
4124 		}
4125 	}
4126 	/* Drain the remaining completions. */
4127 	if (pending_rules &&
4128 	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, error))
4129 		return -1;
4130 	return 0;
4131 }
4132 
4133 static int
4134 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
4135 			       struct rte_flow_template_table *tbl,
4136 			       struct mlx5_tbl_multi_pattern_ctx *mpat,
4137 			       struct rte_flow_error *error)
4138 {
4139 	uint32_t i;
4140 	struct mlx5_priv *priv = dev->data->dev_private;
4141 	const struct rte_flow_template_table_attr *table_attr = &tbl->cfg.attr;
4142 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
4143 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
4144 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][type];
4145 	struct mlx5dr_action *dr_action;
4146 	uint32_t bulk_size = rte_log2_u32(table_attr->nb_flows);
4147 
4148 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
4149 		uint32_t j;
4150 		uint32_t *reformat_refcnt;
4151 		typeof(mpat->reformat[0]) *reformat = mpat->reformat + i;
4152 		struct mlx5dr_action_reformat_header hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
4153 		enum mlx5dr_action_type reformat_type =
4154 			mlx5_multi_pattern_reformat_index_to_type(i);
4155 
4156 		if (!reformat->elements_num)
4157 			continue;
4158 		for (j = 0; j < reformat->elements_num; j++)
4159 			hdr[j] = reformat->ctx[j].reformat_hdr;
4160 		reformat_refcnt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(uint32_t), 0,
4161 					      rte_socket_id());
4162 		if (!reformat_refcnt)
4163 			return rte_flow_error_set(error, ENOMEM,
4164 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4165 						  NULL, "failed to allocate multi-pattern encap counter");
4166 		*reformat_refcnt = reformat->elements_num;
4167 		dr_action = mlx5dr_action_create_reformat
4168 			(priv->dr_ctx, reformat_type, reformat->elements_num, hdr,
4169 			 bulk_size, flags);
4170 		if (!dr_action) {
4171 			mlx5_free(reformat_refcnt);
4172 			return rte_flow_error_set(error, rte_errno,
4173 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4174 						  NULL,
4175 						  "failed to create multi-pattern encap action");
4176 		}
4177 		for (j = 0; j < reformat->elements_num; j++) {
4178 			reformat->ctx[j].rule_action->action = dr_action;
4179 			reformat->ctx[j].encap->action = dr_action;
4180 			reformat->ctx[j].encap->multi_pattern = 1;
4181 			reformat->ctx[j].encap->multi_pattern_refcnt = reformat_refcnt;
4182 		}
4183 	}
4184 	if (mpat->mh.elements_num) {
4185 		typeof(mpat->mh) *mh = &mpat->mh;
4186 		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
4187 		uint32_t *mh_refcnt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(uint32_t),
4188 						 0, rte_socket_id());
4189 
4190 		if (!mh_refcnt)
4191 			return rte_flow_error_set(error, ENOMEM,
4192 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4193 						  NULL, "failed to allocate modify header counter");
4194 		*mh_refcnt = mpat->mh.elements_num;
4195 		for (i = 0; i < mpat->mh.elements_num; i++)
4196 			pattern[i] = mh->ctx[i].mh_pattern;
4197 		dr_action = mlx5dr_action_create_modify_header
4198 			(priv->dr_ctx, mpat->mh.elements_num, pattern,
4199 			 bulk_size, flags);
4200 		if (!dr_action) {
4201 			mlx5_free(mh_refcnt);
4202 			return rte_flow_error_set(error, rte_errno,
4203 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4204 						  NULL,
4205 						  "failed to create multi-pattern header modify action");
4206 		}
4207 		for (i = 0; i < mpat->mh.elements_num; i++) {
4208 			mh->ctx[i].rule_action->action = dr_action;
4209 			mh->ctx[i].mhdr->action = dr_action;
4210 			mh->ctx[i].mhdr->multi_pattern = 1;
4211 			mh->ctx[i].mhdr->multi_pattern_refcnt = mh_refcnt;
4212 		}
4213 	}
4214 
4215 	return 0;
4216 }
4217 
4218 static int
4219 mlx5_hw_build_template_table(struct rte_eth_dev *dev,
4220 			     uint8_t nb_action_templates,
4221 			     struct rte_flow_actions_template *action_templates[],
4222 			     struct mlx5dr_action_template *at[],
4223 			     struct rte_flow_template_table *tbl,
4224 			     struct rte_flow_error *error)
4225 {
4226 	int ret;
4227 	uint8_t i;
4228 	struct mlx5_tbl_multi_pattern_ctx mpat = MLX5_EMPTY_MULTI_PATTERN_CTX;
4229 
4230 	for (i = 0; i < nb_action_templates; i++) {
4231 		uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
4232 						     __ATOMIC_RELAXED);
4233 
4234 		if (refcnt <= 1) {
4235 			rte_flow_error_set(error, EINVAL,
4236 					   RTE_FLOW_ERROR_TYPE_ACTION,
4237 					   &action_templates[i], "invalid AT refcount");
4238 			goto at_error;
4239 		}
4240 		at[i] = action_templates[i]->tmpl;
4241 		tbl->ats[i].action_template = action_templates[i];
4242 		LIST_INIT(&tbl->ats[i].acts.act_list);
4243 		/* do NOT translate table action if `dev` was not started */
4244 		if (!dev->data->dev_started)
4245 			continue;
4246 		ret = __flow_hw_actions_translate(dev, &tbl->cfg,
4247 						  &tbl->ats[i].acts,
4248 						  action_templates[i],
4249 						  &mpat, error);
4250 		if (ret) {
4251 			i++;
4252 			goto at_error;
4253 		}
4254 	}
4255 	tbl->nb_action_templates = nb_action_templates;
4256 	ret = mlx5_tbl_multi_pattern_process(dev, tbl, &mpat, error);
4257 	if (ret)
4258 		goto at_error;
4259 	return 0;
4260 
4261 at_error:
4262 	while (i--) {
4263 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4264 		__atomic_sub_fetch(&action_templates[i]->refcnt,
4265 				   1, __ATOMIC_RELAXED);
4266 	}
4267 	return rte_errno;
4268 }
4269 
4270 /**
4271  * Create flow table.
4272  *
4273  * The input item and action templates will be bound to the table.
4274  * Flow memory will also be allocated. Matcher will be created based
4275  * on the item template. Action will be translated to the dedicated
4276  * DR action if possible.
4277  *
4278  * @param[in] dev
4279  *   Pointer to the rte_eth_dev structure.
4280  * @param[in] table_cfg
4281  *   Pointer to the table configuration.
4282  * @param[in] item_templates
4283  *   Item template array to be bound to the table.
4284  * @param[in] nb_item_templates
4285  *   Number of item templates.
4286  * @param[in] action_templates
4287  *   Action template array to be bound to the table.
4288  * @param[in] nb_action_templates
4289  *   Number of action templates.
4290  * @param[out] error
4291  *   Pointer to error structure.
4292  *
4293  * @return
4294  *    Table on success, NULL otherwise and rte_errno is set.
4295  */
4296 static struct rte_flow_template_table *
4297 flow_hw_table_create(struct rte_eth_dev *dev,
4298 		     const struct mlx5_flow_template_table_cfg *table_cfg,
4299 		     struct rte_flow_pattern_template *item_templates[],
4300 		     uint8_t nb_item_templates,
4301 		     struct rte_flow_actions_template *action_templates[],
4302 		     uint8_t nb_action_templates,
4303 		     struct rte_flow_error *error)
4304 {
4305 	struct rte_flow_error sub_error = {
4306 		.type = RTE_FLOW_ERROR_TYPE_NONE,
4307 		.cause = NULL,
4308 		.message = NULL,
4309 	};
4310 	struct mlx5_priv *priv = dev->data->dev_private;
4311 	struct mlx5dr_matcher_attr matcher_attr = {0};
4312 	struct rte_flow_template_table *tbl = NULL;
4313 	struct mlx5_flow_group *grp;
4314 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
4315 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
4316 	const struct rte_flow_template_table_attr *attr = &table_cfg->attr;
4317 	struct rte_flow_attr flow_attr = attr->flow_attr;
4318 	struct mlx5_flow_cb_ctx ctx = {
4319 		.dev = dev,
4320 		.error = &sub_error,
4321 		.data = &flow_attr,
4322 	};
4323 	struct mlx5_indexed_pool_config cfg = {
4324 		.size = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size(),
4325 		.trunk_size = 1 << 12,
4326 		.per_core_cache = 1 << 13,
4327 		.need_lock = 1,
4328 		.release_mem_en = !!priv->sh->config.reclaim_mode,
4329 		.malloc = mlx5_malloc,
4330 		.free = mlx5_free,
4331 		.type = "mlx5_hw_table_flow",
4332 	};
4333 	struct mlx5_list_entry *ge;
4334 	uint32_t i = 0, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
4335 	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
4336 	bool port_started = !!dev->data->dev_started;
4337 	int err;
4338 
4339 	/* HWS layer accepts only 1 item template with root table. */
4340 	if (!attr->flow_attr.group)
4341 		max_tpl = 1;
4342 	cfg.max_idx = nb_flows;
4343 	/* For a table with very few flows, disable the cache. */
4344 	if (nb_flows < cfg.trunk_size) {
4345 		cfg.per_core_cache = 0;
4346 		cfg.trunk_size = nb_flows;
4347 	} else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
4348 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
4349 	}
4350 	/* Check if too many templates are requested. */
4351 	if (nb_item_templates > max_tpl ||
4352 	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
4353 		rte_errno = EINVAL;
4354 		goto error;
4355 	}
4356 	/* Allocate the table memory. */
4357 	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
4358 	if (!tbl)
4359 		goto error;
4360 	tbl->cfg = *table_cfg;
4361 	/* Allocate flow indexed pool. */
4362 	tbl->flow = mlx5_ipool_create(&cfg);
4363 	if (!tbl->flow)
4364 		goto error;
4365 	/* Allocate rule indexed pool. */
4366 	cfg.size = 0;
4367 	cfg.type = "mlx5_hw_table_rule";
4368 	cfg.max_idx += priv->hw_q[0].size;
4369 	tbl->resource = mlx5_ipool_create(&cfg);
4370 	if (!tbl->resource)
4371 		goto error;
4372 	/* Register the flow group. */
4373 	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
4374 	if (!ge)
4375 		goto error;
4376 	grp = container_of(ge, struct mlx5_flow_group, entry);
4377 	tbl->grp = grp;
4378 	/* Prepare matcher information. */
4379 	matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
4380 	matcher_attr.priority = attr->flow_attr.priority;
4381 	matcher_attr.optimize_using_rule_idx = true;
4382 	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
4383 	matcher_attr.insert_mode = flow_hw_matcher_insert_mode_get(attr->insertion_type);
4384 	if (attr->hash_func == RTE_FLOW_TABLE_HASH_FUNC_CRC16) {
4385 		DRV_LOG(ERR, "16-bit checksum hash type is not supported");
4386 		rte_errno = ENOTSUP;
4387 		goto it_error;
4388 	}
4389 	matcher_attr.distribute_mode = flow_hw_matcher_distribute_mode_get(attr->hash_func);
4390 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
4391 	/* Parse hints information. */
4392 	if (attr->specialize) {
4393 		if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
4394 			matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_WIRE;
4395 		else if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
4396 			matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_VPORT;
4397 		else
4398 			DRV_LOG(INFO, "Unsupported hint value %x", attr->specialize);
4399 	}
4400 	/* Build the item template. */
4401 	for (i = 0; i < nb_item_templates; i++) {
4402 		uint32_t ret;
4403 
4404 		if ((flow_attr.ingress && !item_templates[i]->attr.ingress) ||
4405 		    (flow_attr.egress && !item_templates[i]->attr.egress) ||
4406 		    (flow_attr.transfer && !item_templates[i]->attr.transfer)) {
4407 			DRV_LOG(ERR, "pattern template and template table attribute mismatch");
4408 			rte_errno = EINVAL;
4409 			goto it_error;
4410 		}
4411 		if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
4412 			matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
4413 		ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
4414 					 __ATOMIC_RELAXED) + 1;
4415 		if (ret <= 1) {
4416 			rte_errno = EINVAL;
4417 			goto it_error;
4418 		}
4419 		mt[i] = item_templates[i]->mt;
4420 		tbl->its[i] = item_templates[i];
4421 	}
4422 	tbl->nb_item_templates = nb_item_templates;
4423 	/* Build the action template. */
4424 	err = mlx5_hw_build_template_table(dev, nb_action_templates,
4425 					   action_templates, at, tbl, &sub_error);
4426 	if (err) {
4427 		i = nb_item_templates;
4428 		goto it_error;
4429 	}
4430 	tbl->matcher = mlx5dr_matcher_create
4431 		(tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
4432 	if (!tbl->matcher)
4433 		goto at_error;
4434 	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
4435 		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
4436 		    MLX5DR_TABLE_TYPE_NIC_RX);
4437 	if (port_started)
4438 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
4439 	else
4440 		LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
4441 	return tbl;
4442 at_error:
4443 	for (i = 0; i < nb_action_templates; i++) {
4444 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4445 		__atomic_fetch_sub(&action_templates[i]->refcnt,
4446 				   1, __ATOMIC_RELAXED);
4447 	}
4448 	i = nb_item_templates;
4449 it_error:
4450 	while (i--)
4451 		__atomic_fetch_sub(&item_templates[i]->refcnt,
4452 				   1, __ATOMIC_RELAXED);
4453 error:
4454 	err = rte_errno;
4455 	if (tbl) {
4456 		if (tbl->grp)
4457 			mlx5_hlist_unregister(priv->sh->groups,
4458 					      &tbl->grp->entry);
4459 		if (tbl->resource)
4460 			mlx5_ipool_destroy(tbl->resource);
4461 		if (tbl->flow)
4462 			mlx5_ipool_destroy(tbl->flow);
4463 		mlx5_free(tbl);
4464 	}
4465 	if (error != NULL) {
4466 		if (sub_error.type == RTE_FLOW_ERROR_TYPE_NONE)
4467 			rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4468 					   "Failed to create template table");
4469 		else
4470 			rte_memcpy(error, &sub_error, sizeof(sub_error));
4471 	}
4472 	return NULL;
4473 }
4474 
4475 /**
4476  * Update flow template table.
4477  *
4478  * @param[in] dev
4479  *   Pointer to the rte_eth_dev structure.
4480  * @param[out] error
4481  *   Pointer to error structure.
4482  *
4483  * @return
4484  *    0 on success, negative value otherwise and rte_errno is set.
4485  */
4486 int
4487 flow_hw_table_update(struct rte_eth_dev *dev,
4488 		     struct rte_flow_error *error)
4489 {
4490 	struct mlx5_priv *priv = dev->data->dev_private;
4491 	struct rte_flow_template_table *tbl;
4492 
4493 	while ((tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo)) != NULL) {
4494 		if (flow_hw_actions_translate(dev, tbl, error))
4495 			return -1;
4496 		LIST_REMOVE(tbl, next);
4497 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
4498 	}
4499 	return 0;
4500 }
4501 
4502 /**
4503  * Translates group index specified by the user in @p attr to internal
4504  * group index.
4505  *
4506  * Translation is done by incrementing group index, so group n becomes n + 1.
4507  *
4508  * @param[in] dev
4509  *   Pointer to Ethernet device.
4510  * @param[in] cfg
4511  *   Pointer to the template table configuration.
4512  * @param[in] group
4513  *   Currently used group index (table group or jump destination).
4514  * @param[out] table_group
4515  *   Pointer to output group index.
4516  * @param[out] error
4517  *   Pointer to error structure.
4518  *
4519  * @return
4520  *   0 on success. Otherwise, returns negative error code, rte_errno is set
4521  *   and error structure is filled.
4522  */
4523 static int
4524 flow_hw_translate_group(struct rte_eth_dev *dev,
4525 			const struct mlx5_flow_template_table_cfg *cfg,
4526 			uint32_t group,
4527 			uint32_t *table_group,
4528 			struct rte_flow_error *error)
4529 {
4530 	struct mlx5_priv *priv = dev->data->dev_private;
4531 	struct mlx5_sh_config *config = &priv->sh->config;
4532 	const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
4533 
4534 	if (config->dv_esw_en &&
4535 	    priv->fdb_def_rule &&
4536 	    cfg->external &&
4537 	    flow_attr->transfer) {
4538 		if (group > MLX5_HW_MAX_TRANSFER_GROUP)
4539 			return rte_flow_error_set(error, EINVAL,
4540 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4541 						  NULL,
4542 						  "group index not supported");
4543 		*table_group = group + 1;
4544 	} else if (config->dv_esw_en &&
4545 		   (config->repr_matching || config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) &&
4546 		   cfg->external &&
4547 		   flow_attr->egress) {
4548 		/*
4549 		 * On E-Switch setups, default egress flow rules are inserted to allow
4550 		 * representor matching and/or preserving metadata across steering domains.
4551 		 * These flow rules are inserted in group 0 and this group is reserved by PMD
4552 		 * for these purposes.
4553 		 *
4554 		 * As a result, if representor matching or extended metadata mode is enabled,
4555 		 * group provided by the user must be incremented to avoid inserting flow rules
4556 		 * in group 0.
4557 		 */
4558 		if (group > MLX5_HW_MAX_EGRESS_GROUP)
4559 			return rte_flow_error_set(error, EINVAL,
4560 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4561 						  NULL,
4562 						  "group index not supported");
4563 		*table_group = group + 1;
4564 	} else {
4565 		*table_group = group;
4566 	}
4567 	return 0;
4568 }
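
/*
 * Worked example of the translation above (hypothetical setup): with E-Switch
 * enabled and the FDB default rule in place, an external transfer rule
 * requested for group 3 is installed in internal group 4, so that group 0
 * stays reserved for the PMD default rules. Non-external or non-transfer
 * tables keep the group index unchanged.
 */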
4569 
4570 /**
4571  * Create flow table.
4572  *
4573  * This function is a wrapper over @ref flow_hw_table_create(), which translates parameters
4574  * provided by user to proper internal values.
4575  *
4576  * @param[in] dev
4577  *   Pointer to Ethernet device.
4578  * @param[in] attr
4579  *   Pointer to the table attributes.
4580  * @param[in] item_templates
4581  *   Item template array to be bound to the table.
4582  * @param[in] nb_item_templates
4583  *   Number of item templates.
4584  * @param[in] action_templates
4585  *   Action template array to be bound to the table.
4586  * @param[in] nb_action_templates
4587  *   Number of action templates.
4588  * @param[out] error
4589  *   Pointer to error structure.
4590  *
4591  * @return
4592  *   Table on success. Otherwise NULL is returned, rte_errno is set
4593  *   and error structure is filled.
4594  */
4595 static struct rte_flow_template_table *
4596 flow_hw_template_table_create(struct rte_eth_dev *dev,
4597 			      const struct rte_flow_template_table_attr *attr,
4598 			      struct rte_flow_pattern_template *item_templates[],
4599 			      uint8_t nb_item_templates,
4600 			      struct rte_flow_actions_template *action_templates[],
4601 			      uint8_t nb_action_templates,
4602 			      struct rte_flow_error *error)
4603 {
4604 	struct mlx5_flow_template_table_cfg cfg = {
4605 		.attr = *attr,
4606 		.external = true,
4607 	};
4608 	uint32_t group = attr->flow_attr.group;
4609 
4610 	if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
4611 		return NULL;
4612 	return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
4613 				    action_templates, nb_action_templates, error);
4614 }
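
/*
 * Illustrative sketch (not part of the driver): creating a template table
 * through the generic API, which reaches the wrapper above. The pattern and
 * actions template handles are hypothetical and must exist beforehand.
 *
 *	struct rte_flow_template_table_attr attr = {
 *		.flow_attr = { .group = 1, .ingress = 1 },
 *		.nb_flows = 1 << 16,
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow_template_table *tbl;
 *
 *	tbl = rte_flow_template_table_create(port_id, &attr,
 *					     &pattern_tmpl, 1,
 *					     &actions_tmpl, 1, &err);
 */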
4615 
4616 /**
4617  * Destroy flow table.
4618  *
4619  * @param[in] dev
4620  *   Pointer to the rte_eth_dev structure.
4621  * @param[in] table
4622  *   Pointer to the table to be destroyed.
4623  * @param[out] error
4624  *   Pointer to error structure.
4625  *
4626  * @return
4627  *   0 on success, a negative errno value otherwise and rte_errno is set.
4628  */
4629 static int
4630 flow_hw_table_destroy(struct rte_eth_dev *dev,
4631 		      struct rte_flow_template_table *table,
4632 		      struct rte_flow_error *error)
4633 {
4634 	struct mlx5_priv *priv = dev->data->dev_private;
4635 	int i;
4636 	uint32_t fidx = 1;
4637 	uint32_t ridx = 1;
4638 
4639 	/* Build ipool allocated object bitmap. */
4640 	mlx5_ipool_flush_cache(table->resource);
4641 	mlx5_ipool_flush_cache(table->flow);
4642 	/* Check if ipool has allocated objects. */
4643 	if (table->refcnt ||
4644 	    mlx5_ipool_get_next(table->flow, &fidx) ||
4645 	    mlx5_ipool_get_next(table->resource, &ridx)) {
4646 		DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
4647 		return rte_flow_error_set(error, EBUSY,
4648 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4649 				   NULL,
4650 				   "table in use");
4651 	}
4652 	LIST_REMOVE(table, next);
4653 	for (i = 0; i < table->nb_item_templates; i++)
4654 		__atomic_fetch_sub(&table->its[i]->refcnt,
4655 				   1, __ATOMIC_RELAXED);
4656 	for (i = 0; i < table->nb_action_templates; i++) {
4657 		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
4658 		__atomic_fetch_sub(&table->ats[i].action_template->refcnt,
4659 				   1, __ATOMIC_RELAXED);
4660 	}
4661 	mlx5dr_matcher_destroy(table->matcher);
4662 	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
4663 	mlx5_ipool_destroy(table->resource);
4664 	mlx5_ipool_destroy(table->flow);
4665 	mlx5_free(table);
4666 	return 0;
4667 }
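
/*
 * Illustrative sketch (not part of the driver): the matching application-side
 * teardown once all rules of the table have been destroyed and their
 * completions pulled. Values are hypothetical.
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_template_table_destroy(port_id, tbl, &err))
 *		handle_destroy_error(&err);
 *
 * handle_destroy_error() is a hypothetical application callback.
 */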
4668 
4669 /**
4670  * Parse group's miss actions.
4671  *
4672  * @param[in] dev
4673  *   Pointer to the rte_eth_dev structure.
4674  * @param[in] cfg
4675  *   Pointer to the table_cfg structure.
4676  * @param[in] actions
4677  *   Array of actions to perform on group miss. Supported types:
4678  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
4679  * @param[out] dst_group_id
4680  *   Pointer to the destination group id output. Set to 0 if the action list
4681  *   contains only END, otherwise set to the JUMP destination group id.
4682  * @param[out] error
4683  *   Pointer to error structure.
4684  *
4685  * @return
4686  *   0 on success, a negative errno value otherwise and rte_errno is set.
4687  */
4688 
4689 static int
4690 flow_hw_group_parse_miss_actions(struct rte_eth_dev *dev,
4691 				 struct mlx5_flow_template_table_cfg *cfg,
4692 				 const struct rte_flow_action actions[],
4693 				 uint32_t *dst_group_id,
4694 				 struct rte_flow_error *error)
4695 {
4696 	const struct rte_flow_action_jump *jump_conf;
4697 	uint32_t temp = 0;
4698 	uint32_t i;
4699 
4700 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
4701 		switch (actions[i].type) {
4702 		case RTE_FLOW_ACTION_TYPE_VOID:
4703 			continue;
4704 		case RTE_FLOW_ACTION_TYPE_JUMP:
4705 			if (temp)
4706 				return rte_flow_error_set(error, ENOTSUP,
4707 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, actions,
4708 							  "Miss actions can contain only a single JUMP");
4709 
4710 			jump_conf = (const struct rte_flow_action_jump *)actions[i].conf;
4711 			if (!jump_conf)
4712 				return rte_flow_error_set(error, EINVAL,
4713 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4714 							  jump_conf, "Jump conf must not be NULL");
4715 
4716 			if (flow_hw_translate_group(dev, cfg, jump_conf->group, &temp, error))
4717 				return -rte_errno;
4718 
4719 			if (!temp)
4720 				return rte_flow_error_set(error, EINVAL,
4721 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4722 							  "Failed to set group miss actions - Invalid target group");
4723 			break;
4724 		default:
4725 			return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
4726 						  &actions[i], "Unsupported default miss action type");
4727 		}
4728 	}
4729 
4730 	*dst_group_id = temp;
4731 	return 0;
4732 }
4733 
4734 /**
4735  * Set group's miss group.
4736  *
4737  * @param[in] dev
4738  *   Pointer to the rte_eth_dev structure.
4739  * @param[in] cfg
4740  *   Pointer to the table_cfg structure.
4741  * @param[in] src_grp
4742  *   Pointer to source group structure.
4743  *   If NULL, a new group will be created based on the group id from cfg->attr.flow_attr.group.
4744  * @param[in] dst_grp
4745  *   Pointer to destination group structure.
4746  * @param[out] error
4747  *   Pointer to error structure.
4748  *
4749  * @return
4750  *   0 on success, a negative errno value otherwise and rte_errno is set.
4751  */
4752 
4753 static int
4754 flow_hw_group_set_miss_group(struct rte_eth_dev *dev,
4755 			     struct mlx5_flow_template_table_cfg *cfg,
4756 			     struct mlx5_flow_group *src_grp,
4757 			     struct mlx5_flow_group *dst_grp,
4758 			     struct rte_flow_error *error)
4759 {
4760 	struct rte_flow_error sub_error = {
4761 		.type = RTE_FLOW_ERROR_TYPE_NONE,
4762 		.cause = NULL,
4763 		.message = NULL,
4764 	};
4765 	struct mlx5_flow_cb_ctx ctx = {
4766 		.dev = dev,
4767 		.error = &sub_error,
4768 		.data = &cfg->attr.flow_attr,
4769 	};
4770 	struct mlx5_priv *priv = dev->data->dev_private;
4771 	struct mlx5_list_entry *ge;
4772 	bool ref = false;
4773 	int ret;
4774 
4775 	if (!dst_grp)
4776 		return -EINVAL;
4777 
4778 	/* If group doesn't exist - needs to be created. */
4779 	if (!src_grp) {
4780 		ge = mlx5_hlist_register(priv->sh->groups, cfg->attr.flow_attr.group, &ctx);
4781 		if (!ge)
4782 			return -rte_errno;
4783 
4784 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
4785 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
4786 		ref = true;
4787 	} else if (!src_grp->miss_group) {
4788 		/* If group exists, but has no miss actions - need to increase ref_cnt. */
4789 		/* If the group exists but has no miss actions, increase its ref_cnt. */
4790 		src_grp->entry.ref_cnt++;
4791 		ref = true;
4792 	}
4793 
4794 	ret = mlx5dr_table_set_default_miss(src_grp->tbl, dst_grp->tbl);
4795 	if (ret)
4796 		goto mlx5dr_error;
4797 
4798 	/* If the group existed and had old miss actions, its ref_cnt is already
4799 	 * correct; only the ref counter of the old miss group needs to be reduced.
4800 	 */
4801 	if (src_grp->miss_group)
4802 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->miss_group->entry);
4803 
4804 	src_grp->miss_group = dst_grp;
4805 	return 0;
4806 
4807 mlx5dr_error:
4808 	/* Reduce src_grp ref_cnt back & remove from grp list in case of mlx5dr error */
4809 	if (ref) {
4810 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->entry);
4811 		LIST_REMOVE(src_grp, next);
4812 	}
4813 
4814 	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4815 				  "Failed to set group miss actions");
4816 }
4817 
4818 /**
4819  * Unset group's miss group.
4820  *
4821  * @param[in] dev
4822  *   Pointer to the rte_eth_dev structure.
4823  * @param[in] grp
4824  *   Pointer to group structure.
4825  * @param[out] error
4826  *   Pointer to error structure.
4827  *
4828  * @return
4829  *   0 on success, a negative errno value otherwise and rte_errno is set.
4830  */
4831 
4832 static int
4833 flow_hw_group_unset_miss_group(struct rte_eth_dev *dev,
4834 			       struct mlx5_flow_group *grp,
4835 			       struct rte_flow_error *error)
4836 {
4837 	struct mlx5_priv *priv = dev->data->dev_private;
4838 	int ret;
4839 
4840 	/* If group doesn't exist - no need to change anything. */
4841 	if (!grp)
4842 		return 0;
4843 
4844 	/* If the group exists but its miss actions are already the default
4845 	 * behavior - no need to change anything.
4846 	 */
4847 	if (!grp->miss_group)
4848 		return 0;
4849 
4850 	ret = mlx5dr_table_set_default_miss(grp->tbl, NULL);
4851 	if (ret)
4852 		return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4853 					  "Failed to unset group miss actions");
4854 
4855 	mlx5_hlist_unregister(priv->sh->groups, &grp->miss_group->entry);
4856 	grp->miss_group = NULL;
4857 
4858 	LIST_REMOVE(grp, next);
4859 	mlx5_hlist_unregister(priv->sh->groups, &grp->entry);
4860 
4861 	return 0;
4862 }
4863 
4864 /**
4865  * Set group miss actions.
4866  *
4867  * @param[in] dev
4868  *   Pointer to the rte_eth_dev structure.
4869  * @param[in] group_id
4870  *   Group id.
4871  * @param[in] attr
4872  *   Pointer to group attributes structure.
4873  * @param[in] actions
4874  *   Array of actions to perform on group miss. Supported types:
4875  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
4876  * @param[out] error
4877  *   Pointer to error structure.
4878  *
4879  * @return
4880  *   0 on success, a negative errno value otherwise and rte_errno is set.
4881  */
4882 
4883 static int
4884 flow_hw_group_set_miss_actions(struct rte_eth_dev *dev,
4885 			       uint32_t group_id,
4886 			       const struct rte_flow_group_attr *attr,
4887 			       const struct rte_flow_action actions[],
4888 			       struct rte_flow_error *error)
4889 {
4890 	struct rte_flow_error sub_error = {
4891 		.type = RTE_FLOW_ERROR_TYPE_NONE,
4892 		.cause = NULL,
4893 		.message = NULL,
4894 	};
4895 	struct mlx5_flow_template_table_cfg cfg = {
4896 		.external = true,
4897 		.attr = {
4898 			.flow_attr = {
4899 				.group = group_id,
4900 				.ingress = attr->ingress,
4901 				.egress = attr->egress,
4902 				.transfer = attr->transfer,
4903 			},
4904 		},
4905 	};
4906 	struct mlx5_flow_cb_ctx ctx = {
4907 		.dev = dev,
4908 		.error = &sub_error,
4909 		.data = &cfg.attr.flow_attr,
4910 	};
4911 	struct mlx5_priv *priv = dev->data->dev_private;
4912 	struct mlx5_flow_group *src_grp = NULL;
4913 	struct mlx5_flow_group *dst_grp = NULL;
4914 	struct mlx5_list_entry *ge;
4915 	uint32_t dst_group_id = 0;
4916 	int ret;
4917 
4918 	if (flow_hw_translate_group(dev, &cfg, group_id, &group_id, error))
4919 		return -rte_errno;
4920 
4921 	if (!group_id)
4922 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4923 					  NULL, "Failed to set group miss actions - invalid group id");
4924 
4925 	ret = flow_hw_group_parse_miss_actions(dev, &cfg, actions, &dst_group_id, error);
4926 	if (ret)
4927 		return -rte_errno;
4928 
4929 	if (dst_group_id == group_id) {
4930 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4931 					  NULL, "Failed to set group miss actions - target group id must differ from group_id");
4932 	}
4933 
4934 	cfg.attr.flow_attr.group = group_id;
4935 	ge = mlx5_hlist_lookup(priv->sh->groups, group_id, &ctx);
4936 	if (ge)
4937 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
4938 
4939 	if (dst_group_id) {
4940 		/* Increase ref_cnt for new miss group. */
4941 		cfg.attr.flow_attr.group = dst_group_id;
4942 		ge = mlx5_hlist_register(priv->sh->groups, dst_group_id, &ctx);
4943 		if (!ge)
4944 			return -rte_errno;
4945 
4946 		dst_grp = container_of(ge, struct mlx5_flow_group, entry);
4947 
4948 		cfg.attr.flow_attr.group = group_id;
4949 		ret = flow_hw_group_set_miss_group(dev, &cfg, src_grp, dst_grp, error);
4950 		if (ret)
4951 			goto error;
4952 	} else {
4953 		return flow_hw_group_unset_miss_group(dev, src_grp, error);
4954 	}
4955 
4956 	return 0;
4957 
4958 error:
4959 	if (dst_grp)
4960 		mlx5_hlist_unregister(priv->sh->groups, &dst_grp->entry);
4961 	return -rte_errno;
4962 }
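
/*
 * Usage sketch (illustration only, not part of the driver): this handler is
 * reached through rte_flow_group_set_miss_actions(). The only supported
 * non-END miss action is a single JUMP, e.g. (group ids assumed):
 *
 *	struct rte_flow_group_attr gattr = { .ingress = 1 };
 *	struct rte_flow_action miss[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP,
 *		  .conf = &(struct rte_flow_action_jump){ .group = 2 } },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	rte_flow_group_set_miss_actions(port_id, 1, &gattr, miss, &error);
 *
 * Passing only VOID/END actions restores the default miss behavior.
 */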
4963 
4964 static bool
4965 flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
4966 			     enum rte_flow_field_id field)
4967 {
4968 	return action->src.field == field || action->dst.field == field;
4969 }
4970 
4971 static bool
4972 flow_hw_modify_field_is_geneve_opt(enum rte_flow_field_id field)
4973 {
4974 	return field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE ||
4975 	       field == RTE_FLOW_FIELD_GENEVE_OPT_CLASS ||
4976 	       field == RTE_FLOW_FIELD_GENEVE_OPT_DATA;
4977 }
4978 
4979 static bool
4980 flow_hw_modify_field_is_add_dst_valid(const struct rte_flow_action_modify_field *conf)
4981 {
4982 	if (conf->operation != RTE_FLOW_MODIFY_ADD)
4983 		return true;
4984 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
4985 	    conf->src.field == RTE_FLOW_FIELD_VALUE)
4986 		return true;
4987 	switch (conf->dst.field) {
4988 	case RTE_FLOW_FIELD_IPV4_TTL:
4989 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
4990 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
4991 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
4992 	case RTE_FLOW_FIELD_TAG:
4993 	case RTE_FLOW_FIELD_META:
4994 	case RTE_FLOW_FIELD_FLEX_ITEM:
4995 	case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
4996 	case RTE_FLOW_FIELD_IPV4_IHL:
4997 	case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
4998 	case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
4999 		return true;
5000 	default:
5001 		break;
5002 	}
5003 	return false;
5004 }
5005 
5006 static int
5007 flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
5008 				     const struct rte_flow_action *action,
5009 				     const struct rte_flow_action *mask,
5010 				     struct rte_flow_error *error)
5011 {
5012 	const struct rte_flow_action_modify_field *action_conf = action->conf;
5013 	const struct rte_flow_action_modify_field *mask_conf = mask->conf;
5014 	struct mlx5_priv *priv = dev->data->dev_private;
5015 	struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
5016 	int ret;
5017 
5018 	if (!mask_conf)
5019 		return rte_flow_error_set(error, EINVAL,
5020 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5021 					  "modify_field mask conf is missing");
5022 	if (action_conf->operation != mask_conf->operation)
5023 		return rte_flow_error_set(error, EINVAL,
5024 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5025 				"modify_field operation mask and template are not equal");
5026 	if (action_conf->dst.field != mask_conf->dst.field)
5027 		return rte_flow_error_set(error, EINVAL,
5028 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5029 				"destination field mask and template are not equal");
5030 	if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
5031 	    action_conf->dst.field == RTE_FLOW_FIELD_VALUE ||
5032 	    action_conf->dst.field == RTE_FLOW_FIELD_HASH_RESULT)
5033 		return rte_flow_error_set(error, EINVAL,
5034 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5035 				"immediate value, pointer and hash result cannot be used as destination");
5036 	ret = flow_validate_modify_field_level(&action_conf->dst, error);
5037 	if (ret)
5038 		return ret;
5039 	if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM &&
5040 	    !flow_hw_modify_field_is_geneve_opt(action_conf->dst.field)) {
5041 		if (action_conf->dst.tag_index &&
5042 		    !flow_modify_field_support_tag_array(action_conf->dst.field))
5043 			return rte_flow_error_set(error, EINVAL,
5044 					RTE_FLOW_ERROR_TYPE_ACTION, action,
5045 					"destination tag index is not supported");
5046 		if (action_conf->dst.class_id)
5047 			return rte_flow_error_set(error, EINVAL,
5048 					RTE_FLOW_ERROR_TYPE_ACTION, action,
5049 					"destination class id is not supported");
5050 	}
5051 	if (mask_conf->dst.level != UINT8_MAX)
5052 		return rte_flow_error_set(error, EINVAL,
5053 			RTE_FLOW_ERROR_TYPE_ACTION, action,
5054 			"destination encapsulation level must be fully masked");
5055 	if (mask_conf->dst.offset != UINT32_MAX)
5056 		return rte_flow_error_set(error, EINVAL,
5057 			RTE_FLOW_ERROR_TYPE_ACTION, action,
5058 			"destination offset must be fully masked");
5059 	if (action_conf->src.field != mask_conf->src.field)
5060 		return rte_flow_error_set(error, EINVAL,
5061 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5062 				"source field mask and template are not equal");
5063 	if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
5064 	    action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
5065 		if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM &&
5066 		    !flow_hw_modify_field_is_geneve_opt(action_conf->src.field)) {
5067 			if (action_conf->src.tag_index &&
5068 			    !flow_modify_field_support_tag_array(action_conf->src.field))
5069 				return rte_flow_error_set(error, EINVAL,
5070 					RTE_FLOW_ERROR_TYPE_ACTION, action,
5071 					"source tag index is not supported");
5072 			if (action_conf->src.class_id)
5073 				return rte_flow_error_set(error, EINVAL,
5074 					RTE_FLOW_ERROR_TYPE_ACTION, action,
5075 					"source class id is not supported");
5076 		}
5077 		if (mask_conf->src.level != UINT8_MAX)
5078 			return rte_flow_error_set(error, EINVAL,
5079 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5080 				"source encapsulation level must be fully masked");
5081 		if (mask_conf->src.offset != UINT32_MAX)
5082 			return rte_flow_error_set(error, EINVAL,
5083 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5084 				"source offset must be fully masked");
5085 		ret = flow_validate_modify_field_level(&action_conf->src, error);
5086 		if (ret)
5087 			return ret;
5088 	}
5089 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
5090 	     action_conf->dst.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
5091 	     action_conf->dst.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) ||
5092 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
5093 	     action_conf->src.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
5094 	     action_conf->src.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX))
5095 		return rte_flow_error_set(error, EINVAL,
5096 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5097 					  "tag index is out of range");
5098 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
5099 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->dst.tag_index) == REG_NON) ||
5100 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
5101 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->src.tag_index) == REG_NON))
5102 		return rte_flow_error_set(error, EINVAL,
5103 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5104 					  "tag index is out of range");
5105 	if (mask_conf->width != UINT32_MAX)
5106 		return rte_flow_error_set(error, EINVAL,
5107 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5108 				"modify_field width field must be fully masked");
5109 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
5110 		return rte_flow_error_set(error, EINVAL,
5111 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5112 				"modifying arbitrary place in a packet is not supported");
5113 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
5114 		return rte_flow_error_set(error, EINVAL,
5115 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5116 				"modifying vlan_type is not supported");
5117 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_RANDOM))
5118 		return rte_flow_error_set(error, EINVAL,
5119 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5120 				"modifying random value is not supported");
5121 	/*
5122 	 * GENEVE VNI modification is supported only when the GENEVE header is
5123 	 * parsed natively. When GENEVE options are supported, both the GENEVE
5124 	 * and options headers are parsed by the flex parser.
5125 	 */
5126 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI) &&
5127 	    attr->geneve_tlv_opt)
5128 		return rte_flow_error_set(error, EINVAL,
5129 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5130 				"modifying Geneve VNI is not supported when GENEVE opt is supported");
5131 	if (priv->tlv_options == NULL &&
5132 	    (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_TYPE) ||
5133 	     flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_CLASS) ||
5134 	     flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_DATA)))
5135 		return rte_flow_error_set(error, EINVAL,
5136 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5137 				"modifying Geneve TLV option is supported only after parser configuration");
5138 	/* Due to HW bug, tunnel MPLS header is read only. */
5139 	if (action_conf->dst.field == RTE_FLOW_FIELD_MPLS)
5140 		return rte_flow_error_set(error, EINVAL,
5141 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5142 				"MPLS cannot be used as destination");
5143 	/* The ADD operation is supported only for a subset of destination fields. */
5144 	if (!flow_hw_modify_field_is_add_dst_valid(action_conf))
5145 		return rte_flow_error_set(error, EINVAL,
5146 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5147 				"invalid add_field destination");
5148 	return 0;
5149 }
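
/*
 * Illustration only (assumed values): a MODIFY_FIELD template entry that
 * passes the validation above keeps the operation, destination field,
 * destination level/offset and width fully masked, e.g.:
 *
 *	conf = { .operation = RTE_FLOW_MODIFY_SET,
 *		 .dst = { .field = RTE_FLOW_FIELD_IPV4_DSCP },
 *		 .src = { .field = RTE_FLOW_FIELD_VALUE },
 *		 .width = 6 };
 *	mask = { .operation = RTE_FLOW_MODIFY_SET,
 *		 .dst = { .field = RTE_FLOW_FIELD_IPV4_DSCP,
 *			  .level = UINT8_MAX, .offset = UINT32_MAX },
 *		 .src = { .field = RTE_FLOW_FIELD_VALUE },
 *		 .width = UINT32_MAX };
 */
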
5150 static int
5151 flow_hw_validate_action_port_representor(struct rte_eth_dev *dev __rte_unused,
5152 					 const struct rte_flow_actions_template_attr *attr,
5153 					 const struct rte_flow_action *action,
5154 					 const struct rte_flow_action *mask,
5155 					 struct rte_flow_error *error)
5156 {
5157 	const struct rte_flow_action_ethdev *action_conf = NULL;
5158 	const struct rte_flow_action_ethdev *mask_conf = NULL;
5159 
5160 	/* If transfer is set, port has been validated as proxy port. */
5161 	if (!attr->transfer)
5162 		return rte_flow_error_set(error, EINVAL,
5163 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5164 					  "cannot use port_representor actions"
5165 					  " without an E-Switch");
5166 	if (!action || !mask)
5167 		return rte_flow_error_set(error, EINVAL,
5168 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5169 					  "action and mask configuration must be set");
5170 	action_conf = action->conf;
5171 	mask_conf = mask->conf;
5172 	if (!mask_conf || mask_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR ||
5173 	    !action_conf || action_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
5174 		return rte_flow_error_set(error, EINVAL,
5175 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5176 					  "only eswitch manager port 0xffff is"
5177 					  " supported");
5178 	return 0;
5179 }
5180 
5181 static int
5182 flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
5183 					 const struct rte_flow_action *action,
5184 					 const struct rte_flow_action *mask,
5185 					 struct rte_flow_error *error)
5186 {
5187 	const struct rte_flow_action_ethdev *action_conf = action->conf;
5188 	const struct rte_flow_action_ethdev *mask_conf = mask->conf;
5189 	struct mlx5_priv *priv = dev->data->dev_private;
5190 
5191 	if (!priv->sh->config.dv_esw_en)
5192 		return rte_flow_error_set(error, EINVAL,
5193 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5194 					  "cannot use represented_port actions"
5195 					  " without an E-Switch");
5196 	if (mask_conf && mask_conf->port_id) {
5197 		struct mlx5_priv *port_priv;
5198 		struct mlx5_priv *dev_priv;
5199 
5200 		if (!action_conf)
5201 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5202 						  action, "port index was not provided");
5203 		port_priv = mlx5_port_to_eswitch_info(action_conf->port_id, false);
5204 		if (!port_priv)
5205 			return rte_flow_error_set(error, rte_errno,
5206 						  RTE_FLOW_ERROR_TYPE_ACTION,
5207 						  action,
5208 						  "failed to obtain E-Switch"
5209 						  " info for port");
5210 		dev_priv = mlx5_dev_to_eswitch_info(dev);
5211 		if (!dev_priv)
5212 			return rte_flow_error_set(error, rte_errno,
5213 						  RTE_FLOW_ERROR_TYPE_ACTION,
5214 						  action,
5215 						  "failed to obtain E-Switch"
5216 						  " info for transfer proxy");
5217 		if (port_priv->domain_id != dev_priv->domain_id)
5218 			return rte_flow_error_set(error, rte_errno,
5219 						  RTE_FLOW_ERROR_TYPE_ACTION,
5220 						  action,
5221 						  "cannot forward to port from"
5222 						  " a different E-Switch");
5223 	}
5224 	return 0;
5225 }
5226 
5227 /**
5228  * Validate AGE action.
5229  *
5230  * @param[in] dev
5231  *   Pointer to rte_eth_dev structure.
5232  * @param[in] action
5233  *   Pointer to the indirect action.
5234  * @param[in] action_flags
5235  *   Holds the actions detected until now.
5236  * @param[in] fixed_cnt
5237  *   Indicator if this list has a fixed COUNT action.
5238  * @param[out] error
5239  *   Pointer to error structure.
5240  *
5241  * @return
5242  *   0 on success, a negative errno value otherwise and rte_errno is set.
5243  */
5244 static int
5245 flow_hw_validate_action_age(struct rte_eth_dev *dev,
5246 			    const struct rte_flow_action *action,
5247 			    uint64_t action_flags, bool fixed_cnt,
5248 			    struct rte_flow_error *error)
5249 {
5250 	struct mlx5_priv *priv = dev->data->dev_private;
5251 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
5252 
5253 	if (!priv->sh->cdev->config.devx)
5254 		return rte_flow_error_set(error, ENOTSUP,
5255 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5256 					  NULL, "AGE action not supported");
5257 	if (age_info->ages_ipool == NULL)
5258 		return rte_flow_error_set(error, EINVAL,
5259 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5260 					  "aging pool not initialized");
5261 	if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
5262 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
5263 		return rte_flow_error_set(error, EINVAL,
5264 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5265 					  "duplicate AGE actions set");
5266 	if (fixed_cnt)
5267 		return rte_flow_error_set(error, EINVAL,
5268 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5269 					  "AGE and fixed COUNT combination is not supported");
5270 	return 0;
5271 }
5272 
5273 /**
5274  * Validate count action.
5275  *
5276  * @param[in] dev
5277  *   Pointer to rte_eth_dev structure.
5278  * @param[in] action
5279  *   Pointer to the indirect action.
5280  * @param[in] mask
5281  *   Pointer to the indirect action mask.
5282  * @param[in] action_flags
5283  *   Holds the actions detected until now.
5284  * @param[out] error
5285  *   Pointer to error structure.
5286  *
5287  * @return
5288  *   0 on success, a negative errno value otherwise and rte_errno is set.
5289  */
5290 static int
5291 flow_hw_validate_action_count(struct rte_eth_dev *dev,
5292 			      const struct rte_flow_action *action,
5293 			      const struct rte_flow_action *mask,
5294 			      uint64_t action_flags,
5295 			      struct rte_flow_error *error)
5296 {
5297 	struct mlx5_priv *priv = dev->data->dev_private;
5298 	const struct rte_flow_action_count *count = mask->conf;
5299 
5300 	if (!priv->sh->cdev->config.devx)
5301 		return rte_flow_error_set(error, ENOTSUP,
5302 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5303 					  "count action not supported");
5304 	if (!priv->hws_cpool)
5305 		return rte_flow_error_set(error, EINVAL,
5306 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5307 					  "counters pool not initialized");
5308 	if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
5309 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
5310 		return rte_flow_error_set(error, EINVAL,
5311 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5312 					  "duplicate count actions set");
5313 	if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
5314 		return rte_flow_error_set(error, EINVAL,
5315 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
5316 					  "AGE and COUNT action shared by mask combination is not supported");
5317 	return 0;
5318 }
5319 
5320 /**
5321  * Validate meter_mark action.
5322  *
5323  * @param[in] dev
5324  *   Pointer to rte_eth_dev structure.
5325  * @param[in] action
5326  *   Pointer to the indirect action.
5327  * @param[out] error
5328  *   Pointer to error structure.
5329  *
5330  * @return
5331  *   0 on success, a negative errno value otherwise and rte_errno is set.
5332  */
5333 static int
5334 flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
5335 			      const struct rte_flow_action *action,
5336 			      struct rte_flow_error *error)
5337 {
5338 	struct mlx5_priv *priv = dev->data->dev_private;
5339 
5340 	RTE_SET_USED(action);
5341 
5342 	if (!priv->sh->cdev->config.devx)
5343 		return rte_flow_error_set(error, ENOTSUP,
5344 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5345 					  "meter_mark action not supported");
5346 	if (!priv->hws_mpool)
5347 		return rte_flow_error_set(error, EINVAL,
5348 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5349 					  "meter_mark pool not initialized");
5350 	return 0;
5351 }
5352 
5353 /**
5354  * Validate indirect action.
5355  *
5356  * @param[in] dev
5357  *   Pointer to rte_eth_dev structure.
5358  * @param[in] action
5359  *   Pointer to the indirect action.
5360  * @param[in] mask
5361  *   Pointer to the indirect action mask.
5362  * @param[in, out] action_flags
5363  *   Holds the actions detected until now.
5364  * @param[in, out] fixed_cnt
5365  *   Pointer to indicator if this list has a fixed COUNT action.
5366  * @param[out] error
5367  *   Pointer to error structure.
5368  *
5369  * @return
5370  *   0 on success, a negative errno value otherwise and rte_errno is set.
5371  */
5372 static int
5373 flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
5374 				 const struct rte_flow_action *action,
5375 				 const struct rte_flow_action *mask,
5376 				 uint64_t *action_flags, bool *fixed_cnt,
5377 				 struct rte_flow_error *error)
5378 {
5379 	uint32_t type;
5380 	int ret;
5381 
5382 	if (!mask)
5383 		return rte_flow_error_set(error, EINVAL,
5384 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5385 					  "Unable to determine indirect action type without a mask specified");
5386 	type = mask->type;
5387 	switch (type) {
5388 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
5389 		ret = flow_hw_validate_action_meter_mark(dev, mask, error);
5390 		if (ret < 0)
5391 			return ret;
5392 		*action_flags |= MLX5_FLOW_ACTION_METER;
5393 		break;
5394 	case RTE_FLOW_ACTION_TYPE_RSS:
5395 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
5396 		*action_flags |= MLX5_FLOW_ACTION_RSS;
5397 		break;
5398 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
5399 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
5400 		*action_flags |= MLX5_FLOW_ACTION_CT;
5401 		break;
5402 	case RTE_FLOW_ACTION_TYPE_COUNT:
5403 		if (action->conf && mask->conf) {
5404 			if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
5405 			    (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
5406 				/*
5407 				 * AGE cannot use an indirect counter which is
5408 				 * shared with other flow rules.
5409 				 */
5410 				return rte_flow_error_set(error, EINVAL,
5411 						  RTE_FLOW_ERROR_TYPE_ACTION,
5412 						  NULL,
5413 						  "AGE and fixed COUNT combination is not supported");
5414 			*fixed_cnt = true;
5415 		}
5416 		ret = flow_hw_validate_action_count(dev, action, mask,
5417 						    *action_flags, error);
5418 		if (ret < 0)
5419 			return ret;
5420 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
5421 		break;
5422 	case RTE_FLOW_ACTION_TYPE_AGE:
5423 		ret = flow_hw_validate_action_age(dev, action, *action_flags,
5424 						  *fixed_cnt, error);
5425 		if (ret < 0)
5426 			return ret;
5427 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
5428 		break;
5429 	case RTE_FLOW_ACTION_TYPE_QUOTA:
5430 		/* TODO: add proper quota verification */
5431 		*action_flags |= MLX5_FLOW_ACTION_QUOTA;
5432 		break;
5433 	default:
5434 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
5435 		return rte_flow_error_set(error, ENOTSUP,
5436 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
5437 					  "Unsupported indirect action type");
5438 	}
5439 	return 0;
5440 }
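
/*
 * Illustration only: for an INDIRECT entry in an actions template, the mask
 * entry is expected to carry the concrete action type so that the switch
 * above can classify it, e.g.:
 *
 *	actions[i] = { .type = RTE_FLOW_ACTION_TYPE_INDIRECT };
 *	masks[i]   = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *
 * A fully specified action/mask conf additionally marks the counter as fixed.
 */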
5441 
5442 /**
5443  * Validate ipv6_ext_push action.
5444  *
5445  * @param[in] dev
5446  *   Pointer to rte_eth_dev structure.
5447  * @param[in] action
5448  *   Pointer to the IPV6_EXT_PUSH action.
5449  * @param[out] error
5450  *   Pointer to error structure.
5451  *
5452  * @return
5453  *   0 on success, a negative errno value otherwise and rte_errno is set.
5454  */
5455 static int
5456 flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
5457 				      const struct rte_flow_action *action,
5458 				      struct rte_flow_error *error)
5459 {
5460 	const struct rte_flow_action_ipv6_ext_push *raw_push_data = action->conf;
5461 
5462 	if (!raw_push_data || !raw_push_data->size || !raw_push_data->data)
5463 		return rte_flow_error_set(error, EINVAL,
5464 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5465 					  "invalid ipv6_ext_push data");
5466 	if (raw_push_data->type != IPPROTO_ROUTING ||
5467 	    raw_push_data->size > MLX5_PUSH_MAX_LEN)
5468 		return rte_flow_error_set(error, EINVAL,
5469 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5470 					  "Unsupported ipv6_ext_push type or length");
5471 	return 0;
5472 }
5473 
5474 /**
5475  * Validate raw_encap action.
5476  *
5477  * @param[in] action
5478  *   Pointer to the RAW_ENCAP action.
5479  * @param[in] mask
5480  *   Pointer to the RAW_ENCAP action mask.
5481  * @param[out] error
5482  *   Pointer to error structure.
5483  *
5484  * @return
5485  *   0 on success, a negative errno value otherwise and rte_errno is set.
5486  */
5487 static int
5488 flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
5489 				  const struct rte_flow_action *mask,
5490 				  struct rte_flow_error *error)
5491 {
5492 	const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
5493 	const struct rte_flow_action_raw_encap *action_conf = action->conf;
5494 
5495 	if (!mask_conf || !mask_conf->size)
5496 		return rte_flow_error_set(error, EINVAL,
5497 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
5498 					  "raw_encap: size must be masked");
5499 	if (!action_conf || !action_conf->size)
5500 		return rte_flow_error_set(error, EINVAL,
5501 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5502 					  "raw_encap: invalid action configuration");
5503 	if (mask_conf->data && !action_conf->data)
5504 		return rte_flow_error_set(error, EINVAL,
5505 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5506 					  "raw_encap: masked data is missing");
5507 	return 0;
5508 }
5509 
5510 /**
5511  * Process `... / raw_decap / raw_encap / ...` actions sequence.
5512  * The PMD handles the sequence as a single encap or decap reformat action,
5513  * depending on the raw_encap configuration.
5514  *
5515  * The function assumes that the raw_decap / raw_encap location
5516  * in the actions template list complies with the relative HWS actions
5517  * order required by the reformat configuration:
5518  * ENCAP configuration must appear before [JUMP|DROP|PORT]
5519  * DECAP configuration must appear at the template head.
5520  */
5521 static uint64_t
5522 mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
5523 			       uint32_t encap_ind, uint64_t flags)
5524 {
5525 	const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
5526 
5527 	if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
5528 		return MLX5_FLOW_ACTION_ENCAP;
5529 	if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
5530 		return MLX5_FLOW_ACTION_ENCAP;
5531 	return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
5532 	       MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
5533 }
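
/*
 * Example (illustrative): for a template fragment
 *	... / raw_decap / raw_encap / jump / ...
 * a raw_encap size below MLX5_ENCAPSULATION_DECISION_SIZE makes the pair a
 * tunnel-L3-to-L2 decap reformat, while a size at or above it makes it an
 * L2-to-tunnel-L3 encap reformat (see the template translation below).
 */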
5534 
5535 static inline uint16_t
5536 flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
5537 				     struct rte_flow_action masks[],
5538 				     const struct rte_flow_action *mf_actions,
5539 				     const struct rte_flow_action *mf_masks,
5540 				     uint64_t flags, uint32_t act_num,
5541 				     uint32_t mf_num)
5542 {
5543 	uint32_t i, tail;
5544 
5545 	MLX5_ASSERT(actions && masks);
5546 	MLX5_ASSERT(mf_num > 0);
5547 	if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
5548 		/*
5549 		 * Application action template already has Modify Field.
5550 		 * Its location will be used in DR.
5551 		 * Expanded MF action can be added before the END.
5552 		 */
5553 		i = act_num - 1;
5554 		goto insert;
5555 	}
5556 	/**
5557 	 * Locate the first action positioned BEFORE the new MF.
5558 	 *
5559 	 * Search for a place to insert modify header
5560 	 * from the END action backwards:
5561 	 * 1. END is always present in actions array
5562 	 * 2. END location is always at action[act_num - 1]
5563 	 * 3. END always positioned AFTER modify field location
5564 	 *
5565 	 * Relative actions order is the same for RX, TX and FDB.
5566 	 *
5567 	 * Current actions order (draft-3)
5568 	 * @see action_order_arr[]
5569 	 */
5570 	for (i = act_num - 2; (int)i >= 0; i--) {
5571 		enum rte_flow_action_type type = actions[i].type;
5572 		uint64_t reformat_type;
5573 
5574 		if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
5575 			type = masks[i].type;
5576 		switch (type) {
5577 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5578 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5579 		case RTE_FLOW_ACTION_TYPE_DROP:
5580 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
5581 		case RTE_FLOW_ACTION_TYPE_JUMP:
5582 		case RTE_FLOW_ACTION_TYPE_QUEUE:
5583 		case RTE_FLOW_ACTION_TYPE_RSS:
5584 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5585 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
5586 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5587 		case RTE_FLOW_ACTION_TYPE_VOID:
5588 		case RTE_FLOW_ACTION_TYPE_END:
5589 			break;
5590 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5591 			reformat_type =
5592 				mlx5_decap_encap_reformat_type(actions, i,
5593 							       flags);
5594 			if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
5595 				i++;
5596 				goto insert;
5597 			}
5598 			if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
5599 				i--;
5600 			break;
5601 		default:
5602 			i++; /* new MF inserted AFTER actions[i] */
5603 			goto insert;
5604 		}
5605 	}
5606 	i = 0;
5607 insert:
5608 	tail = act_num - i; /* num action to move */
5609 	memmove(actions + i + mf_num, actions + i, sizeof(actions[0]) * tail);
5610 	memcpy(actions + i, mf_actions, sizeof(actions[0]) * mf_num);
5611 	memmove(masks + i + mf_num, masks + i, sizeof(masks[0]) * tail);
5612 	memcpy(masks + i, mf_masks, sizeof(masks[0]) * mf_num);
5613 	return i;
5614 }
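
/*
 * Example (illustrative): expanding a template
 *	mark / queue / end
 * with one internally generated modify_field action places it before the
 * fate action, yielding
 *	mark / modify_field / queue / end
 * and the function returns the insertion index (1 in this case).
 */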
5615 
5616 static int
5617 flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
5618 				  const
5619 				  struct rte_flow_actions_template_attr *attr,
5620 				  const struct rte_flow_action *action,
5621 				  const struct rte_flow_action *mask,
5622 				  struct rte_flow_error *error)
5623 {
5624 #define X_FIELD(ptr, t, f) (((ptr)->conf) && ((t *)((ptr)->conf))->f)
5625 
5626 	const bool masked_push =
5627 		X_FIELD(mask + MLX5_HW_VLAN_PUSH_TYPE_IDX,
5628 			const struct rte_flow_action_of_push_vlan, ethertype);
5629 	bool masked_param;
5630 
5631 	/*
5632 	 * Mandatory actions order:
5633 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
5634 	 */
5635 	RTE_SET_USED(dev);
5636 	RTE_SET_USED(attr);
5637 	/* Check that the mask matches OF_PUSH_VLAN */
5638 	if (mask[MLX5_HW_VLAN_PUSH_TYPE_IDX].type !=
5639 	    RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
5640 		return rte_flow_error_set(error, EINVAL,
5641 					  RTE_FLOW_ERROR_TYPE_ACTION,
5642 					  action, "OF_PUSH_VLAN: mask does not match");
5643 	/* Check that the second template and mask items are SET_VLAN_VID */
5644 	if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
5645 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID ||
5646 	    mask[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
5647 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
5648 		return rte_flow_error_set(error, EINVAL,
5649 					  RTE_FLOW_ERROR_TYPE_ACTION,
5650 					  action, "OF_PUSH_VLAN: invalid actions order");
5651 	masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_VID_IDX,
5652 			       const struct rte_flow_action_of_set_vlan_vid,
5653 			       vlan_vid);
5654 	/*
5655 	 * PMD requires the OF_SET_VLAN_VID mask to match OF_PUSH_VLAN
5656 	 */
5657 	if (masked_push ^ masked_param)
5658 		return rte_flow_error_set(error, EINVAL,
5659 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5660 					  "OF_SET_VLAN_VID: mask does not match OF_PUSH_VLAN");
5661 	if (is_of_vlan_pcp_present(action)) {
5662 		if (mask[MLX5_HW_VLAN_PUSH_PCP_IDX].type !=
5663 		     RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
5664 			return rte_flow_error_set(error, EINVAL,
5665 						  RTE_FLOW_ERROR_TYPE_ACTION,
5666 						  action, "OF_SET_VLAN_PCP: missing mask configuration");
5667 		masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_PCP_IDX,
5668 				       const struct
5669 				       rte_flow_action_of_set_vlan_pcp,
5670 				       vlan_pcp);
5671 		/*
5672 		 * PMD requires the OF_SET_VLAN_PCP mask to match OF_PUSH_VLAN
5673 		 */
5674 		if (masked_push ^ masked_param)
5675 			return rte_flow_error_set(error, EINVAL,
5676 						  RTE_FLOW_ERROR_TYPE_ACTION, action,
5677 						  "OF_SET_VLAN_PCP: mask does not match OF_PUSH_VLAN");
5678 	}
5679 	return 0;
5680 #undef X_FIELD
5681 }
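
/*
 * Illustration only (values assumed): the VLAN push sequence validated above
 * is expected to look like
 *	OF_PUSH_VLAN { .ethertype = RTE_BE16(0x88A8) } /
 *	OF_SET_VLAN_VID { .vlan_vid = RTE_BE16(10) } /
 *	[ OF_SET_VLAN_PCP { .vlan_pcp = 3 } ]
 * where the OF_SET_VLAN_VID (and OF_SET_VLAN_PCP, if present) masks match
 * the OF_PUSH_VLAN mask: all parameters masked or all left unmasked.
 */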
5682 
5683 static int
5684 flow_hw_validate_action_default_miss(struct rte_eth_dev *dev,
5685 				     const struct rte_flow_actions_template_attr *attr,
5686 				     uint64_t action_flags,
5687 				     struct rte_flow_error *error)
5688 {
5689 	/*
5690 	 * The private DEFAULT_MISS action is used internally for LACP in control
5691 	 * flows, so this validation could be skipped. It is kept for now since
5692 	 * the validation is done only once.
5693 	 */
5694 	struct mlx5_priv *priv = dev->data->dev_private;
5695 
5696 	if (!attr->ingress || attr->egress || attr->transfer)
5697 		return rte_flow_error_set(error, EINVAL,
5698 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5699 					  "DEFAULT MISS is only supported in ingress.");
5700 	if (!priv->hw_def_miss)
5701 		return rte_flow_error_set(error, EINVAL,
5702 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5703 					  "DEFAULT MISS action does not exist.");
5704 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
5705 		return rte_flow_error_set(error, EINVAL,
5706 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5707 					  "DEFAULT MISS should be the only termination.");
5708 	return 0;
5709 }
5710 
5711 static int
5712 mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
5713 			      const struct rte_flow_actions_template_attr *attr,
5714 			      const struct rte_flow_action actions[],
5715 			      const struct rte_flow_action masks[],
5716 			      uint64_t *act_flags,
5717 			      struct rte_flow_error *error)
5718 {
5719 	struct mlx5_priv *priv = dev->data->dev_private;
5720 	const struct rte_flow_action_count *count_mask = NULL;
5721 	bool fixed_cnt = false;
5722 	uint64_t action_flags = 0;
5723 	bool actions_end = false;
5724 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
5725 	int table_type;
5726 #endif
5727 	uint16_t i;
5728 	int ret;
5729 	const struct rte_flow_action_ipv6_ext_remove *remove_data;
5730 
5731 	/* FDB actions are only valid on the proxy port. */
5732 	if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
5733 		return rte_flow_error_set(error, EINVAL,
5734 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5735 					  NULL,
5736 					  "transfer actions are only valid to proxy port");
5737 	for (i = 0; !actions_end; ++i) {
5738 		const struct rte_flow_action *action = &actions[i];
5739 		const struct rte_flow_action *mask = &masks[i];
5740 
5741 		MLX5_ASSERT(i < MLX5_HW_MAX_ACTS);
5742 		if (action->type != RTE_FLOW_ACTION_TYPE_INDIRECT &&
5743 		    action->type != mask->type)
5744 			return rte_flow_error_set(error, ENOTSUP,
5745 						  RTE_FLOW_ERROR_TYPE_ACTION,
5746 						  action,
5747 						  "mask type does not match action type");
5748 		switch ((int)action->type) {
5749 		case RTE_FLOW_ACTION_TYPE_VOID:
5750 			break;
5751 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
5752 			break;
5753 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
5754 			ret = flow_hw_validate_action_indirect(dev, action,
5755 							       mask,
5756 							       &action_flags,
5757 							       &fixed_cnt,
5758 							       error);
5759 			if (ret < 0)
5760 				return ret;
5761 			break;
5762 		case RTE_FLOW_ACTION_TYPE_MARK:
5763 			/* TODO: Validation logic */
5764 			action_flags |= MLX5_FLOW_ACTION_MARK;
5765 			break;
5766 		case RTE_FLOW_ACTION_TYPE_DROP:
5767 			/* TODO: Validation logic */
5768 			action_flags |= MLX5_FLOW_ACTION_DROP;
5769 			break;
5770 		case RTE_FLOW_ACTION_TYPE_JUMP:
5771 			/* TODO: Validation logic */
5772 			action_flags |= MLX5_FLOW_ACTION_JUMP;
5773 			break;
5774 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
5775 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
5776 			if (priv->shared_host)
5777 				return rte_flow_error_set(error, ENOTSUP,
5778 							  RTE_FLOW_ERROR_TYPE_ACTION,
5779 							  action,
5780 							  "action not supported in guest port");
5781 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
5782 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
5783 				     MLX5DR_TABLE_TYPE_FDB);
5784 			if (!priv->hw_send_to_kernel[table_type])
5785 				return rte_flow_error_set(error, ENOTSUP,
5786 							  RTE_FLOW_ERROR_TYPE_ACTION,
5787 							  action,
5788 							  "action is not available");
5789 			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
5790 			break;
5791 #endif
5792 		case RTE_FLOW_ACTION_TYPE_QUEUE:
5793 			/* TODO: Validation logic */
5794 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
5795 			break;
5796 		case RTE_FLOW_ACTION_TYPE_RSS:
5797 			/* TODO: Validation logic */
5798 			action_flags |= MLX5_FLOW_ACTION_RSS;
5799 			break;
5800 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5801 			/* TODO: Validation logic */
5802 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
5803 			break;
5804 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5805 			/* TODO: Validation logic */
5806 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
5807 			break;
5808 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5809 			/* TODO: Validation logic */
5810 			action_flags |= MLX5_FLOW_ACTION_DECAP;
5811 			break;
5812 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5813 			/* TODO: Validation logic */
5814 			action_flags |= MLX5_FLOW_ACTION_DECAP;
5815 			break;
5816 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5817 			ret = flow_hw_validate_action_raw_encap(action, mask, error);
5818 			if (ret < 0)
5819 				return ret;
5820 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
5821 			break;
5822 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5823 			/* TODO: Validation logic */
5824 			action_flags |= MLX5_FLOW_ACTION_DECAP;
5825 			break;
5826 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
5827 			ret = flow_hw_validate_action_ipv6_ext_push(dev, action, error);
5828 			if (ret < 0)
5829 				return ret;
5830 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
5831 			break;
5832 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
5833 			remove_data = action->conf;
5834 			/* Remove action must be shared. */
5835 			if (remove_data->type != IPPROTO_ROUTING || !mask) {
5836 				DRV_LOG(ERR, "Only supports shared IPv6 routing remove");
5837 				return -EINVAL;
5838 			}
5839 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE;
5840 			break;
5841 		case RTE_FLOW_ACTION_TYPE_METER:
5842 			/* TODO: Validation logic */
5843 			action_flags |= MLX5_FLOW_ACTION_METER;
5844 			break;
5845 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
5846 			ret = flow_hw_validate_action_meter_mark(dev, action,
5847 								 error);
5848 			if (ret < 0)
5849 				return ret;
5850 			action_flags |= MLX5_FLOW_ACTION_METER;
5851 			break;
5852 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
5853 			ret = flow_hw_validate_action_modify_field(dev, action, mask,
5854 								   error);
5855 			if (ret < 0)
5856 				return ret;
5857 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
5858 			break;
5859 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5860 			ret = flow_hw_validate_action_represented_port
5861 					(dev, action, mask, error);
5862 			if (ret < 0)
5863 				return ret;
5864 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5865 			break;
5866 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
5867 			ret = flow_hw_validate_action_port_representor
5868 					(dev, attr, action, mask, error);
5869 			if (ret < 0)
5870 				return ret;
5871 			action_flags |= MLX5_FLOW_ACTION_PORT_REPRESENTOR;
5872 			break;
5873 		case RTE_FLOW_ACTION_TYPE_AGE:
5874 			if (count_mask && count_mask->id)
5875 				fixed_cnt = true;
5876 			ret = flow_hw_validate_action_age(dev, action,
5877 							  action_flags,
5878 							  fixed_cnt, error);
5879 			if (ret < 0)
5880 				return ret;
5881 			action_flags |= MLX5_FLOW_ACTION_AGE;
5882 			break;
5883 		case RTE_FLOW_ACTION_TYPE_COUNT:
5884 			ret = flow_hw_validate_action_count(dev, action, mask,
5885 							    action_flags,
5886 							    error);
5887 			if (ret < 0)
5888 				return ret;
5889 			count_mask = mask->conf;
5890 			action_flags |= MLX5_FLOW_ACTION_COUNT;
5891 			break;
5892 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
5893 			/* TODO: Validation logic */
5894 			action_flags |= MLX5_FLOW_ACTION_CT;
5895 			break;
5896 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5897 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5898 			break;
5899 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5900 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5901 			break;
5902 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5903 			ret = flow_hw_validate_action_push_vlan
5904 					(dev, attr, action, mask, error);
5905 			if (ret != 0)
5906 				return ret;
5907 			i += is_of_vlan_pcp_present(action) ?
5908 				MLX5_HW_VLAN_PUSH_PCP_IDX :
5909 				MLX5_HW_VLAN_PUSH_VID_IDX;
5910 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5911 			break;
5912 		case RTE_FLOW_ACTION_TYPE_END:
5913 			actions_end = true;
5914 			break;
5915 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5916 			ret = flow_hw_validate_action_default_miss(dev, attr,
5917 								   action_flags, error);
5918 			if (ret < 0)
5919 				return ret;
5920 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5921 			break;
5922 		default:
5923 			return rte_flow_error_set(error, ENOTSUP,
5924 						  RTE_FLOW_ERROR_TYPE_ACTION,
5925 						  action,
5926 						  "action not supported in template API");
5927 		}
5928 	}
5929 	if (act_flags != NULL)
5930 		*act_flags = action_flags;
5931 	return 0;
5932 }
5933 
5934 static int
5935 flow_hw_actions_validate(struct rte_eth_dev *dev,
5936 			 const struct rte_flow_actions_template_attr *attr,
5937 			 const struct rte_flow_action actions[],
5938 			 const struct rte_flow_action masks[],
5939 			 struct rte_flow_error *error)
5940 {
5941 	return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, error);
5942 }
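
/*
 * Usage sketch (application side, illustration only): the actions/masks
 * arrays validated above originate from rte_flow_actions_template_create(),
 * e.g. a simple drop-and-count template:
 *
 *	const struct rte_flow_action acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	at = rte_flow_actions_template_create(port_id, &at_attr, acts, masks,
 *					      &error);
 */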
5943 
5944 
5945 static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
5946 	[RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
5947 	[RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
5948 	[RTE_FLOW_ACTION_TYPE_JUMP] = MLX5DR_ACTION_TYP_TBL,
5949 	[RTE_FLOW_ACTION_TYPE_QUEUE] = MLX5DR_ACTION_TYP_TIR,
5950 	[RTE_FLOW_ACTION_TYPE_RSS] = MLX5DR_ACTION_TYP_TIR,
5951 	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
5952 	[RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
5953 	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
5954 	[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
5955 	[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
5956 	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
5957 	[RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = MLX5DR_ACTION_TYP_MISS,
5958 	[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
5959 	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
5960 	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
5961 	[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
5962 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
5963 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
5964 };
5965 
5966 static inline void
5967 action_template_set_type(struct rte_flow_actions_template *at,
5968 			 enum mlx5dr_action_type *action_types,
5969 			 unsigned int action_src, uint16_t *curr_off,
5970 			 enum mlx5dr_action_type type)
5971 {
5972 	at->dr_off[action_src] = *curr_off;
5973 	action_types[*curr_off] = type;
5974 	*curr_off = *curr_off + 1;
5975 }
5976 
5977 static int
5978 flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
5979 					  enum mlx5dr_action_type *action_types,
5980 					  uint16_t *curr_off, uint16_t *cnt_off,
5981 					  struct rte_flow_actions_template *at)
5982 {
5983 	switch (type) {
5984 	case RTE_FLOW_ACTION_TYPE_RSS:
5985 		action_template_set_type(at, action_types, action_src, curr_off,
5986 					 MLX5DR_ACTION_TYP_TIR);
5987 		break;
5988 	case RTE_FLOW_ACTION_TYPE_AGE:
5989 	case RTE_FLOW_ACTION_TYPE_COUNT:
5990 		/*
5991 		 * Both AGE and COUNT actions need a counter; the first one fills
5992 		 * the action_types array, and the second only saves the offset.
5993 		 */
5994 		if (*cnt_off == UINT16_MAX) {
5995 			*cnt_off = *curr_off;
5996 			action_template_set_type(at, action_types,
5997 						 action_src, curr_off,
5998 						 MLX5DR_ACTION_TYP_CTR);
5999 		}
6000 		at->dr_off[action_src] = *cnt_off;
6001 		break;
6002 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6003 		action_template_set_type(at, action_types, action_src, curr_off,
6004 					 MLX5DR_ACTION_TYP_ASO_CT);
6005 		break;
6006 	case RTE_FLOW_ACTION_TYPE_QUOTA:
6007 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
6008 		action_template_set_type(at, action_types, action_src, curr_off,
6009 					 MLX5DR_ACTION_TYP_ASO_METER);
6010 		break;
6011 	default:
6012 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
6013 		return -EINVAL;
6014 	}
6015 	return 0;
6016 }
6017 
6018 
6019 static int
6020 flow_hw_template_actions_list(struct rte_flow_actions_template *at,
6021 			      unsigned int action_src,
6022 			      enum mlx5dr_action_type *action_types,
6023 			      uint16_t *curr_off, uint16_t *cnt_off)
6024 {
6025 	int ret;
6026 	const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
6027 	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
6028 	const union {
6029 		struct mlx5_indlst_legacy *legacy;
6030 		struct rte_flow_action_list_handle *handle;
6031 	} indlst_obj = { .handle = indlst_conf->handle };
6032 	enum mlx5dr_action_type type;
6033 
6034 	switch (list_type) {
6035 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
6036 		ret = flow_hw_dr_actions_template_handle_shared
6037 			(indlst_obj.legacy->legacy_type, action_src,
6038 			 action_types, curr_off, cnt_off, at);
6039 		if (ret)
6040 			return ret;
6041 		break;
6042 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
6043 		action_template_set_type(at, action_types, action_src, curr_off,
6044 					 MLX5DR_ACTION_TYP_DEST_ARRAY);
6045 		break;
6046 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
6047 		type = ((struct mlx5_hw_encap_decap_action *)
6048 			(indlst_conf->handle))->action_type;
6049 		action_template_set_type(at, action_types, action_src, curr_off, type);
6050 		break;
6051 	default:
6052 		DRV_LOG(ERR, "Unsupported indirect list type");
6053 		return -EINVAL;
6054 	}
6055 	return 0;
6056 }
6057 
6058 /**
6059  * Create DR action template based on a provided sequence of flow actions.
6060  *
6061  * @param[in] dev
6062  *   Pointer to the rte_eth_dev structure.
6063  * @param[in] at
6064  *   Pointer to flow actions template to be updated.
6065  *
6066  * @return
6067  *   DR action template pointer on success and action offsets in @p at are updated.
6068  *   NULL otherwise.
6069  */
6070 static struct mlx5dr_action_template *
6071 flow_hw_dr_actions_template_create(struct rte_eth_dev *dev,
6072 				   struct rte_flow_actions_template *at)
6073 {
6074 	struct mlx5dr_action_template *dr_template;
6075 	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { MLX5DR_ACTION_TYP_LAST };
6076 	unsigned int i;
6077 	uint16_t curr_off;
6078 	enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
6079 	uint16_t reformat_off = UINT16_MAX;
6080 	uint16_t mhdr_off = UINT16_MAX;
6081 	uint16_t recom_off = UINT16_MAX;
6082 	uint16_t cnt_off = UINT16_MAX;
6083 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
6084 	int ret;
6085 
6086 	for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
6087 		const struct rte_flow_action_raw_encap *raw_encap_data;
6088 		size_t data_size;
6089 		enum mlx5dr_action_type type;
6090 
6091 		if (curr_off >= MLX5_HW_MAX_ACTS)
6092 			goto err_actions_num;
6093 		switch ((int)at->actions[i].type) {
6094 		case RTE_FLOW_ACTION_TYPE_VOID:
6095 			break;
6096 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
6097 			ret = flow_hw_template_actions_list(at, i, action_types,
6098 							    &curr_off, &cnt_off);
6099 			if (ret)
6100 				return NULL;
6101 			break;
6102 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
6103 			ret = flow_hw_dr_actions_template_handle_shared
6104 				(at->masks[i].type, i, action_types,
6105 				 &curr_off, &cnt_off, at);
6106 			if (ret)
6107 				return NULL;
6108 			break;
6109 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6110 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6111 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6112 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6113 			MLX5_ASSERT(reformat_off == UINT16_MAX);
6114 			reformat_off = curr_off++;
6115 			reformat_act_type = mlx5_hw_dr_action_types[at->actions[i].type];
6116 			break;
6117 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
6118 			MLX5_ASSERT(recom_off == UINT16_MAX);
6119 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
6120 			recom_off = curr_off++;
6121 			break;
6122 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
6123 			MLX5_ASSERT(recom_off == UINT16_MAX);
6124 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
6125 			recom_off = curr_off++;
6126 			break;
6127 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6128 			raw_encap_data = at->actions[i].conf;
6129 			data_size = raw_encap_data->size;
6130 			if (reformat_off != UINT16_MAX) {
6131 				reformat_act_type = data_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
6132 					MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
6133 					MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
6134 			} else {
6135 				reformat_off = curr_off++;
6136 				reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
6137 			}
6138 			break;
6139 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6140 			reformat_off = curr_off++;
6141 			reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
6142 			break;
6143 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6144 			if (mhdr_off == UINT16_MAX) {
6145 				mhdr_off = curr_off++;
6146 				type = mlx5_hw_dr_action_types[at->actions[i].type];
6147 				action_types[mhdr_off] = type;
6148 			}
6149 			break;
6150 		case RTE_FLOW_ACTION_TYPE_METER:
6151 			at->dr_off[i] = curr_off;
6152 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
6153 			if (curr_off >= MLX5_HW_MAX_ACTS)
6154 				goto err_actions_num;
6155 			action_types[curr_off++] = MLX5DR_ACTION_TYP_TBL;
6156 			break;
6157 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6158 			type = mlx5_hw_dr_action_types[at->actions[i].type];
6159 			at->dr_off[i] = curr_off;
6160 			action_types[curr_off++] = type;
6161 			i += is_of_vlan_pcp_present(at->actions + i) ?
6162 				MLX5_HW_VLAN_PUSH_PCP_IDX :
6163 				MLX5_HW_VLAN_PUSH_VID_IDX;
6164 			break;
6165 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
6166 			at->dr_off[i] = curr_off;
6167 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
6168 			if (curr_off >= MLX5_HW_MAX_ACTS)
6169 				goto err_actions_num;
6170 			break;
6171 		case RTE_FLOW_ACTION_TYPE_AGE:
6172 		case RTE_FLOW_ACTION_TYPE_COUNT:
6173 			/*
6174 			 * Both AGE and COUNT actions need a counter; the first
6175 			 * one fills the action_types array, and the second only
6176 			 * saves the offset.
6177 			 */
6178 			if (cnt_off == UINT16_MAX) {
6179 				cnt_off = curr_off++;
6180 				action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
6181 			}
6182 			at->dr_off[i] = cnt_off;
6183 			break;
6184 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6185 			at->dr_off[i] = curr_off;
6186 			action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS;
6187 			break;
6188 		default:
6189 			type = mlx5_hw_dr_action_types[at->actions[i].type];
6190 			at->dr_off[i] = curr_off;
6191 			action_types[curr_off++] = type;
6192 			break;
6193 		}
6194 	}
6195 	if (curr_off >= MLX5_HW_MAX_ACTS)
6196 		goto err_actions_num;
6197 	if (mhdr_off != UINT16_MAX)
6198 		at->mhdr_off = mhdr_off;
6199 	if (reformat_off != UINT16_MAX) {
6200 		at->reformat_off = reformat_off;
6201 		action_types[reformat_off] = reformat_act_type;
6202 	}
6203 	if (recom_off != UINT16_MAX) {
6204 		at->recom_off = recom_off;
6205 		action_types[recom_off] = recom_type;
6206 	}
6207 	dr_template = mlx5dr_action_template_create(action_types);
6208 	if (dr_template) {
6209 		at->dr_actions_num = curr_off;
6210 	} else {
6211 		DRV_LOG(ERR, "Failed to create DR action template: %d", rte_errno);
6212 		return NULL;
6213 	}
6214 	/* Create srh flex parser for remove anchor. */
6215 	if ((recom_type == MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT ||
6216 	     recom_type == MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) &&
6217 	    mlx5_alloc_srh_flex_parser(dev)) {
6218 		DRV_LOG(ERR, "Failed to create srv6 flex parser");
6219 		claim_zero(mlx5dr_action_template_destroy(dr_template));
6220 		return NULL;
6221 	}
6222 	return dr_template;
6223 err_actions_num:
6224 	DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed in template",
6225 		curr_off, MLX5_HW_MAX_ACTS);
6226 	return NULL;
6227 }
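
/*
 * Example (illustrative): an actions template
 *	count / modify_field / queue / end
 * is translated above into the DR action type array
 *	{ CTR, MODIFY_HDR, TIR, LAST }
 * with the offsets recorded in at->dr_off[] (and in at->mhdr_off /
 * at->reformat_off for modify header and reformat actions).
 */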
6228 
6229 static void
6230 flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
6231 		     struct rte_flow_action *ra,
6232 		     struct rte_flow_action *rm,
6233 		     struct rte_flow_action_modify_field *spec,
6234 		     struct rte_flow_action_modify_field *mask,
6235 		     int set_vlan_vid_ix)
6236 {
6237 	struct rte_flow_error error;
6238 	const bool masked = rm[set_vlan_vid_ix].conf &&
6239 		(((const struct rte_flow_action_of_set_vlan_vid *)
6240 			rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
6241 	const struct rte_flow_action_of_set_vlan_vid *conf =
6242 		ra[set_vlan_vid_ix].conf;
6243 	rte_be16_t vid = masked ? conf->vlan_vid : 0;
6244 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
6245 					       NULL, &error);
6246 	*spec = (typeof(*spec)) {
6247 		.operation = RTE_FLOW_MODIFY_SET,
6248 		.dst = {
6249 			.field = RTE_FLOW_FIELD_VLAN_ID,
6250 			.level = 0, .offset = 0,
6251 		},
6252 		.src = {
6253 			.field = RTE_FLOW_FIELD_VALUE,
6254 			.level = vid,
6255 			.offset = 0,
6256 		},
6257 		.width = width,
6258 	};
6259 	*mask = (typeof(*mask)) {
6260 		.operation = RTE_FLOW_MODIFY_SET,
6261 		.dst = {
6262 			.field = RTE_FLOW_FIELD_VLAN_ID,
6263 			.level = 0xff, .offset = 0xffffffff,
6264 		},
6265 		.src = {
6266 			.field = RTE_FLOW_FIELD_VALUE,
6267 			.level = masked ? (1U << width) - 1 : 0,
6268 			.offset = 0,
6269 		},
6270 		.width = 0xffffffff,
6271 	};
6272 	ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
6273 	ra[set_vlan_vid_ix].conf = spec;
6274 	rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
6275 	rm[set_vlan_vid_ix].conf = mask;
6276 }
6277 
6278 static __rte_always_inline int
6279 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
6280 			       struct mlx5_hw_q_job *job,
6281 			       struct mlx5_action_construct_data *act_data,
6282 			       const struct mlx5_hw_actions *hw_acts,
6283 			       const struct rte_flow_action *action)
6284 {
6285 	struct rte_flow_error error;
6286 	rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
6287 			   action->conf)->vlan_vid;
6288 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
6289 					       NULL, &error);
6290 	struct rte_flow_action_modify_field conf = {
6291 		.operation = RTE_FLOW_MODIFY_SET,
6292 		.dst = {
6293 			.field = RTE_FLOW_FIELD_VLAN_ID,
6294 			.level = 0, .offset = 0,
6295 		},
6296 		.src = {
6297 			.field = RTE_FLOW_FIELD_VALUE,
6298 			.level = vid,
6299 			.offset = 0,
6300 		},
6301 		.width = width,
6302 	};
6303 	struct rte_flow_action modify_action = {
6304 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6305 		.conf = &conf
6306 	};
6307 
6308 	return flow_hw_modify_field_construct(job, act_data, hw_acts,
6309 					      &modify_action);
6310 }
6311 
6312 static int
6313 flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
6314 			  struct rte_flow_item_flex_handle *handle,
6315 			  uint8_t *flex_item)
6316 {
6317 	int index = mlx5_flex_acquire_index(dev, handle, false);
6318 
6319 	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
6320 	if (index < 0)
6321 		return -1;
6322 	if (!(*flex_item & RTE_BIT32(index))) {
6323 		/* Don't count same flex item again. */
6324 		if (mlx5_flex_acquire_index(dev, handle, true) != index)
6325 			MLX5_ASSERT(false);
6326 		*flex_item |= (uint8_t)RTE_BIT32(index);
6327 	}
6328 	return 0;
6329 }
6330 
6331 static void
6332 flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
6333 {
6334 	while (*flex_item) {
6335 		int index = rte_bsf32(*flex_item);
6336 
6337 		mlx5_flex_release_index(dev, index);
6338 		*flex_item &= ~(uint8_t)RTE_BIT32(index);
6339 	}
6340 }
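
/*
 * The helpers above track acquired flex item indexes in a per-template byte
 * bitmap so that the same flex item is referenced only once. A brief worked
 * example with hypothetical indexes: acquiring indexes 0 and 2 sets
 * *flex_item to 0x05; the release loop then frees index rte_bsf32(0x05) == 0,
 * clears that bit leaving 0x04, frees index 2 and stops once the bitmap is 0.
 */
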
6341 static __rte_always_inline void
6342 flow_hw_actions_template_replace_container(const
6343 					   struct rte_flow_action *actions,
6344 					   const
6345 					   struct rte_flow_action *masks,
6346 					   struct rte_flow_action *new_actions,
6347 					   struct rte_flow_action *new_masks,
6348 					   struct rte_flow_action **ra,
6349 					   struct rte_flow_action **rm,
6350 					   uint32_t act_num)
6351 {
6352 	memcpy(new_actions, actions, sizeof(actions[0]) * act_num);
6353 	memcpy(new_masks, masks, sizeof(masks[0]) * act_num);
6354 	*ra = (void *)(uintptr_t)new_actions;
6355 	*rm = (void *)(uintptr_t)new_masks;
6356 }
6357 
6358 /* These constant actions are copied into the actions template by rte_flow_conv(). */
6359 
6360 static const struct rte_flow_action rx_meta_copy_action = {
6361 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6362 	.conf = &(struct rte_flow_action_modify_field){
6363 		.operation = RTE_FLOW_MODIFY_SET,
6364 		.dst = {
6365 			.field = (enum rte_flow_field_id)
6366 				MLX5_RTE_FLOW_FIELD_META_REG,
6367 			.tag_index = REG_B,
6368 		},
6369 		.src = {
6370 			.field = (enum rte_flow_field_id)
6371 				MLX5_RTE_FLOW_FIELD_META_REG,
6372 			.tag_index = REG_C_1,
6373 		},
6374 		.width = 32,
6375 	}
6376 };
6377 
6378 static const struct rte_flow_action rx_meta_copy_mask = {
6379 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6380 	.conf = &(struct rte_flow_action_modify_field){
6381 		.operation = RTE_FLOW_MODIFY_SET,
6382 		.dst = {
6383 			.field = (enum rte_flow_field_id)
6384 				MLX5_RTE_FLOW_FIELD_META_REG,
6385 			.level = UINT8_MAX,
6386 			.tag_index = UINT8_MAX,
6387 			.offset = UINT32_MAX,
6388 		},
6389 		.src = {
6390 			.field = (enum rte_flow_field_id)
6391 				MLX5_RTE_FLOW_FIELD_META_REG,
6392 			.level = UINT8_MAX,
6393 			.tag_index = UINT8_MAX,
6394 			.offset = UINT32_MAX,
6395 		},
6396 		.width = UINT32_MAX,
6397 	}
6398 };
6399 
6400 static const struct rte_flow_action quota_color_inc_action = {
6401 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6402 	.conf = &(struct rte_flow_action_modify_field) {
6403 		.operation = RTE_FLOW_MODIFY_ADD,
6404 		.dst = {
6405 			.field = RTE_FLOW_FIELD_METER_COLOR,
6406 			.level = 0, .offset = 0
6407 		},
6408 		.src = {
6409 			.field = RTE_FLOW_FIELD_VALUE,
6410 			.level = 1,
6411 			.offset = 0,
6412 		},
6413 		.width = 2
6414 	}
6415 };
6416 
6417 static const struct rte_flow_action quota_color_inc_mask = {
6418 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6419 	.conf = &(struct rte_flow_action_modify_field) {
6420 		.operation = RTE_FLOW_MODIFY_ADD,
6421 		.dst = {
6422 			.field = RTE_FLOW_FIELD_METER_COLOR,
6423 			.level = UINT8_MAX,
6424 			.tag_index = UINT8_MAX,
6425 			.offset = UINT32_MAX,
6426 		},
6427 		.src = {
6428 			.field = RTE_FLOW_FIELD_VALUE,
6429 			.level = 3,
6430 			.offset = 0
6431 		},
6432 		.width = UINT32_MAX
6433 	}
6434 };
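
/*
 * The constant action/mask pairs above are appended to a user's actions
 * template by flow_hw_actions_template_create() when needed: the quota pair
 * adds 1 to the 2-bit meter color field for templates using QUOTA, and the
 * Rx metadata pair copies REG_C_1 back to REG_B for QUEUE/RSS templates in
 * extended metadata mode. A hedged sketch of the expansion of a hypothetical
 * { QUOTA, QUEUE, END } template in legacy metadata mode (exact insertion
 * position is decided by flow_hw_template_expand_modify_field()):
 *
 *   actions: { QUOTA, MODIFY_FIELD(quota_color_inc_action), QUEUE, END }
 *   masks:   { QUOTA, MODIFY_FIELD(quota_color_inc_mask),   QUEUE, END }
 */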
6435 
6436 /**
6437  * Create flow action template.
6438  *
6439  * @param[in] dev
6440  *   Pointer to the rte_eth_dev structure.
6441  * @param[in] attr
6442  *   Pointer to the action template attributes.
6443  * @param[in] actions
6444  *   Associated actions (list terminated by the END action).
6445  * @param[in] masks
6446  *   List of actions that mark which of the actions' members are constant.
6447  * @param[out] error
6448  *   Pointer to error structure.
6449  *
6450  * @return
6451  *   Action template pointer on success, NULL otherwise and rte_errno is set.
6452  */
6453 static struct rte_flow_actions_template *
6454 flow_hw_actions_template_create(struct rte_eth_dev *dev,
6455 			const struct rte_flow_actions_template_attr *attr,
6456 			const struct rte_flow_action actions[],
6457 			const struct rte_flow_action masks[],
6458 			struct rte_flow_error *error)
6459 {
6460 	struct mlx5_priv *priv = dev->data->dev_private;
6461 	int len, act_len, mask_len;
6462 	unsigned int act_num;
6463 	unsigned int i;
6464 	struct rte_flow_actions_template *at = NULL;
6465 	uint16_t pos = UINT16_MAX;
6466 	uint64_t action_flags = 0;
6467 	struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
6468 	struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
6469 	struct rte_flow_action *ra = (void *)(uintptr_t)actions;
6470 	struct rte_flow_action *rm = (void *)(uintptr_t)masks;
6471 	int set_vlan_vid_ix = -1;
6472 	struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
6473 	struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
6474 	struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
6475 	struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
6476 	uint32_t expand_mf_num = 0;
6477 	uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
6478 
6479 	if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
6480 					  &action_flags, error))
6481 		return NULL;
6482 	for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
6483 		switch (ra[i].type) {
6484 		/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
6485 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6486 			i += is_of_vlan_pcp_present(ra + i) ?
6487 				MLX5_HW_VLAN_PUSH_PCP_IDX :
6488 				MLX5_HW_VLAN_PUSH_VID_IDX;
6489 			break;
6490 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6491 			set_vlan_vid_ix = i;
6492 			break;
6493 		default:
6494 			break;
6495 		}
6496 	}
6497 	/*
6498 	 * Count flow actions to allocate required space for storing DR offsets and to check
6499 	 * that the temporary buffer will not be overrun.
6500 	 */
6501 	act_num = i + 1;
6502 	if (act_num >= MLX5_HW_MAX_ACTS) {
6503 		rte_flow_error_set(error, EINVAL,
6504 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
6505 		return NULL;
6506 	}
6507 	if (set_vlan_vid_ix != -1) {
6508 		/* If temporary action buffer was not used, copy template actions to it */
6509 		if (ra == actions)
6510 			flow_hw_actions_template_replace_container(actions,
6511 								   masks,
6512 								   tmp_action,
6513 								   tmp_mask,
6514 								   &ra, &rm,
6515 								   act_num);
6516 		flow_hw_set_vlan_vid(dev, ra, rm,
6517 				     &set_vlan_vid_spec, &set_vlan_vid_mask,
6518 				     set_vlan_vid_ix);
6519 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
6520 	}
6521 	if (action_flags & MLX5_FLOW_ACTION_QUOTA) {
6522 		mf_actions[expand_mf_num] = quota_color_inc_action;
6523 		mf_masks[expand_mf_num] = quota_color_inc_mask;
6524 		expand_mf_num++;
6525 	}
6526 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
6527 	    priv->sh->config.dv_esw_en &&
6528 	    (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
6529 		/* Insert META copy */
6530 		mf_actions[expand_mf_num] = rx_meta_copy_action;
6531 		mf_masks[expand_mf_num] = rx_meta_copy_mask;
6532 		expand_mf_num++;
6533 	}
6534 	if (expand_mf_num) {
6535 		if (act_num + expand_mf_num > MLX5_HW_MAX_ACTS) {
6536 			rte_flow_error_set(error, E2BIG,
6537 					   RTE_FLOW_ERROR_TYPE_ACTION,
6538 					   NULL, "cannot expand: too many actions");
6539 			return NULL;
6540 		}
6541 		if (ra == actions)
6542 			flow_hw_actions_template_replace_container(actions,
6543 								   masks,
6544 								   tmp_action,
6545 								   tmp_mask,
6546 								   &ra, &rm,
6547 								   act_num);
6548 		/* Application should make sure only one Q/RSS action exists in one rule. */
6549 		pos = flow_hw_template_expand_modify_field(ra, rm,
6550 							   mf_actions,
6551 							   mf_masks,
6552 							   action_flags,
6553 							   act_num,
6554 							   expand_mf_num);
6555 		act_num += expand_mf_num;
6556 		for (i = pos + expand_mf_num; i < act_num; i++)
6557 			src_off[i] += expand_mf_num;
6558 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
6559 	}
6560 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
6561 	if (act_len <= 0)
6562 		return NULL;
6563 	len = RTE_ALIGN(act_len, 16);
6564 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);
6565 	if (mask_len <= 0)
6566 		return NULL;
6567 	len += RTE_ALIGN(mask_len, 16);
6568 	len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
6569 	len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
6570 	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
6571 			 RTE_CACHE_LINE_SIZE, rte_socket_id());
6572 	if (!at) {
6573 		rte_flow_error_set(error, ENOMEM,
6574 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6575 				   NULL,
6576 				   "cannot allocate action template");
6577 		return NULL;
6578 	}
6579 	/* The actions are stored in the first part of the buffer. */
6580 	at->attr = *attr;
6581 	at->actions = (struct rte_flow_action *)(at + 1);
6582 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,
6583 				len, ra, error);
6584 	if (act_len <= 0)
6585 		goto error;
6586 	/* The masks are stored in the second part. */
6587 	at->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);
6588 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
6589 				 len - act_len, rm, error);
6590 	if (mask_len <= 0)
6591 		goto error;
6592 	/* The DR action offsets are stored in the third part. */
6593 	at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
6594 	at->src_off = RTE_PTR_ADD(at->dr_off,
6595 				  RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
6596 	memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
6597 	at->actions_num = act_num;
6598 	for (i = 0; i < at->actions_num; ++i)
6599 		at->dr_off[i] = UINT16_MAX;
6600 	at->reformat_off = UINT16_MAX;
6601 	at->mhdr_off = UINT16_MAX;
6602 	at->recom_off = UINT16_MAX;
6603 	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
6604 	     actions++, masks++, i++) {
6605 		const struct rte_flow_action_modify_field *info;
6606 
6607 		switch (actions->type) {
6608 		/*
6609 		 * The mlx5 PMD stores the indirect action index directly in the action conf.
6610 		 * The rte_flow_conv() function copies the content behind the conf pointer,
6611 		 * so the indirect action index must be restored from the action conf here.
6612 		 */
6613 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
6614 			at->actions[i].conf = ra[i].conf;
6615 			at->masks[i].conf = rm[i].conf;
6616 			break;
6617 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6618 			info = actions->conf;
6619 			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
6620 			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
6621 						       &at->flex_item)) ||
6622 			    (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
6623 			     flow_hw_flex_item_acquire(dev, info->src.flex_handle,
6624 						       &at->flex_item)))
6625 				goto error;
6626 			break;
6627 		default:
6628 			break;
6629 		}
6630 	}
6631 	at->tmpl = flow_hw_dr_actions_template_create(dev, at);
6632 	if (!at->tmpl)
6633 		goto error;
6634 	at->action_flags = action_flags;
6635 	__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
6636 	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
6637 	return at;
6638 error:
6639 	if (at) {
6640 		if (at->tmpl)
6641 			mlx5dr_action_template_destroy(at->tmpl);
6642 		mlx5_free(at);
6643 	}
6644 	rte_flow_error_set(error, rte_errno,
6645 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6646 			   "Failed to create action template");
6647 	return NULL;
6648 }
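
/*
 * Hedged usage sketch (not part of the driver, port_id hypothetical): an
 * application creates an actions template through the generic rte_flow API,
 * which lands in flow_hw_actions_template_create() for this PMD. A fully
 * masked QUEUE action makes the queue index a template constant.
 *
 *   const struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 *   const struct rte_flow_action_queue queue_v = { .index = 0 };
 *   const struct rte_flow_action_queue queue_m = { .index = UINT16_MAX };
 *   const struct rte_flow_action acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_v },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   const struct rte_flow_action masks[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_m },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow_actions_template *at =
 *           rte_flow_actions_template_create(port_id, &at_attr, acts, masks,
 *                                            &err);
 */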
6649 
6650 /**
6651  * Destroy flow action template.
6652  *
6653  * @param[in] dev
6654  *   Pointer to the rte_eth_dev structure.
6655  * @param[in] template
6656  *   Pointer to the action template to be destroyed.
6657  * @param[out] error
6658  *   Pointer to error structure.
6659  *
6660  * @return
6661  *   0 on success, a negative errno value otherwise and rte_errno is set.
6662  */
6663 static int
6664 flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
6665 				 struct rte_flow_actions_template *template,
6666 				 struct rte_flow_error *error __rte_unused)
6667 {
6668 	uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
6669 			MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
6670 
6671 	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
6672 		DRV_LOG(WARNING, "Action template %p is still in use.",
6673 			(void *)template);
6674 		return rte_flow_error_set(error, EBUSY,
6675 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6676 				   NULL,
6677 				   "action template is in use");
6678 	}
6679 	if (template->action_flags & flag)
6680 		mlx5_free_srh_flex_parser(dev);
6681 	LIST_REMOVE(template, next);
6682 	flow_hw_flex_item_release(dev, &template->flex_item);
6683 	if (template->tmpl)
6684 		mlx5dr_action_template_destroy(template->tmpl);
6685 	mlx5_free(template);
6686 	return 0;
6687 }
6688 
6689 static uint32_t
6690 flow_hw_count_items(const struct rte_flow_item *items)
6691 {
6692 	const struct rte_flow_item *curr_item;
6693 	uint32_t nb_items;
6694 
6695 	nb_items = 0;
6696 	for (curr_item = items; curr_item->type != RTE_FLOW_ITEM_TYPE_END; ++curr_item)
6697 		++nb_items;
6698 	return ++nb_items;
6699 }
6700 
6701 static struct rte_flow_item *
6702 flow_hw_prepend_item(const struct rte_flow_item *items,
6703 		     const uint32_t nb_items,
6704 		     const struct rte_flow_item *new_item,
6705 		     struct rte_flow_error *error)
6706 {
6707 	struct rte_flow_item *copied_items;
6708 	size_t size;
6709 
6710 	/* Allocate new array of items. */
6711 	size = sizeof(*copied_items) * (nb_items + 1);
6712 	copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());
6713 	if (!copied_items) {
6714 		rte_flow_error_set(error, ENOMEM,
6715 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6716 				   NULL,
6717 				   "cannot allocate item template");
6718 		return NULL;
6719 	}
6720 	/* Put new item at the beginning and copy the rest. */
6721 	copied_items[0] = *new_item;
6722 	rte_memcpy(&copied_items[1], items, sizeof(*items) * nb_items);
6723 	return copied_items;
6724 }
6725 
6726 static inline bool
6727 flow_hw_item_compare_field_supported(enum rte_flow_field_id field)
6728 {
6729 	switch (field) {
6730 	case RTE_FLOW_FIELD_TAG:
6731 	case RTE_FLOW_FIELD_META:
6732 	case RTE_FLOW_FIELD_VALUE:
6733 		return true;
6734 	default:
6735 		break;
6736 	}
6737 	return false;
6738 }
6739 
6740 static int
6741 flow_hw_validate_item_compare(const struct rte_flow_item *item,
6742 			      struct rte_flow_error *error)
6743 {
6744 	const struct rte_flow_item_compare *comp_m = item->mask;
6745 	const struct rte_flow_item_compare *comp_v = item->spec;
6746 
6747 	if (unlikely(!comp_m))
6748 		return rte_flow_error_set(error, EINVAL,
6749 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6750 				   NULL,
6751 				   "compare item mask is missing");
6752 	if (comp_m->width != UINT32_MAX)
6753 		return rte_flow_error_set(error, EINVAL,
6754 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6755 				   NULL,
6756 				   "compare item only supports full mask");
6757 	if (!flow_hw_item_compare_field_supported(comp_m->a.field) ||
6758 	    !flow_hw_item_compare_field_supported(comp_m->b.field))
6759 		return rte_flow_error_set(error, ENOTSUP,
6760 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6761 				   NULL,
6762 				   "compare item field not supported");
6763 	if (comp_m->a.field == RTE_FLOW_FIELD_VALUE &&
6764 	    comp_m->b.field == RTE_FLOW_FIELD_VALUE)
6765 		return rte_flow_error_set(error, EINVAL,
6766 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6767 				   NULL,
6768 				   "comparison between two immediate values is not valid");
6769 	if (comp_v) {
6770 		if (comp_v->operation != comp_m->operation ||
6771 		    comp_v->a.field != comp_m->a.field ||
6772 		    comp_v->b.field != comp_m->b.field)
6773 			return rte_flow_error_set(error, EINVAL,
6774 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6775 					   NULL,
6776 					   "compare item spec/mask do not match");
6777 		if ((comp_v->width & comp_m->width) != 32)
6778 			return rte_flow_error_set(error, EINVAL,
6779 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6780 					   NULL,
6781 					   "compare item only supports full mask");
6782 	}
6783 	return 0;
6784 }
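
/*
 * Hedged usage sketch (not part of the driver; field and enumerator choices
 * are assumptions): a COMPARE item accepted by the validation above needs a
 * full mask width and at most one immediate VALUE operand, e.g. comparing a
 * TAG register against an immediate value.
 *
 *   struct rte_flow_item_compare cmp_v = {
 *           .operation = RTE_FLOW_ITEM_COMPARE_EQ,
 *           .a = { .field = RTE_FLOW_FIELD_TAG, .tag_index = 0 },
 *           .b = { .field = RTE_FLOW_FIELD_VALUE },
 *           .width = 32,
 *   };
 *   struct rte_flow_item_compare cmp_m = {
 *           .operation = RTE_FLOW_ITEM_COMPARE_EQ,
 *           .a = { .field = RTE_FLOW_FIELD_TAG },
 *           .b = { .field = RTE_FLOW_FIELD_VALUE },
 *           .width = UINT32_MAX,
 *   };
 *   const struct rte_flow_item cmp_item = {
 *           .type = RTE_FLOW_ITEM_TYPE_COMPARE,
 *           .spec = &cmp_v,
 *           .mask = &cmp_m,
 *   };
 */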
6785 
6786 static int
6787 flow_hw_pattern_validate(struct rte_eth_dev *dev,
6788 			 const struct rte_flow_pattern_template_attr *attr,
6789 			 const struct rte_flow_item items[],
6790 			 struct rte_flow_error *error)
6791 {
6792 	struct mlx5_priv *priv = dev->data->dev_private;
6793 	int i, tag_idx;
6794 	bool items_end = false;
6795 	uint32_t tag_bitmap = 0;
6796 	int ret;
6797 
6798 	if (!attr->ingress && !attr->egress && !attr->transfer)
6799 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6800 					  "at least one of the direction attributes"
6801 					  " must be specified");
6802 	if (priv->sh->config.dv_esw_en) {
6803 		MLX5_ASSERT(priv->master || priv->representor);
6804 		if (priv->master) {
6805 			if ((attr->ingress && attr->egress) ||
6806 			    (attr->ingress && attr->transfer) ||
6807 			    (attr->egress && attr->transfer))
6808 				return rte_flow_error_set(error, EINVAL,
6809 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6810 							  "only one direction attribute at once"
6811 							  " can be used on transfer proxy port");
6812 		} else {
6813 			if (attr->transfer)
6814 				return rte_flow_error_set(error, EINVAL,
6815 							  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
6816 							  "transfer attribute cannot be used with"
6817 							  " port representors");
6818 			if (attr->ingress && attr->egress)
6819 				return rte_flow_error_set(error, EINVAL,
6820 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6821 							  "ingress and egress direction attributes"
6822 							  " cannot be used at the same time on"
6823 							  " port representors");
6824 		}
6825 	} else {
6826 		if (attr->transfer)
6827 			return rte_flow_error_set(error, EINVAL,
6828 						  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
6829 						  "transfer attribute cannot be used when"
6830 						  " E-Switch is disabled");
6831 	}
6832 	for (i = 0; !items_end; i++) {
6833 		int type = items[i].type;
6834 
6835 		switch (type) {
6836 		case RTE_FLOW_ITEM_TYPE_TAG:
6837 		{
6838 			const struct rte_flow_item_tag *tag =
6839 				(const struct rte_flow_item_tag *)items[i].spec;
6840 
6841 			if (tag == NULL)
6842 				return rte_flow_error_set(error, EINVAL,
6843 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6844 							  NULL,
6845 							  "Tag spec is NULL");
6846 			if (tag->index >= MLX5_FLOW_HW_TAGS_MAX &&
6847 			    tag->index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
6848 				return rte_flow_error_set(error, EINVAL,
6849 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6850 							  NULL,
6851 							  "Invalid tag index");
6852 			tag_idx = flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, tag->index);
6853 			if (tag_idx == REG_NON)
6854 				return rte_flow_error_set(error, EINVAL,
6855 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6856 							  NULL,
6857 							  "Unsupported tag index");
6858 			if (tag_bitmap & (1 << tag_idx))
6859 				return rte_flow_error_set(error, EINVAL,
6860 							  RTE_FLOW_ERROR_TYPE_ITEM,
6861 							  NULL,
6862 							  "Duplicated tag index");
6863 			tag_bitmap |= 1 << tag_idx;
6864 			break;
6865 		}
6866 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6867 		{
6868 			const struct rte_flow_item_tag *tag =
6869 				(const struct rte_flow_item_tag *)items[i].spec;
6870 			uint16_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
6871 
6872 			if (!((1 << (tag->index - REG_C_0)) & regcs))
6873 				return rte_flow_error_set(error, EINVAL,
6874 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6875 							  NULL,
6876 							  "Unsupported internal tag index");
6877 			if (tag_bitmap & (1 << tag->index))
6878 				return rte_flow_error_set(error, EINVAL,
6879 							  RTE_FLOW_ERROR_TYPE_ITEM,
6880 							  NULL,
6881 							  "Duplicated tag index");
6882 			tag_bitmap |= 1 << tag->index;
6883 			break;
6884 		}
6885 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
6886 			if (attr->ingress && priv->sh->config.repr_matching)
6887 				return rte_flow_error_set(error, EINVAL,
6888 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6889 						  "represented port item cannot be used"
6890 						  " when ingress attribute is set");
6891 			if (attr->egress)
6892 				return rte_flow_error_set(error, EINVAL,
6893 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6894 						  "represented port item cannot be used"
6895 						  " when egress attribute is set");
6896 			break;
6897 		case RTE_FLOW_ITEM_TYPE_META:
6898 			if (!priv->sh->config.dv_esw_en ||
6899 			    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS) {
6900 				if (attr->ingress)
6901 					return rte_flow_error_set(error, EINVAL,
6902 								  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6903 								  "META item is not supported"
6904 								  " on current FW with ingress"
6905 								  " attribute");
6906 			}
6907 			break;
6908 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
6909 		{
6910 			int reg = flow_hw_get_reg_id(dev,
6911 						     RTE_FLOW_ITEM_TYPE_METER_COLOR,
6912 						     0);
6913 			if (reg == REG_NON)
6914 				return rte_flow_error_set(error, EINVAL,
6915 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6916 							  NULL,
6917 							  "Unsupported meter color register");
6918 			break;
6919 		}
6920 		case RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY:
6921 		{
6922 			if (!priv->sh->lag_rx_port_affinity_en)
6923 				return rte_flow_error_set(error, EINVAL,
6924 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6925 							  "Unsupported aggregated affinity with older FW");
6926 			if ((attr->transfer && priv->fdb_def_rule) || attr->egress)
6927 				return rte_flow_error_set(error, EINVAL,
6928 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6929 							  "Aggregated affinity item not supported"
6930 							  " with egress or transfer"
6931 							  " attribute");
6932 			break;
6933 		}
6934 		case RTE_FLOW_ITEM_TYPE_COMPARE:
6935 		{
6936 			ret = flow_hw_validate_item_compare(&items[i], error);
6937 			if (ret)
6938 				return ret;
6939 			break;
6940 		}
6941 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
6942 		{
6943 			int ret;
6944 
6945 			ret = mlx5_flow_geneve_tlv_option_validate(priv,
6946 								   &items[i],
6947 								   error);
6948 			if (ret < 0)
6949 				return ret;
6950 			break;
6951 		}
6952 		case RTE_FLOW_ITEM_TYPE_VOID:
6953 		case RTE_FLOW_ITEM_TYPE_ETH:
6954 		case RTE_FLOW_ITEM_TYPE_VLAN:
6955 		case RTE_FLOW_ITEM_TYPE_IPV4:
6956 		case RTE_FLOW_ITEM_TYPE_IPV6:
6957 		case RTE_FLOW_ITEM_TYPE_UDP:
6958 		case RTE_FLOW_ITEM_TYPE_TCP:
6959 		case RTE_FLOW_ITEM_TYPE_GTP:
6960 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6961 		case RTE_FLOW_ITEM_TYPE_VXLAN:
6962 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6963 		case RTE_FLOW_ITEM_TYPE_MPLS:
6964 		case RTE_FLOW_ITEM_TYPE_GENEVE:
6965 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
6966 		case RTE_FLOW_ITEM_TYPE_GRE:
6967 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6968 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
6969 		case RTE_FLOW_ITEM_TYPE_ICMP:
6970 		case RTE_FLOW_ITEM_TYPE_ICMP6:
6971 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
6972 		case RTE_FLOW_ITEM_TYPE_QUOTA:
6973 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
6974 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
6975 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
6976 		case RTE_FLOW_ITEM_TYPE_ESP:
6977 		case RTE_FLOW_ITEM_TYPE_FLEX:
6978 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
6979 		case RTE_FLOW_ITEM_TYPE_PTYPE:
6980 		case RTE_FLOW_ITEM_TYPE_RANDOM:
6981 			break;
6982 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
6983 			/*
6984 			 * Integrity flow item validation requires access to
6985 			 * both item mask and spec.
6986 			 * Current HWS model allows item mask in pattern
6987 			 * template and item spec in flow rule.
6988 			 */
6989 			break;
6990 		case RTE_FLOW_ITEM_TYPE_END:
6991 			items_end = true;
6992 			break;
6993 		default:
6994 			return rte_flow_error_set(error, EINVAL,
6995 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6996 						  NULL,
6997 						  "Unsupported item type");
6998 		}
6999 	}
7000 	return 0;
7001 }
7002 
7003 static bool
7004 flow_hw_pattern_has_sq_match(const struct rte_flow_item *items)
7005 {
7006 	unsigned int i;
7007 
7008 	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i)
7009 		if (items[i].type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ)
7010 			return true;
7011 	return false;
7012 }
7013 
7014 /**
7015  * Create flow item template.
7016  *
7017  * @param[in] dev
7018  *   Pointer to the rte_eth_dev structure.
7019  * @param[in] attr
7020  *   Pointer to the item template attributes.
7021  * @param[in] items
7022  *   The template item pattern.
7023  * @param[out] error
7024  *   Pointer to error structure.
7025  *
7026  * @return
7027  *  Item template pointer on success, NULL otherwise and rte_errno is set.
7028  */
7029 static struct rte_flow_pattern_template *
7030 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
7031 			     const struct rte_flow_pattern_template_attr *attr,
7032 			     const struct rte_flow_item items[],
7033 			     struct rte_flow_error *error)
7034 {
7035 	struct mlx5_priv *priv = dev->data->dev_private;
7036 	struct rte_flow_pattern_template *it;
7037 	struct rte_flow_item *copied_items = NULL;
7038 	const struct rte_flow_item *tmpl_items;
7039 	uint32_t orig_item_nb;
7040 	struct rte_flow_item port = {
7041 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
7042 		.mask = &rte_flow_item_ethdev_mask,
7043 	};
7044 	struct rte_flow_item_tag tag_v = {
7045 		.data = 0,
7046 		.index = REG_C_0,
7047 	};
7048 	struct rte_flow_item_tag tag_m = {
7049 		.data = flow_hw_tx_tag_regc_mask(dev),
7050 		.index = 0xff,
7051 	};
7052 	struct rte_flow_item tag = {
7053 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
7054 		.spec = &tag_v,
7055 		.mask = &tag_m,
7056 		.last = NULL
7057 	};
7058 	unsigned int i = 0;
7059 
7060 	if (flow_hw_pattern_validate(dev, attr, items, error))
7061 		return NULL;
7062 	orig_item_nb = flow_hw_count_items(items);
7063 	if (priv->sh->config.dv_esw_en &&
7064 	    priv->sh->config.repr_matching &&
7065 	    attr->ingress && !attr->egress && !attr->transfer) {
7066 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
7067 		if (!copied_items)
7068 			return NULL;
7069 		tmpl_items = copied_items;
7070 	} else if (priv->sh->config.dv_esw_en &&
7071 		   priv->sh->config.repr_matching &&
7072 		   !attr->ingress && attr->egress && !attr->transfer) {
7073 		if (flow_hw_pattern_has_sq_match(items)) {
7074 			DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
7075 				       "pattern template", dev->data->port_id);
7076 			tmpl_items = items;
7077 			goto setup_pattern_template;
7078 		}
7079 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
7080 		if (!copied_items)
7081 			return NULL;
7082 		tmpl_items = copied_items;
7083 	} else {
7084 		tmpl_items = items;
7085 	}
7086 setup_pattern_template:
7087 	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
7088 	if (!it) {
7089 		if (copied_items)
7090 			mlx5_free(copied_items);
7091 		rte_flow_error_set(error, ENOMEM,
7092 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7093 				   NULL,
7094 				   "cannot allocate item template");
7095 		return NULL;
7096 	}
7097 	it->attr = *attr;
7098 	it->orig_item_nb = orig_item_nb;
7099 	it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
7100 	if (!it->mt) {
7101 		if (copied_items)
7102 			mlx5_free(copied_items);
7103 		mlx5_free(it);
7104 		rte_flow_error_set(error, rte_errno,
7105 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7106 				   NULL,
7107 				   "cannot create match template");
7108 		return NULL;
7109 	}
7110 	it->item_flags = flow_hw_matching_item_flags_get(tmpl_items);
7111 	if (copied_items) {
7112 		if (attr->ingress)
7113 			it->implicit_port = true;
7114 		else if (attr->egress)
7115 			it->implicit_tag = true;
7116 		mlx5_free(copied_items);
7117 	}
7118 	/* Either the inner or the outer IPv6 routing extension can be matched, not both. */
7119 	if (it->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
7120 			      MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
7121 		if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
7122 		     (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) ||
7123 		    (mlx5_alloc_srh_flex_parser(dev))) {
7124 			claim_zero(mlx5dr_match_template_destroy(it->mt));
7125 			mlx5_free(it);
7126 			rte_flow_error_set(error, rte_errno,
7127 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7128 					   "cannot create IPv6 routing extension support");
7129 			return NULL;
7130 		}
7131 	}
7132 	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
7133 		switch (items[i].type) {
7134 		case RTE_FLOW_ITEM_TYPE_FLEX: {
7135 			const struct rte_flow_item_flex *spec =
7136 				(const struct rte_flow_item_flex *)items[i].spec;
7137 			struct rte_flow_item_flex_handle *handle = spec->handle;
7138 
7139 			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
7140 				rte_flow_error_set(error, rte_errno,
7141 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7142 						   "Failed to acquire flex item");
7143 				goto error;
7144 			}
7145 			break;
7146 		}
7147 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: {
7148 			const struct rte_flow_item_geneve_opt *spec = items[i].spec;
7149 
7150 			if (mlx5_geneve_tlv_option_register(priv, spec,
7151 							    &it->geneve_opt_mng)) {
7152 				rte_flow_error_set(error, rte_errno,
7153 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7154 						   "Failed to register GENEVE TLV option");
7155 				goto error;
7156 			}
7157 			break;
7158 		}
7159 		default:
7160 			break;
7161 		}
7162 	}
7163 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
7164 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
7165 	return it;
7166 error:
7167 	flow_hw_flex_item_release(dev, &it->flex_item);
7168 	mlx5_geneve_tlv_options_unregister(priv, &it->geneve_opt_mng);
7169 	claim_zero(mlx5dr_match_template_destroy(it->mt));
7170 	mlx5_free(it);
7171 	return NULL;
7172 }
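
/*
 * Hedged usage sketch (not part of the driver, port_id hypothetical): an
 * application creates a pattern template through the generic rte_flow API,
 * which lands in flow_hw_pattern_template_create() for this PMD.
 *
 *   const struct rte_flow_pattern_template_attr pt_attr = {
 *           .relaxed_matching = 0,
 *           .ingress = 1,
 *   };
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .mask = &rte_flow_item_eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow_pattern_template *pt =
 *           rte_flow_pattern_template_create(port_id, &pt_attr, pattern,
 *                                            &err);
 */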
7173 
7174 /**
7175  * Destroy flow item template.
7176  *
7177  * @param[in] dev
7178  *   Pointer to the rte_eth_dev structure.
7179  * @param[in] template
7180  *   Pointer to the item template to be destroyed.
7181  * @param[out] error
7182  *   Pointer to error structure.
7183  *
7184  * @return
7185  *   0 on success, a negative errno value otherwise and rte_errno is set.
7186  */
7187 static int
7188 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
7189 			      struct rte_flow_pattern_template *template,
7190 			      struct rte_flow_error *error __rte_unused)
7191 {
7192 	struct mlx5_priv *priv = dev->data->dev_private;
7193 
7194 	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
7195 		DRV_LOG(WARNING, "Item template %p is still in use.",
7196 			(void *)template);
7197 		return rte_flow_error_set(error, EBUSY,
7198 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7199 				   NULL,
7200 				   "item template is in use");
7201 	}
7202 	if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
7203 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
7204 		mlx5_free_srh_flex_parser(dev);
7205 	LIST_REMOVE(template, next);
7206 	flow_hw_flex_item_release(dev, &template->flex_item);
7207 	mlx5_geneve_tlv_options_unregister(priv, &template->geneve_opt_mng);
7208 	claim_zero(mlx5dr_match_template_destroy(template->mt));
7209 	mlx5_free(template);
7210 	return 0;
7211 }
7212 
7213 /**
7214  * Get information about HWS pre-configurable resources.
7215  *
7216  * @param[in] dev
7217  *   Pointer to the rte_eth_dev structure.
7218  * @param[out] port_info
7219  *   Pointer to port information.
7220  * @param[out] queue_info
7221  *   Pointer to queue information.
7222  * @param[out] error
7223  *   Pointer to error structure.
7224  *
7225  * @return
7226  *   0 on success, a negative errno value otherwise and rte_errno is set.
7227  */
7228 static int
7229 flow_hw_info_get(struct rte_eth_dev *dev,
7230 		 struct rte_flow_port_info *port_info,
7231 		 struct rte_flow_queue_info *queue_info,
7232 		 struct rte_flow_error *error __rte_unused)
7233 {
7234 	struct mlx5_priv *priv = dev->data->dev_private;
7235 	uint16_t port_id = dev->data->port_id;
7236 	struct rte_mtr_capabilities mtr_cap;
7237 	int ret;
7238 
7239 	memset(port_info, 0, sizeof(*port_info));
7240 	/* The low-level layer does not limit the queue size. */
7241 	port_info->max_nb_queues = UINT32_MAX;
7242 	queue_info->max_size = UINT32_MAX;
7243 
7244 	memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
7245 	ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
7246 	if (!ret)
7247 		port_info->max_nb_meters = mtr_cap.n_max;
7248 	port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
7249 	port_info->max_nb_aging_objects = port_info->max_nb_counters;
7250 	return 0;
7251 }
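
/*
 * Hedged usage sketch (not part of the driver, port_id hypothetical): an
 * application queries these limits with rte_flow_info_get() before sizing
 * the rte_flow_configure() call.
 *
 *   struct rte_flow_port_info port_info;
 *   struct rte_flow_queue_info queue_info;
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) == 0)
 *           printf("max meters: %u\n", port_info.max_nb_meters);
 */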
7252 
7253 /**
7254  * Create group callback.
7255  *
7256  * @param[in] tool_ctx
7257  *   Pointer to the hash list related context.
7258  * @param[in] cb_ctx
7259  *   Pointer to the group creation context.
7260  *
7261  * @return
7262  *   Group entry on success, NULL otherwise and rte_errno is set.
7263  */
7264 struct mlx5_list_entry *
7265 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
7266 {
7267 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
7268 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7269 	struct rte_eth_dev *dev = ctx->dev;
7270 	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
7271 	struct mlx5_priv *priv = dev->data->dev_private;
7272 	struct mlx5dr_table_attr dr_tbl_attr = {0};
7273 	struct rte_flow_error *error = ctx->error;
7274 	struct mlx5_flow_group *grp_data;
7275 	struct mlx5dr_table *tbl = NULL;
7276 	struct mlx5dr_action *jump;
7277 	uint32_t idx = 0;
7278 
7279 	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
7280 	if (!grp_data) {
7281 		rte_flow_error_set(error, ENOMEM,
7282 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7283 				   NULL,
7284 				   "cannot allocate flow table data entry");
7285 		return NULL;
7286 	}
7287 	dr_tbl_attr.level = attr->group;
7288 	if (attr->transfer)
7289 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
7290 	else if (attr->egress)
7291 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
7292 	else
7293 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
7294 	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
7295 	if (!tbl)
7296 		goto error;
7297 	grp_data->tbl = tbl;
7298 	if (attr->group) {
7299 		/* Jump action to be used by non-root tables. */
7300 		jump = mlx5dr_action_create_dest_table
7301 			(priv->dr_ctx, tbl,
7302 			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
7303 		if (!jump)
7304 			goto error;
7305 		grp_data->jump.hws_action = jump;
7306 		/* Jump action to be used by the root table. */
7307 		jump = mlx5dr_action_create_dest_table
7308 			(priv->dr_ctx, tbl,
7309 			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
7310 					 [dr_tbl_attr.type]);
7311 		if (!jump)
7312 			goto error;
7313 		grp_data->jump.root_action = jump;
7314 	}
7315 	grp_data->dev = dev;
7316 	grp_data->idx = idx;
7317 	grp_data->group_id = attr->group;
7318 	grp_data->type = dr_tbl_attr.type;
7319 	return &grp_data->entry;
7320 error:
7321 	if (grp_data->jump.root_action)
7322 		mlx5dr_action_destroy(grp_data->jump.root_action);
7323 	if (grp_data->jump.hws_action)
7324 		mlx5dr_action_destroy(grp_data->jump.hws_action);
7325 	if (tbl)
7326 		mlx5dr_table_destroy(tbl);
7327 	if (idx)
7328 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
7329 	rte_flow_error_set(error, ENOMEM,
7330 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7331 			   NULL,
7332 			   "cannot allocate flow dr table");
7333 	return NULL;
7334 }
7335 
7336 /**
7337  * Remove group callback.
7338  *
7339  * @param[in] tool_ctx
7340  *   Pointer to the hash list related context.
7341  * @param[in] entry
7342  *   Pointer to the entry to be removed.
7343  */
7344 void
7345 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
7346 {
7347 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
7348 	struct mlx5_flow_group *grp_data =
7349 		    container_of(entry, struct mlx5_flow_group, entry);
7350 
7351 	MLX5_ASSERT(entry && sh);
7352 	/* To use the wrapper glue functions instead. */
7353 	if (grp_data->jump.hws_action)
7354 		mlx5dr_action_destroy(grp_data->jump.hws_action);
7355 	if (grp_data->jump.root_action)
7356 		mlx5dr_action_destroy(grp_data->jump.root_action);
7357 	mlx5dr_table_destroy(grp_data->tbl);
7358 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
7359 }
7360 
7361 /**
7362  * Match group callback.
7363  *
7364  * @param[in] tool_ctx
7365  *   Pointer to the hash list related context.
7366  * @param[in] entry
7367  *   Pointer to the group to be matched.
7368  * @param[in] cb_ctx
7369  *   Pointer to the group matching context.
7370  *
7371  * @return
7372  *   0 if the entries match, non-zero otherwise.
7373  */
7374 int
7375 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
7376 		     void *cb_ctx)
7377 {
7378 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7379 	struct mlx5_flow_group *grp_data =
7380 		container_of(entry, struct mlx5_flow_group, entry);
7381 	struct rte_flow_attr *attr =
7382 			(struct rte_flow_attr *)ctx->data;
7383 
7384 	return (grp_data->dev != ctx->dev) ||
7385 		(grp_data->group_id != attr->group) ||
7386 		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
7387 		attr->transfer) ||
7388 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
7389 		attr->egress) ||
7390 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
7391 		attr->ingress);
7392 }
7393 
7394 /**
7395  * Clone group entry callback.
7396  *
7397  * @param[in] tool_ctx
7398  *   Pointer to the hash list related context.
7399  * @param[in] oentry
7400  *   Pointer to the group entry to be cloned.
7401  * @param[in] cb_ctx
7402  *   Pointer to the group clone context.
7403  *
7404  * @return
7405  *   Pointer to the cloned group entry on success, NULL otherwise and rte_errno is set.
7406  */
7407 struct mlx5_list_entry *
7408 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
7409 		     void *cb_ctx)
7410 {
7411 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
7412 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7413 	struct mlx5_flow_group *grp_data;
7414 	struct rte_flow_error *error = ctx->error;
7415 	uint32_t idx = 0;
7416 
7417 	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
7418 	if (!grp_data) {
7419 		rte_flow_error_set(error, ENOMEM,
7420 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7421 				   NULL,
7422 				   "cannot allocate flow table data entry");
7423 		return NULL;
7424 	}
7425 	memcpy(grp_data, oentry, sizeof(*grp_data));
7426 	grp_data->idx = idx;
7427 	return &grp_data->entry;
7428 }
7429 
7430 /**
7431  * Free cloned group entry callback.
7432  *
7433  * @param[in] tool_ctx
7434  *   Pointer to the hash list related context.
7435  * @param[in] entry
7436  *   Pointer to the group to be freed.
7437  */
7438 void
7439 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
7440 {
7441 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
7442 	struct mlx5_flow_group *grp_data =
7443 		    container_of(entry, struct mlx5_flow_group, entry);
7444 
7445 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
7446 }
7447 
7448 /**
7449  * Create and cache a vport action for given @p dev port. vport actions
7450  * cache is used in HWS with FDB flows.
7451  *
7452  * This function does not create any action if the proxy port for @p dev port
7453  * was not configured for HW Steering.
7454  *
7455  * This function assumes that E-Switch is enabled and PMD is running with
7456  * HW Steering configured.
7457  *
7458  * @param dev
7459  *   Pointer to Ethernet device which will be the action destination.
7460  *
7461  * @return
7462  *   0 on success, a negative value otherwise.
7463  */
7464 int
7465 flow_hw_create_vport_action(struct rte_eth_dev *dev)
7466 {
7467 	struct mlx5_priv *priv = dev->data->dev_private;
7468 	struct rte_eth_dev *proxy_dev;
7469 	struct mlx5_priv *proxy_priv;
7470 	uint16_t port_id = dev->data->port_id;
7471 	uint16_t proxy_port_id = port_id;
7472 	int ret;
7473 
7474 	ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
7475 	if (ret)
7476 		return ret;
7477 	proxy_dev = &rte_eth_devices[proxy_port_id];
7478 	proxy_priv = proxy_dev->data->dev_private;
7479 	if (!proxy_priv->hw_vport)
7480 		return 0;
7481 	if (proxy_priv->hw_vport[port_id]) {
7482 		DRV_LOG(ERR, "port %u HWS vport action already created",
7483 			port_id);
7484 		return -EINVAL;
7485 	}
7486 	proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
7487 			(proxy_priv->dr_ctx, priv->dev_port,
7488 			 MLX5DR_ACTION_FLAG_HWS_FDB);
7489 	if (!proxy_priv->hw_vport[port_id]) {
7490 		DRV_LOG(ERR, "port %u unable to create HWS vport action",
7491 			port_id);
7492 		return -EINVAL;
7493 	}
7494 	return 0;
7495 }
7496 
7497 /**
7498  * Destroys the vport action associated with @p dev device
7499  * from actions' cache.
7500  *
7501  * This function does not destroy any action if there is no action cached
7502  * for @p dev or proxy port was not configured for HW Steering.
7503  *
7504  * This function assumes that E-Switch is enabled and PMD is running with
7505  * HW Steering configured.
7506  *
7507  * @param dev
7508  *   Pointer to Ethernet device which will be the action destination.
7509  */
7510 void
7511 flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
7512 {
7513 	struct rte_eth_dev *proxy_dev;
7514 	struct mlx5_priv *proxy_priv;
7515 	uint16_t port_id = dev->data->port_id;
7516 	uint16_t proxy_port_id = port_id;
7517 
7518 	if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
7519 		return;
7520 	proxy_dev = &rte_eth_devices[proxy_port_id];
7521 	proxy_priv = proxy_dev->data->dev_private;
7522 	if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
7523 		return;
7524 	mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
7525 	proxy_priv->hw_vport[port_id] = NULL;
7526 }
7527 
7528 static int
7529 flow_hw_create_vport_actions(struct mlx5_priv *priv)
7530 {
7531 	uint16_t port_id;
7532 
7533 	MLX5_ASSERT(!priv->hw_vport);
7534 	priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
7535 				     sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
7536 				     0, SOCKET_ID_ANY);
7537 	if (!priv->hw_vport)
7538 		return -ENOMEM;
7539 	DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
7540 	DRV_LOG(DEBUG, "port %u ::    domain_id=%u", priv->dev_data->port_id, priv->domain_id);
7541 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
7542 		struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;
7543 
7544 		if (!port_priv ||
7545 		    port_priv->domain_id != priv->domain_id)
7546 			continue;
7547 		DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
7548 			priv->dev_data->port_id, port_id, port_priv->dev_port);
7549 		priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
7550 				(priv->dr_ctx, port_priv->dev_port,
7551 				 MLX5DR_ACTION_FLAG_HWS_FDB);
7552 		DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
7553 			priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
7554 		if (!priv->hw_vport[port_id])
7555 			return -EINVAL;
7556 	}
7557 	return 0;
7558 }
7559 
7560 static void
7561 flow_hw_free_vport_actions(struct mlx5_priv *priv)
7562 {
7563 	uint16_t port_id;
7564 
7565 	if (!priv->hw_vport)
7566 		return;
7567 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
7568 		if (priv->hw_vport[port_id])
7569 			mlx5dr_action_destroy(priv->hw_vport[port_id]);
7570 	mlx5_free(priv->hw_vport);
7571 	priv->hw_vport = NULL;
7572 }
7573 
7574 static void
7575 flow_hw_create_send_to_kernel_actions(struct mlx5_priv *priv __rte_unused)
7576 {
7577 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7578 	int action_flag;
7579 	int i;
7580 	bool is_vf_sf_dev = priv->sh->dev_cap.vf || priv->sh->dev_cap.sf;
7581 
7582 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
7583 		if ((!priv->sh->config.dv_esw_en || is_vf_sf_dev) &&
7584 		     i == MLX5DR_TABLE_TYPE_FDB)
7585 			continue;
7586 		action_flag = mlx5_hw_act_flag[1][i];
7587 		priv->hw_send_to_kernel[i] =
7588 				mlx5dr_action_create_dest_root(priv->dr_ctx,
7589 							MLX5_HW_LOWEST_PRIO_ROOT,
7590 							action_flag);
7591 		if (!priv->hw_send_to_kernel[i]) {
7592 			DRV_LOG(WARNING, "Unable to create HWS send to kernel action");
7593 			return;
7594 		}
7595 	}
7596 #endif
7597 }
7598 
7599 static void
7600 flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv)
7601 {
7602 	int i;
7603 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
7604 		if (priv->hw_send_to_kernel[i]) {
7605 			mlx5dr_action_destroy(priv->hw_send_to_kernel[i]);
7606 			priv->hw_send_to_kernel[i] = NULL;
7607 		}
7608 	}
7609 }
7610 
7611 /**
7612  * Create an egress pattern template matching on source SQ.
7613  *
7614  * @param dev
7615  *   Pointer to Ethernet device.
7616  * @param[out] error
7617  *   Pointer to error structure.
7618  *
7619  * @return
7620  *   Pointer to pattern template on success. NULL otherwise, and rte_errno is set.
7621  */
7622 static struct rte_flow_pattern_template *
7623 flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev, struct rte_flow_error *error)
7624 {
7625 	struct rte_flow_pattern_template_attr attr = {
7626 		.relaxed_matching = 0,
7627 		.egress = 1,
7628 	};
7629 	struct mlx5_rte_flow_item_sq sq_mask = {
7630 		.queue = UINT32_MAX,
7631 	};
7632 	struct rte_flow_item items[] = {
7633 		{
7634 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
7635 			.mask = &sq_mask,
7636 		},
7637 		{
7638 			.type = RTE_FLOW_ITEM_TYPE_END,
7639 		},
7640 	};
7641 
7642 	return flow_hw_pattern_template_create(dev, &attr, items, error);
7643 }
7644 
7645 static __rte_always_inline uint32_t
7646 flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
7647 {
7648 	struct mlx5_priv *priv = dev->data->dev_private;
7649 	uint32_t mask = priv->sh->dv_regc0_mask;
7650 
7651 	/* Mask is verified during device initialization. Sanity checking here. */
7652 	MLX5_ASSERT(mask != 0);
7653 	/*
7654 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
7655 	 * Sanity checking here.
7656 	 */
7657 	MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask));
7658 	return mask;
7659 }
7660 
7661 static __rte_always_inline uint32_t
7662 flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
7663 {
7664 	struct mlx5_priv *priv = dev->data->dev_private;
7665 	uint32_t tag;
7666 
7667 	/* Mask is verified during device initialization. Sanity checking here. */
7668 	MLX5_ASSERT(priv->vport_meta_mask != 0);
7669 	tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
7670 	/*
7671 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
7672 	 * Sanity checking here.
7673 	 */
7674 	MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
7675 	return tag;
7676 }
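
/*
 * Worked example for flow_hw_tx_tag_regc_value() with hypothetical values:
 * for vport_meta_mask == 0x0000ff00 and vport_meta_tag == 0x00002a00, the
 * tag is shifted right by rte_bsf32(0x0000ff00) == 8 bits and the helper
 * returns 0x2a.
 */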
7677 
7678 static void
7679 flow_hw_update_action_mask(struct rte_flow_action *action,
7680 			   struct rte_flow_action *mask,
7681 			   enum rte_flow_action_type type,
7682 			   void *conf_v,
7683 			   void *conf_m)
7684 {
7685 	action->type = type;
7686 	action->conf = conf_v;
7687 	mask->type = type;
7688 	mask->conf = conf_m;
7689 }
7690 
7691 /**
7692  * Create an egress actions template with MODIFY_FIELD action for setting unused REG_C_0 bits
7693  * to vport tag and JUMP action to group 1.
7694  *
7695  * If extended metadata mode is enabled, then MODIFY_FIELD action for copying software metadata
7696  * to REG_C_1 is added as well.
7697  *
7698  * @param dev
7699  *   Pointer to Ethernet device.
7700  * @param[out] error
7701  *   Pointer to error structure.
7702  *
7703  * @return
7704  *   Pointer to actions template on success. NULL otherwise, and rte_errno is set.
7705  */
7706 static struct rte_flow_actions_template *
7707 flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev,
7708 					  struct rte_flow_error *error)
7709 {
7710 	uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev);
7711 	uint32_t tag_value = flow_hw_tx_tag_regc_value(dev);
7712 	struct rte_flow_actions_template_attr attr = {
7713 		.egress = 1,
7714 	};
7715 	struct rte_flow_action_modify_field set_tag_v = {
7716 		.operation = RTE_FLOW_MODIFY_SET,
7717 		.dst = {
7718 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7719 			.tag_index = REG_C_0,
7720 			.offset = rte_bsf32(tag_mask),
7721 		},
7722 		.src = {
7723 			.field = RTE_FLOW_FIELD_VALUE,
7724 		},
7725 		.width = rte_popcount32(tag_mask),
7726 	};
7727 	struct rte_flow_action_modify_field set_tag_m = {
7728 		.operation = RTE_FLOW_MODIFY_SET,
7729 		.dst = {
7730 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7731 			.level = UINT8_MAX,
7732 			.tag_index = UINT8_MAX,
7733 			.offset = UINT32_MAX,
7734 		},
7735 		.src = {
7736 			.field = RTE_FLOW_FIELD_VALUE,
7737 		},
7738 		.width = UINT32_MAX,
7739 	};
7740 	struct rte_flow_action_modify_field copy_metadata_v = {
7741 		.operation = RTE_FLOW_MODIFY_SET,
7742 		.dst = {
7743 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7744 			.tag_index = REG_C_1,
7745 		},
7746 		.src = {
7747 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7748 			.tag_index = REG_A,
7749 		},
7750 		.width = 32,
7751 	};
7752 	struct rte_flow_action_modify_field copy_metadata_m = {
7753 		.operation = RTE_FLOW_MODIFY_SET,
7754 		.dst = {
7755 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7756 			.level = UINT8_MAX,
7757 			.tag_index = UINT8_MAX,
7758 			.offset = UINT32_MAX,
7759 		},
7760 		.src = {
7761 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7762 			.level = UINT8_MAX,
7763 			.tag_index = UINT8_MAX,
7764 			.offset = UINT32_MAX,
7765 		},
7766 		.width = UINT32_MAX,
7767 	};
7768 	struct rte_flow_action_jump jump_v = {
7769 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
7770 	};
7771 	struct rte_flow_action_jump jump_m = {
7772 		.group = UINT32_MAX,
7773 	};
7774 	struct rte_flow_action actions_v[4] = { { 0 } };
7775 	struct rte_flow_action actions_m[4] = { { 0 } };
7776 	unsigned int idx = 0;
7777 
7778 	rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
7779 	rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
7780 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
7781 				   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7782 				   &set_tag_v, &set_tag_m);
7783 	idx++;
7784 	if (MLX5_SH(dev)->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
7785 		flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
7786 					   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7787 					   &copy_metadata_v, &copy_metadata_m);
7788 		idx++;
7789 	}
7790 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_JUMP,
7791 				   &jump_v, &jump_m);
7792 	idx++;
7793 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_END,
7794 				   NULL, NULL);
7795 	idx++;
7796 	MLX5_ASSERT(idx <= RTE_DIM(actions_v));
7797 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
7798 }
7799 
7800 static void
7801 flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev)
7802 {
7803 	struct mlx5_priv *priv = dev->data->dev_private;
7804 
7805 	if (priv->hw_tx_repr_tagging_tbl) {
7806 		flow_hw_table_destroy(dev, priv->hw_tx_repr_tagging_tbl, NULL);
7807 		priv->hw_tx_repr_tagging_tbl = NULL;
7808 	}
7809 	if (priv->hw_tx_repr_tagging_at) {
7810 		flow_hw_actions_template_destroy(dev, priv->hw_tx_repr_tagging_at, NULL);
7811 		priv->hw_tx_repr_tagging_at = NULL;
7812 	}
7813 	if (priv->hw_tx_repr_tagging_pt) {
7814 		flow_hw_pattern_template_destroy(dev, priv->hw_tx_repr_tagging_pt, NULL);
7815 		priv->hw_tx_repr_tagging_pt = NULL;
7816 	}
7817 }
7818 
7819 /**
7820  * Set up the templates and table used to create default Tx flow rules. These default rules
7821  * allow for matching Tx representor traffic using a vport tag placed in unused bits of
7822  * REG_C_0 register.
7823  *
7824  * @param dev
7825  *   Pointer to Ethernet device.
7826  * @param[out] error
7827  *   Pointer to error structure.
7828  *
7829  * @return
7830  *   0 on success, negative errno value otherwise.
7831  */
7832 static int
7833 flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev, struct rte_flow_error *error)
7834 {
7835 	struct mlx5_priv *priv = dev->data->dev_private;
7836 	struct rte_flow_template_table_attr attr = {
7837 		.flow_attr = {
7838 			.group = 0,
7839 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
7840 			.egress = 1,
7841 		},
7842 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
7843 	};
7844 	struct mlx5_flow_template_table_cfg cfg = {
7845 		.attr = attr,
7846 		.external = false,
7847 	};
7848 
7849 	MLX5_ASSERT(priv->sh->config.dv_esw_en);
7850 	MLX5_ASSERT(priv->sh->config.repr_matching);
7851 	priv->hw_tx_repr_tagging_pt =
7852 		flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error);
7853 	if (!priv->hw_tx_repr_tagging_pt)
7854 		goto err;
7855 	priv->hw_tx_repr_tagging_at =
7856 		flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev, error);
7857 	if (!priv->hw_tx_repr_tagging_at)
7858 		goto err;
7859 	priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg,
7860 							    &priv->hw_tx_repr_tagging_pt, 1,
7861 							    &priv->hw_tx_repr_tagging_at, 1,
7862 							    error);
7863 	if (!priv->hw_tx_repr_tagging_tbl)
7864 		goto err;
7865 	return 0;
7866 err:
7867 	flow_hw_cleanup_tx_repr_tagging(dev);
7868 	return -rte_errno;
7869 }
7870 
7871 static uint32_t
7872 flow_hw_esw_mgr_regc_marker_mask(struct rte_eth_dev *dev)
7873 {
7874 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
7875 
7876 	/* Mask is verified during device initialization. */
7877 	MLX5_ASSERT(mask != 0);
7878 	return mask;
7879 }
7880 
7881 static uint32_t
7882 flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev)
7883 {
7884 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
7885 
7886 	/* Mask is verified during device initialization. */
7887 	MLX5_ASSERT(mask != 0);
7888 	return RTE_BIT32(rte_bsf32(mask));
7889 }
7890 
7891 /**
7892  * Creates a flow pattern template used to match on E-Switch Manager.
7893  * This template is used to set up a table for SQ miss default flow.
7894  *
7895  * @param dev
7896  *   Pointer to Ethernet device.
7897  * @param error
7898  *   Pointer to error structure.
7899  *
7900  * @return
7901  *   Pointer to flow pattern template on success, NULL otherwise.
7902  */
7903 static struct rte_flow_pattern_template *
7904 flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev,
7905 					     struct rte_flow_error *error)
7906 {
7907 	struct rte_flow_pattern_template_attr attr = {
7908 		.relaxed_matching = 0,
7909 		.transfer = 1,
7910 	};
7911 	struct rte_flow_item_ethdev port_spec = {
7912 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
7913 	};
7914 	struct rte_flow_item_ethdev port_mask = {
7915 		.port_id = UINT16_MAX,
7916 	};
7917 	struct mlx5_rte_flow_item_sq sq_mask = {
7918 		.queue = UINT32_MAX,
7919 	};
7920 	struct rte_flow_item items[] = {
7921 		{
7922 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
7923 			.spec = &port_spec,
7924 			.mask = &port_mask,
7925 		},
7926 		{
7927 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
7928 			.mask = &sq_mask,
7929 		},
7930 		{
7931 			.type = RTE_FLOW_ITEM_TYPE_END,
7932 		},
7933 	};
7934 
7935 	return flow_hw_pattern_template_create(dev, &attr, items, error);
7936 }
7937 
7938 /**
7939  * Creates a flow pattern template used to match REG_C_0 and a SQ.
7940  * Matching on REG_C_0 is set up to match on all bits usable by user-space.
7941  * If traffic was sent from E-Switch Manager, then all usable bits will be set to 0,
7942  * except the least significant bit, which will be set to 1.
7943  *
7944  * This template is used to set up a table for SQ miss default flow.
7945  *
7946  * @param dev
7947  *   Pointer to Ethernet device.
7948  * @param error
7949  *   Pointer to error structure.
7950  *
7951  * @return
7952  *   Pointer to flow pattern template on success, NULL otherwise.
7953  */
7954 static struct rte_flow_pattern_template *
7955 flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev,
7956 					     struct rte_flow_error *error)
7957 {
7958 	struct rte_flow_pattern_template_attr attr = {
7959 		.relaxed_matching = 0,
7960 		.transfer = 1,
7961 	};
7962 	struct rte_flow_item_tag reg_c0_spec = {
7963 		.index = (uint8_t)REG_C_0,
7964 	};
7965 	struct rte_flow_item_tag reg_c0_mask = {
7966 		.index = 0xff,
7967 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
7968 	};
7969 	struct mlx5_rte_flow_item_sq queue_mask = {
7970 		.queue = UINT32_MAX,
7971 	};
7972 	struct rte_flow_item items[] = {
7973 		{
7974 			.type = (enum rte_flow_item_type)
7975 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
7976 			.spec = &reg_c0_spec,
7977 			.mask = &reg_c0_mask,
7978 		},
7979 		{
7980 			.type = (enum rte_flow_item_type)
7981 				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
7982 			.mask = &queue_mask,
7983 		},
7984 		{
7985 			.type = RTE_FLOW_ITEM_TYPE_END,
7986 		},
7987 	};
7988 
7989 	return flow_hw_pattern_template_create(dev, &attr, items, error);
7990 }
7991 
7992 /**
7993  * Creates a flow pattern template with unmasked represented port matching.
7994  * This template is used to set up a table for default transfer flows
7995  * directing packets to group 1.
7996  *
7997  * @param dev
7998  *   Pointer to Ethernet device.
7999  * @param error
8000  *   Pointer to error structure.
8001  *
8002  * @return
8003  *   Pointer to flow pattern template on success, NULL otherwise.
8004  */
8005 static struct rte_flow_pattern_template *
8006 flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev,
8007 					  struct rte_flow_error *error)
8008 {
8009 	struct rte_flow_pattern_template_attr attr = {
8010 		.relaxed_matching = 0,
8011 		.transfer = 1,
8012 	};
8013 	struct rte_flow_item_ethdev port_mask = {
8014 		.port_id = UINT16_MAX,
8015 	};
8016 	struct rte_flow_item items[] = {
8017 		{
8018 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
8019 			.mask = &port_mask,
8020 		},
8021 		{
8022 			.type = RTE_FLOW_ITEM_TYPE_END,
8023 		},
8024 	};
8025 
8026 	return flow_hw_pattern_template_create(dev, &attr, items, error);
8027 }
8028 
8029 /*
8030  * Creating a flow pattern template matching all ETH packets.
8031  * This template is used to set up a table for the default Tx copy (Tx metadata
8032  * to REG_C_1) flow rule.
8033  *
8034  * @param dev
8035  *   Pointer to Ethernet device.
8036  * @param error
8037  *   Pointer to error structure.
8038  *
8039  * @return
8040  *   Pointer to flow pattern template on success, NULL otherwise.
8041  */
8042 static struct rte_flow_pattern_template *
8043 flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev,
8044 						     struct rte_flow_error *error)
8045 {
8046 	struct rte_flow_pattern_template_attr tx_pa_attr = {
8047 		.relaxed_matching = 0,
8048 		.egress = 1,
8049 	};
8050 	struct rte_flow_item_eth promisc = {
8051 		.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8052 		.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8053 		.hdr.ether_type = 0,
8054 	};
8055 	struct rte_flow_item eth_all[] = {
8056 		[0] = {
8057 			.type = RTE_FLOW_ITEM_TYPE_ETH,
8058 			.spec = &promisc,
8059 			.mask = &promisc,
8060 		},
8061 		[1] = {
8062 			.type = RTE_FLOW_ITEM_TYPE_END,
8063 		},
8064 	};
8065 
8066 	return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, error);
8067 }
8068 
8069 /*
8070  * Creating a flow pattern template matching all LACP packets, for the NIC
8071  * ingress domain only.
8072  *
8073  * @param dev
8074  *   Pointer to Ethernet device.
8075  * @param error
8076  *   Pointer to error structure.
8077  *
8078  * @return
8079  *   Pointer to flow pattern template on success, NULL otherwise.
8080  */
8081 static struct rte_flow_pattern_template *
8082 flow_hw_create_lacp_rx_pattern_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
8083 {
8084 	struct rte_flow_pattern_template_attr pa_attr = {
8085 		.relaxed_matching = 0,
8086 		.ingress = 1,
8087 	};
8088 	struct rte_flow_item_eth lacp_mask = {
8089 		.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8090 		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8091 		.type = 0xFFFF,
8092 	};
8093 	struct rte_flow_item eth_all[] = {
8094 		[0] = {
8095 			.type = RTE_FLOW_ITEM_TYPE_ETH,
8096 			.mask = &lacp_mask,
8097 		},
8098 		[1] = {
8099 			.type = RTE_FLOW_ITEM_TYPE_END,
8100 		},
8101 	};
8102 	return flow_hw_pattern_template_create(dev, &pa_attr, eth_all, error);
8103 }
8104 
8105 /**
8106  * Creates a flow actions template with modify field action and masked jump action.
8107  * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)
8108  * to 1, meaning that packet was originated from E-Switch Manager. Jump action
8109  * transfers steering to group 1.
8110  *
8111  * @param dev
8112  *   Pointer to Ethernet device.
8113  * @param error
8114  *   Pointer to error structure.
8115  *
8116  * @return
8117  *   Pointer to flow actions template on success, NULL otherwise.
8118  */
8119 static struct rte_flow_actions_template *
8120 flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev,
8121 					       struct rte_flow_error *error)
8122 {
8123 	uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev);
8124 	uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev);
8125 	struct rte_flow_actions_template_attr attr = {
8126 		.transfer = 1,
8127 	};
8128 	struct rte_flow_action_modify_field set_reg_v = {
8129 		.operation = RTE_FLOW_MODIFY_SET,
8130 		.dst = {
8131 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8132 			.tag_index = REG_C_0,
8133 		},
8134 		.src = {
8135 			.field = RTE_FLOW_FIELD_VALUE,
8136 		},
8137 		.width = rte_popcount32(marker_mask),
8138 	};
8139 	struct rte_flow_action_modify_field set_reg_m = {
8140 		.operation = RTE_FLOW_MODIFY_SET,
8141 		.dst = {
8142 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8143 			.level = UINT8_MAX,
8144 			.tag_index = UINT8_MAX,
8145 			.offset = UINT32_MAX,
8146 		},
8147 		.src = {
8148 			.field = RTE_FLOW_FIELD_VALUE,
8149 		},
8150 		.width = UINT32_MAX,
8151 	};
8152 	struct rte_flow_action_jump jump_v = {
8153 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
8154 	};
8155 	struct rte_flow_action_jump jump_m = {
8156 		.group = UINT32_MAX,
8157 	};
8158 	struct rte_flow_action actions_v[] = {
8159 		{
8160 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8161 			.conf = &set_reg_v,
8162 		},
8163 		{
8164 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8165 			.conf = &jump_v,
8166 		},
8167 		{
8168 			.type = RTE_FLOW_ACTION_TYPE_END,
8169 		}
8170 	};
8171 	struct rte_flow_action actions_m[] = {
8172 		{
8173 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8174 			.conf = &set_reg_m,
8175 		},
8176 		{
8177 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8178 			.conf = &jump_m,
8179 		},
8180 		{
8181 			.type = RTE_FLOW_ACTION_TYPE_END,
8182 		}
8183 	};
8184 
8185 	set_reg_v.dst.offset = rte_bsf32(marker_mask);
8186 	rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
8187 	rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
8188 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
8189 }
8190 
8191 /**
8192  * Creates a flow actions template with an unmasked JUMP action. Flows
8193  * based on this template will perform a jump to some group. This template
8194  * is used to set up tables for control flows.
8195  *
8196  * @param dev
8197  *   Pointer to Ethernet device.
8198  * @param group
8199  *   Destination group for this action template.
8200  * @param error
8201  *   Pointer to error structure.
8202  *
8203  * @return
8204  *   Pointer to flow actions template on success, NULL otherwise.
8205  */
8206 static struct rte_flow_actions_template *
8207 flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
8208 					  uint32_t group,
8209 					  struct rte_flow_error *error)
8210 {
8211 	struct rte_flow_actions_template_attr attr = {
8212 		.transfer = 1,
8213 	};
8214 	struct rte_flow_action_jump jump_v = {
8215 		.group = group,
8216 	};
8217 	struct rte_flow_action_jump jump_m = {
8218 		.group = UINT32_MAX,
8219 	};
8220 	struct rte_flow_action actions_v[] = {
8221 		{
8222 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8223 			.conf = &jump_v,
8224 		},
8225 		{
8226 			.type = RTE_FLOW_ACTION_TYPE_END,
8227 		}
8228 	};
8229 	struct rte_flow_action actions_m[] = {
8230 		{
8231 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8232 			.conf = &jump_m,
8233 		},
8234 		{
8235 			.type = RTE_FLOW_ACTION_TYPE_END,
8236 		}
8237 	};
8238 
8239 	return flow_hw_actions_template_create(dev, &attr, actions_v,
8240 					       actions_m, error);
8241 }
8242 
8243 /**
8244  * Creates a flow actions template with an unmasked REPRESENTED_PORT action.
8245  * It is used to create control flow tables.
8246  *
8247  * @param dev
8248  *   Pointer to Ethernet device.
8249  * @param error
8250  *   Pointer to error structure.
8251  *
8252  * @return
8253  *   Pointer to flow action template on success, NULL otherwise.
8254  */
8255 static struct rte_flow_actions_template *
8256 flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev,
8257 					  struct rte_flow_error *error)
8258 {
8259 	struct rte_flow_actions_template_attr attr = {
8260 		.transfer = 1,
8261 	};
8262 	struct rte_flow_action_ethdev port_v = {
8263 		.port_id = 0,
8264 	};
8265 	struct rte_flow_action actions_v[] = {
8266 		{
8267 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
8268 			.conf = &port_v,
8269 		},
8270 		{
8271 			.type = RTE_FLOW_ACTION_TYPE_END,
8272 		}
8273 	};
8274 	struct rte_flow_action_ethdev port_m = {
8275 		.port_id = 0,
8276 	};
8277 	struct rte_flow_action actions_m[] = {
8278 		{
8279 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
8280 			.conf = &port_m,
8281 		},
8282 		{
8283 			.type = RTE_FLOW_ACTION_TYPE_END,
8284 		}
8285 	};
8286 
8287 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
8288 }
8289 
8290 /*
8291  * Creating an actions template that uses a modify header action for register
8292  * copying. This template is used to set up a table for the copy flow.
8293  *
8294  * @param dev
8295  *   Pointer to Ethernet device.
8296  * @param error
8297  *   Pointer to error structure.
8298  *
8299  * @return
8300  *   Pointer to flow actions template on success, NULL otherwise.
8301  */
8302 static struct rte_flow_actions_template *
8303 flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev,
8304 						     struct rte_flow_error *error)
8305 {
8306 	struct rte_flow_actions_template_attr tx_act_attr = {
8307 		.egress = 1,
8308 	};
8309 	const struct rte_flow_action_modify_field mreg_action = {
8310 		.operation = RTE_FLOW_MODIFY_SET,
8311 		.dst = {
8312 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8313 			.tag_index = REG_C_1,
8314 		},
8315 		.src = {
8316 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8317 			.tag_index = REG_A,
8318 		},
8319 		.width = 32,
8320 	};
8321 	const struct rte_flow_action_modify_field mreg_mask = {
8322 		.operation = RTE_FLOW_MODIFY_SET,
8323 		.dst = {
8324 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8325 			.level = UINT8_MAX,
8326 			.tag_index = UINT8_MAX,
8327 			.offset = UINT32_MAX,
8328 		},
8329 		.src = {
8330 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8331 			.level = UINT8_MAX,
8332 			.tag_index = UINT8_MAX,
8333 			.offset = UINT32_MAX,
8334 		},
8335 		.width = UINT32_MAX,
8336 	};
8337 	const struct rte_flow_action_jump jump_action = {
8338 		.group = 1,
8339 	};
8340 	const struct rte_flow_action_jump jump_mask = {
8341 		.group = UINT32_MAX,
8342 	};
8343 	const struct rte_flow_action actions[] = {
8344 		[0] = {
8345 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8346 			.conf = &mreg_action,
8347 		},
8348 		[1] = {
8349 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8350 			.conf = &jump_action,
8351 		},
8352 		[2] = {
8353 			.type = RTE_FLOW_ACTION_TYPE_END,
8354 		},
8355 	};
8356 	const struct rte_flow_action masks[] = {
8357 		[0] = {
8358 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8359 			.conf = &mreg_mask,
8360 		},
8361 		[1] = {
8362 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8363 			.conf = &jump_mask,
8364 		},
8365 		[2] = {
8366 			.type = RTE_FLOW_ACTION_TYPE_END,
8367 		},
8368 	};
8369 
8370 	return flow_hw_actions_template_create(dev, &tx_act_attr, actions,
8371 					       masks, error);
8372 }
8373 
8374 /*
8375  * Creating an actions template that uses the default miss action to re-route
8376  * packets to the kernel driver stack.
8377  * On the root table, only the DEFAULT_MISS action can be used.
8378  *
8379  * @param dev
8380  *   Pointer to Ethernet device.
8381  * @param error
8382  *   Pointer to error structure.
8383  *
8384  * @return
8385  *   Pointer to flow actions template on success, NULL otherwise.
8386  */
8387 static struct rte_flow_actions_template *
8388 flow_hw_create_lacp_rx_actions_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
8389 {
8390 	struct rte_flow_actions_template_attr act_attr = {
8391 		.ingress = 1,
8392 	};
8393 	const struct rte_flow_action actions[] = {
8394 		[0] = {
8395 			.type = (enum rte_flow_action_type)
8396 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
8397 		},
8398 		[1] = {
8399 			.type = RTE_FLOW_ACTION_TYPE_END,
8400 		},
8401 	};
8402 
8403 	return flow_hw_actions_template_create(dev, &act_attr, actions, actions, error);
8404 }
8405 
8406 /**
8407  * Creates a control flow table used to transfer traffic from E-Switch Manager
8408  * and TX queues from group 0 to group 1.
8409  *
8410  * @param dev
8411  *   Pointer to Ethernet device.
8412  * @param it
8413  *   Pointer to flow pattern template.
8414  * @param at
8415  *   Pointer to flow actions template.
8416  * @param error
8417  *   Pointer to error structure.
8418  *
8419  * @return
8420  *   Pointer to flow table on success, NULL otherwise.
8421  */
8422 static struct rte_flow_template_table*
8423 flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
8424 				       struct rte_flow_pattern_template *it,
8425 				       struct rte_flow_actions_template *at,
8426 				       struct rte_flow_error *error)
8427 {
8428 	struct rte_flow_template_table_attr attr = {
8429 		.flow_attr = {
8430 			.group = 0,
8431 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
8432 			.ingress = 0,
8433 			.egress = 0,
8434 			.transfer = 1,
8435 		},
8436 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
8437 	};
8438 	struct mlx5_flow_template_table_cfg cfg = {
8439 		.attr = attr,
8440 		.external = false,
8441 	};
8442 
8443 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
8444 }
8445 
8446 
8447 /**
8448  * Creates a non-root control flow table (group 1) used by the default SQ miss
8449  * flow rules to forward traffic to the destination represented port.
8450  *
8451  * @param dev
8452  *   Pointer to Ethernet device.
8453  * @param it
8454  *   Pointer to flow pattern template.
8455  * @param at
8456  *   Pointer to flow actions template.
8457  * @param error
8458  *   Pointer to error structure.
8459  *
8460  * @return
8461  *   Pointer to flow table on success, NULL otherwise.
8462  */
8463 static struct rte_flow_template_table*
8464 flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
8465 				  struct rte_flow_pattern_template *it,
8466 				  struct rte_flow_actions_template *at,
8467 				  struct rte_flow_error *error)
8468 {
8469 	struct rte_flow_template_table_attr attr = {
8470 		.flow_attr = {
8471 			.group = 1,
8472 			.priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,
8473 			.ingress = 0,
8474 			.egress = 0,
8475 			.transfer = 1,
8476 		},
8477 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
8478 	};
8479 	struct mlx5_flow_template_table_cfg cfg = {
8480 		.attr = attr,
8481 		.external = false,
8482 	};
8483 
8484 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
8485 }
8486 
8487 /*
8488  * Creating the default Tx metadata copy table on NIC Tx group 0.
8489  *
8490  * @param dev
8491  *   Pointer to Ethernet device.
8492  * @param pt
8493  *   Pointer to flow pattern template.
8494  * @param at
8495  *   Pointer to flow actions template.
8496  * @param error
8497  *   Pointer to error structure.
8498  *
8499  * @return
8500  *   Pointer to flow table on success, NULL otherwise.
8501  */
8502 static struct rte_flow_template_table*
8503 flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
8504 					  struct rte_flow_pattern_template *pt,
8505 					  struct rte_flow_actions_template *at,
8506 					  struct rte_flow_error *error)
8507 {
8508 	struct rte_flow_template_table_attr tx_tbl_attr = {
8509 		.flow_attr = {
8510 			.group = 0, /* Root */
8511 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
8512 			.egress = 1,
8513 		},
8514 		.nb_flows = 1, /* One default flow rule for all. */
8515 	};
8516 	struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
8517 		.attr = tx_tbl_attr,
8518 		.external = false,
8519 	};
8520 
8521 	return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, error);
8522 }
8523 
8524 /**
8525  * Creates a control flow table used to transfer traffic
8526  * from group 0 to group 1.
8527  *
8528  * @param dev
8529  *   Pointer to Ethernet device.
8530  * @param it
8531  *   Pointer to flow pattern template.
8532  * @param at
8533  *   Pointer to flow actions template.
8534  * @param error
8535  *   Pointer to error structure.
8536  *
8537  * @return
8538  *   Pointer to flow table on success, NULL otherwise.
8539  */
8540 static struct rte_flow_template_table *
8541 flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
8542 			       struct rte_flow_pattern_template *it,
8543 			       struct rte_flow_actions_template *at,
8544 			       struct rte_flow_error *error)
8545 {
8546 	struct rte_flow_template_table_attr attr = {
8547 		.flow_attr = {
8548 			.group = 0,
8549 			.priority = 0,
8550 			.ingress = 0,
8551 			.egress = 0,
8552 			.transfer = 1,
8553 		},
8554 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
8555 	};
8556 	struct mlx5_flow_template_table_cfg cfg = {
8557 		.attr = attr,
8558 		.external = false,
8559 	};
8560 
8561 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
8562 }
8563 
8564 /*
8565  * Create a table on the root group for redirecting LACP traffic.
8566  *
8567  * @param dev
8568  *   Pointer to Ethernet device.
8569  * @param it
8570  *   Pointer to flow pattern template.
8571  * @param at
8572  *   Pointer to flow actions template.
8573  *
8574  * @return
8575  *   Pointer to flow table on success, NULL otherwise.
8576  */
8577 static struct rte_flow_template_table *
8578 flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev,
8579 			     struct rte_flow_pattern_template *it,
8580 			     struct rte_flow_actions_template *at,
8581 			     struct rte_flow_error *error)
8582 {
8583 	struct rte_flow_template_table_attr attr = {
8584 		.flow_attr = {
8585 			.group = 0,
8586 			.priority = 0,
8587 			.ingress = 1,
8588 			.egress = 0,
8589 			.transfer = 0,
8590 		},
8591 		.nb_flows = 1,
8592 	};
8593 	struct mlx5_flow_template_table_cfg cfg = {
8594 		.attr = attr,
8595 		.external = false,
8596 	};
8597 
8598 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
8599 }
8600 
8601 /**
8602  * Creates the set of flow tables used for control flows
8603  * when E-Switch is engaged.
8604  *
8605  * @param dev
8606  *   Pointer to Ethernet device.
8607  * @param error
8608  *   Pointer to error structure.
8609  *
8610  * @return
8611  *   0 on success, negative values otherwise
8612  */
8613 static __rte_unused int
8614 flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
8615 {
8616 	struct mlx5_priv *priv = dev->data->dev_private;
8617 	struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL;
8618 	struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL;
8619 	struct rte_flow_pattern_template *port_items_tmpl = NULL;
8620 	struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL;
8621 	struct rte_flow_pattern_template *lacp_rx_items_tmpl = NULL;
8622 	struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL;
8623 	struct rte_flow_actions_template *port_actions_tmpl = NULL;
8624 	struct rte_flow_actions_template *jump_one_actions_tmpl = NULL;
8625 	struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL;
8626 	struct rte_flow_actions_template *lacp_rx_actions_tmpl = NULL;
8627 	uint32_t xmeta = priv->sh->config.dv_xmeta_en;
8628 	uint32_t repr_matching = priv->sh->config.repr_matching;
8629 	int ret;
8630 
8631 	/* Create templates and table for default SQ miss flow rules - root table. */
8632 	esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
8633 	if (!esw_mgr_items_tmpl) {
8634 		DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
8635 			" template for control flows", dev->data->port_id);
8636 		goto err;
8637 	}
8638 	regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev, error);
8639 	if (!regc_jump_actions_tmpl) {
8640 		DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
8641 			" for control flows", dev->data->port_id);
8642 		goto err;
8643 	}
8644 	MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL);
8645 	priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table
8646 			(dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl, error);
8647 	if (!priv->hw_esw_sq_miss_root_tbl) {
8648 		DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
8649 			" for control flows", dev->data->port_id);
8650 		goto err;
8651 	}
8652 	/* Create templates and table for default SQ miss flow rules - non-root table. */
8653 	regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
8654 	if (!regc_sq_items_tmpl) {
8655 		DRV_LOG(ERR, "port %u failed to create SQ item template for"
8656 			" control flows", dev->data->port_id);
8657 		goto err;
8658 	}
8659 	port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error);
8660 	if (!port_actions_tmpl) {
8661 		DRV_LOG(ERR, "port %u failed to create port action template"
8662 			" for control flows", dev->data->port_id);
8663 		goto err;
8664 	}
8665 	MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL);
8666 	priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl,
8667 								     port_actions_tmpl, error);
8668 	if (!priv->hw_esw_sq_miss_tbl) {
8669 		DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
8670 			" for control flows", dev->data->port_id);
8671 		goto err;
8672 	}
8673 	/* Create templates and table for default FDB jump flow rules. */
8674 	port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error);
8675 	if (!port_items_tmpl) {
8676 		DRV_LOG(ERR, "port %u failed to create port item template for"
8677 			" control flows", dev->data->port_id);
8678 		goto err;
8679 	}
8680 	jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template
8681 			(dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
8682 	if (!jump_one_actions_tmpl) {
8683 		DRV_LOG(ERR, "port %u failed to create jump action template"
8684 			" for control flows", dev->data->port_id);
8685 		goto err;
8686 	}
8687 	MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL);
8688 	priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl,
8689 							       jump_one_actions_tmpl,
8690 							       error);
8691 	if (!priv->hw_esw_zero_tbl) {
8692 		DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
8693 			" for control flows", dev->data->port_id);
8694 		goto err;
8695 	}
8696 	/* Create templates and table for default Tx metadata copy flow rule. */
8697 	if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) {
8698 		tx_meta_items_tmpl =
8699 			flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error);
8700 		if (!tx_meta_items_tmpl) {
8701 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy pattern"
8702 				" template for control flows", dev->data->port_id);
8703 			goto err;
8704 		}
8705 		tx_meta_actions_tmpl =
8706 			flow_hw_create_tx_default_mreg_copy_actions_template(dev, error);
8707 		if (!tx_meta_actions_tmpl) {
8708 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy actions"
8709 				" template for control flows", dev->data->port_id);
8710 			goto err;
8711 		}
8712 		MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL);
8713 		priv->hw_tx_meta_cpy_tbl =
8714 			flow_hw_create_tx_default_mreg_copy_table(dev, tx_meta_items_tmpl,
8715 								  tx_meta_actions_tmpl, error);
8716 		if (!priv->hw_tx_meta_cpy_tbl) {
8717 			DRV_LOG(ERR, "port %u failed to create table for default"
8718 				" Tx metadata copy flow rule", dev->data->port_id);
8719 			goto err;
8720 		}
8721 	}
8722 	/* Create LACP default miss table. */
8723 	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
8724 		lacp_rx_items_tmpl = flow_hw_create_lacp_rx_pattern_template(dev, error);
8725 		if (!lacp_rx_items_tmpl) {
8726 			DRV_LOG(ERR, "port %u failed to create pattern template"
8727 				" for LACP Rx traffic", dev->data->port_id);
8728 			goto err;
8729 		}
8730 		lacp_rx_actions_tmpl = flow_hw_create_lacp_rx_actions_template(dev, error);
8731 		if (!lacp_rx_actions_tmpl) {
8732 			DRV_LOG(ERR, "port %u failed to create actions template"
8733 				" for LACP Rx traffic", dev->data->port_id);
8734 			goto err;
8735 		}
8736 		priv->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table(dev, lacp_rx_items_tmpl,
8737 								    lacp_rx_actions_tmpl, error);
8738 		if (!priv->hw_lacp_rx_tbl) {
8739 			DRV_LOG(ERR, "port %u failed to create template table for"
8740 				" for LACP Rx traffic", dev->data->port_id);
8741 			goto err;
8742 		}
8743 	}
8744 	return 0;
8745 err:
8746 	/* Do not overwrite the rte_errno. */
8747 	ret = -rte_errno;
8748 	if (ret == 0)
8749 		ret = rte_flow_error_set(error, EINVAL,
8750 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8751 					 "Failed to create control tables.");
8752 	if (priv->hw_tx_meta_cpy_tbl) {
8753 		flow_hw_table_destroy(dev, priv->hw_tx_meta_cpy_tbl, NULL);
8754 		priv->hw_tx_meta_cpy_tbl = NULL;
8755 	}
8756 	if (priv->hw_esw_zero_tbl) {
8757 		flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL);
8758 		priv->hw_esw_zero_tbl = NULL;
8759 	}
8760 	if (priv->hw_esw_sq_miss_tbl) {
8761 		flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL);
8762 		priv->hw_esw_sq_miss_tbl = NULL;
8763 	}
8764 	if (priv->hw_esw_sq_miss_root_tbl) {
8765 		flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL);
8766 		priv->hw_esw_sq_miss_root_tbl = NULL;
8767 	}
8768 	if (lacp_rx_actions_tmpl)
8769 		flow_hw_actions_template_destroy(dev, lacp_rx_actions_tmpl, NULL);
8770 	if (tx_meta_actions_tmpl)
8771 		flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL);
8772 	if (jump_one_actions_tmpl)
8773 		flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL);
8774 	if (port_actions_tmpl)
8775 		flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL);
8776 	if (regc_jump_actions_tmpl)
8777 		flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL);
8778 	if (lacp_rx_items_tmpl)
8779 		flow_hw_pattern_template_destroy(dev, lacp_rx_items_tmpl, NULL);
8780 	if (tx_meta_items_tmpl)
8781 		flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL);
8782 	if (port_items_tmpl)
8783 		flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL);
8784 	if (regc_sq_items_tmpl)
8785 		flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL);
8786 	if (esw_mgr_items_tmpl)
8787 		flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL);
8788 	return ret;
8789 }
8790 
8791 static void
8792 flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,
8793 		       struct mlx5_aso_ct_pools_mng *ct_mng)
8794 {
8795 	struct mlx5_priv *priv = dev->data->dev_private;
8796 
8797 	mlx5_aso_ct_queue_uninit(priv->sh, ct_mng);
8798 	mlx5_free(ct_mng);
8799 }
8800 
8801 static void
8802 flow_hw_ct_pool_destroy(struct rte_eth_dev *dev __rte_unused,
8803 			struct mlx5_aso_ct_pool *pool)
8804 {
8805 	if (pool->dr_action)
8806 		mlx5dr_action_destroy(pool->dr_action);
8807 	if (pool->devx_obj)
8808 		claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
8809 	if (pool->cts)
8810 		mlx5_ipool_destroy(pool->cts);
8811 	mlx5_free(pool);
8812 }
8813 
8814 static struct mlx5_aso_ct_pool *
8815 flow_hw_ct_pool_create(struct rte_eth_dev *dev,
8816 		       const struct rte_flow_port_attr *port_attr)
8817 {
8818 	struct mlx5_priv *priv = dev->data->dev_private;
8819 	struct mlx5_aso_ct_pool *pool;
8820 	struct mlx5_devx_obj *obj;
8821 	uint32_t nb_cts = rte_align32pow2(port_attr->nb_conn_tracks);
8822 	uint32_t log_obj_size = rte_log2_u32(nb_cts);
8823 	struct mlx5_indexed_pool_config cfg = {
8824 		.size = sizeof(struct mlx5_aso_ct_action),
8825 		.trunk_size = 1 << 12,
8826 		.per_core_cache = 1 << 13,
8827 		.need_lock = 1,
8828 		.release_mem_en = !!priv->sh->config.reclaim_mode,
8829 		.malloc = mlx5_malloc,
8830 		.free = mlx5_free,
8831 		.type = "mlx5_hw_ct_action",
8832 	};
8833 	int reg_id;
8834 	uint32_t flags;
8835 
8836 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
8837 		DRV_LOG(ERR, "Connection tracking is not supported "
8838 			     "in cross vHCA sharing mode");
8839 		rte_errno = ENOTSUP;
8840 		return NULL;
8841 	}
8842 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
8843 	if (!pool) {
8844 		rte_errno = ENOMEM;
8845 		return NULL;
8846 	}
8847 	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
8848 							  priv->sh->cdev->pdn,
8849 							  log_obj_size);
8850 	if (!obj) {
8851 		rte_errno = ENODATA;
8852 		DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
8853 		goto err;
8854 	}
8855 	pool->devx_obj = obj;
8856 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
8857 	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
8858 	if (priv->sh->config.dv_esw_en && priv->master)
8859 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
8860 	pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
8861 						      (struct mlx5dr_devx_obj *)obj,
8862 						      reg_id - REG_C_0, flags);
8863 	if (!pool->dr_action)
8864 		goto err;
8865 	/*
8866 	 * No need for a local cache if the CT number is small, since the flow
8867 	 * insertion rate will be very limited in that case. Shrink the trunk
8868 	 * size to the CT number when it is below the default 4K trunk size.
8869 	 */
8870 	if (nb_cts <= cfg.trunk_size) {
8871 		cfg.per_core_cache = 0;
8872 		cfg.trunk_size = nb_cts;
8873 	} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
8874 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
8875 	}
8876 	pool->cts = mlx5_ipool_create(&cfg);
8877 	if (!pool->cts)
8878 		goto err;
8879 	pool->sq = priv->ct_mng->aso_sqs;
8880 	/* Assign the last extra ASO SQ as public SQ. */
8881 	pool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];
8882 	return pool;
8883 err:
8884 	flow_hw_ct_pool_destroy(dev, pool);
8885 	return NULL;
8886 }
8887 
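/*
 * Editor's sketch (not part of the original driver): illustrates the CT pool
 * sizing performed in flow_hw_ct_pool_create(). The function name and the
 * request of 1000 connection tracking objects are hypothetical.
 */
static __rte_unused void
flow_hw_ct_pool_sizing_example(void)
{
	uint32_t nb_conn_tracks = 1000;				/* hypothetical request */
	uint32_t nb_cts = rte_align32pow2(nb_conn_tracks);	/* 1024 */
	uint32_t log_obj_size = rte_log2_u32(nb_cts);		/* 10 */

	/*
	 * 1024 is below the default 4K trunk size, so the ipool is created
	 * without a per-core cache and with the trunk shrunk to 1024 entries.
	 * Requests above 4K but below MLX5_HW_IPOOL_SIZE_THRESHOLD would
	 * instead use the reduced MLX5_HW_IPOOL_CACHE_MIN per-core cache.
	 */
	(void)log_obj_size;
}
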
8888 static void
8889 flow_hw_destroy_vlan(struct rte_eth_dev *dev)
8890 {
8891 	struct mlx5_priv *priv = dev->data->dev_private;
8892 	enum mlx5dr_table_type i;
8893 
8894 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
8895 		if (priv->hw_pop_vlan[i]) {
8896 			mlx5dr_action_destroy(priv->hw_pop_vlan[i]);
8897 			priv->hw_pop_vlan[i] = NULL;
8898 		}
8899 		if (priv->hw_push_vlan[i]) {
8900 			mlx5dr_action_destroy(priv->hw_push_vlan[i]);
8901 			priv->hw_push_vlan[i] = NULL;
8902 		}
8903 	}
8904 }
8905 
8906 static int
8907 flow_hw_create_vlan(struct rte_eth_dev *dev)
8908 {
8909 	struct mlx5_priv *priv = dev->data->dev_private;
8910 	enum mlx5dr_table_type i;
8911 	const enum mlx5dr_action_flags flags[MLX5DR_TABLE_TYPE_MAX] = {
8912 		MLX5DR_ACTION_FLAG_HWS_RX,
8913 		MLX5DR_ACTION_FLAG_HWS_TX,
8914 		MLX5DR_ACTION_FLAG_HWS_FDB
8915 	};
8916 
8917 	/* rte_errno is set in the mlx5dr_action* functions. */
8918 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++) {
8919 		priv->hw_pop_vlan[i] =
8920 			mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]);
8921 		if (!priv->hw_pop_vlan[i])
8922 			return -rte_errno;
8923 		priv->hw_push_vlan[i] =
8924 			mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]);
8925 		if (!priv->hw_push_vlan[i])
8926 			return -rte_errno;
8927 	}
8928 	if (priv->sh->config.dv_esw_en && priv->master) {
8929 		priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB] =
8930 			mlx5dr_action_create_pop_vlan
8931 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
8932 		if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB])
8933 			return -rte_errno;
8934 		priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB] =
8935 			mlx5dr_action_create_push_vlan
8936 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
8937 		if (!priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB])
8938 			return -rte_errno;
8939 	}
8940 	return 0;
8941 }
8942 
8943 static void
8944 flow_hw_cleanup_ctrl_rx_tables(struct rte_eth_dev *dev)
8945 {
8946 	struct mlx5_priv *priv = dev->data->dev_private;
8947 	unsigned int i;
8948 	unsigned int j;
8949 
8950 	if (!priv->hw_ctrl_rx)
8951 		return;
8952 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
8953 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
8954 			struct rte_flow_template_table *tbl = priv->hw_ctrl_rx->tables[i][j].tbl;
8955 			struct rte_flow_pattern_template *pt = priv->hw_ctrl_rx->tables[i][j].pt;
8956 
8957 			if (tbl)
8958 				claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
8959 			if (pt)
8960 				claim_zero(flow_hw_pattern_template_destroy(dev, pt, NULL));
8961 		}
8962 	}
8963 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++i) {
8964 		struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[i];
8965 
8966 		if (at)
8967 			claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
8968 	}
8969 	mlx5_free(priv->hw_ctrl_rx);
8970 	priv->hw_ctrl_rx = NULL;
8971 }
8972 
8973 static uint64_t
8974 flow_hw_ctrl_rx_rss_type_hash_types(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
8975 {
8976 	switch (rss_type) {
8977 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP:
8978 		return 0;
8979 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
8980 		return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
8981 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
8982 		return RTE_ETH_RSS_NONFRAG_IPV4_UDP;
8983 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
8984 		return RTE_ETH_RSS_NONFRAG_IPV4_TCP;
8985 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
8986 		return RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
8987 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
8988 		return RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX;
8989 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
8990 		return RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX;
8991 	default:
8992 		/* Should not reach here. */
8993 		MLX5_ASSERT(false);
8994 		return 0;
8995 	}
8996 }
8997 
8998 static struct rte_flow_actions_template *
8999 flow_hw_create_ctrl_rx_rss_template(struct rte_eth_dev *dev,
9000 				    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9001 {
9002 	struct mlx5_priv *priv = dev->data->dev_private;
9003 	struct rte_flow_actions_template_attr attr = {
9004 		.ingress = 1,
9005 	};
9006 	uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
9007 	struct rte_flow_action_rss rss_conf = {
9008 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
9009 		.level = 0,
9010 		.types = 0,
9011 		.key_len = priv->rss_conf.rss_key_len,
9012 		.key = priv->rss_conf.rss_key,
9013 		.queue_num = priv->reta_idx_n,
9014 		.queue = queue,
9015 	};
9016 	struct rte_flow_action actions[] = {
9017 		{
9018 			.type = RTE_FLOW_ACTION_TYPE_RSS,
9019 			.conf = &rss_conf,
9020 		},
9021 		{
9022 			.type = RTE_FLOW_ACTION_TYPE_END,
9023 		}
9024 	};
9025 	struct rte_flow_action masks[] = {
9026 		{
9027 			.type = RTE_FLOW_ACTION_TYPE_RSS,
9028 			.conf = &rss_conf,
9029 		},
9030 		{
9031 			.type = RTE_FLOW_ACTION_TYPE_END,
9032 		}
9033 	};
9034 	struct rte_flow_actions_template *at;
9035 	struct rte_flow_error error;
9036 	unsigned int i;
9037 
9038 	MLX5_ASSERT(priv->reta_idx_n > 0 && priv->reta_idx);
9039 	/* Select proper RSS hash types and based on that configure the actions template. */
9040 	rss_conf.types = flow_hw_ctrl_rx_rss_type_hash_types(rss_type);
9041 	if (rss_conf.types) {
9042 		for (i = 0; i < priv->reta_idx_n; ++i)
9043 			queue[i] = (*priv->reta_idx)[i];
9044 	} else {
9045 		rss_conf.queue_num = 1;
9046 		queue[0] = (*priv->reta_idx)[0];
9047 	}
9048 	at = flow_hw_actions_template_create(dev, &attr, actions, masks, &error);
9049 	if (!at)
9050 		DRV_LOG(ERR,
9051 			"Failed to create ctrl flow actions template: rte_errno(%d), type(%d): %s",
9052 			rte_errno, error.type,
9053 			error.message ? error.message : "(no stated reason)");
9054 	return at;
9055 }
9056 
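/*
 * Editor's note (illustrative): for the MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP
 * variant the template above ends up with rss_conf.types ==
 * RTE_ETH_RSS_NONFRAG_IPV4_TCP and the queue list filled from the device RETA,
 * while the NON_IP variant degenerates to a single-queue action (types == 0,
 * queue_num == 1) that does not spread traffic.
 */
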
9057 static uint32_t ctrl_rx_rss_priority_map[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX] = {
9058 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP] = MLX5_HW_CTRL_RX_PRIO_L2,
9059 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4] = MLX5_HW_CTRL_RX_PRIO_L3,
9060 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
9061 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
9062 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6] = MLX5_HW_CTRL_RX_PRIO_L3,
9063 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
9064 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
9065 };
9066 
9067 static uint32_t ctrl_rx_nb_flows_map[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] = {
9068 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL] = 1,
9069 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST] = 1,
9070 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST] = 1,
9071 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN] = MLX5_MAX_VLAN_IDS,
9072 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST] = 1,
9073 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
9074 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST] = 1,
9075 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
9076 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC] = MLX5_MAX_UC_MAC_ADDRESSES,
9077 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN] =
9078 			MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
9079 };
9080 
9081 static struct rte_flow_template_table_attr
9082 flow_hw_get_ctrl_rx_table_attr(enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
9083 			       const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9084 {
9085 	return (struct rte_flow_template_table_attr){
9086 		.flow_attr = {
9087 			.group = 0,
9088 			.priority = ctrl_rx_rss_priority_map[rss_type],
9089 			.ingress = 1,
9090 		},
9091 		.nb_flows = ctrl_rx_nb_flows_map[eth_pattern_type],
9092 	};
9093 }
9094 
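/*
 * Editor's sketch (not part of the original driver): shows how the two maps
 * above combine into a table attribute, e.g. for unicast DMAC+VLAN patterns
 * expanded for IPv4/UDP RSS. The function name below is hypothetical.
 */
static __rte_unused void
flow_hw_ctrl_rx_table_attr_example(void)
{
	struct rte_flow_template_table_attr attr =
		flow_hw_get_ctrl_rx_table_attr(MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
					       MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP);

	/*
	 * attr.flow_attr.priority == MLX5_HW_CTRL_RX_PRIO_L4 and
	 * attr.nb_flows == MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
	 * i.e. one rule slot per (MAC address, VLAN ID) pair.
	 */
	(void)attr;
}
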
9095 static struct rte_flow_item
9096 flow_hw_get_ctrl_rx_eth_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
9097 {
9098 	struct rte_flow_item item = {
9099 		.type = RTE_FLOW_ITEM_TYPE_ETH,
9100 		.mask = NULL,
9101 	};
9102 
9103 	switch (eth_pattern_type) {
9104 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
9105 		item.mask = &ctrl_rx_eth_promisc_mask;
9106 		break;
9107 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
9108 		item.mask = &ctrl_rx_eth_mcast_mask;
9109 		break;
9110 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
9111 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
9112 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
9113 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
9114 		item.mask = &ctrl_rx_eth_dmac_mask;
9115 		break;
9116 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
9117 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
9118 		item.mask = &ctrl_rx_eth_ipv4_mcast_mask;
9119 		break;
9120 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
9121 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
9122 		item.mask = &ctrl_rx_eth_ipv6_mcast_mask;
9123 		break;
9124 	default:
9125 		/* Should not reach here - ETH mask must be present. */
9126 		item.type = RTE_FLOW_ITEM_TYPE_END;
9127 		MLX5_ASSERT(false);
9128 		break;
9129 	}
9130 	return item;
9131 }
9132 
9133 static struct rte_flow_item
9134 flow_hw_get_ctrl_rx_vlan_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
9135 {
9136 	struct rte_flow_item item = {
9137 		.type = RTE_FLOW_ITEM_TYPE_VOID,
9138 		.mask = NULL,
9139 	};
9140 
9141 	switch (eth_pattern_type) {
9142 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
9143 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
9144 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
9145 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
9146 		item.type = RTE_FLOW_ITEM_TYPE_VLAN;
9147 		item.mask = &rte_flow_item_vlan_mask;
9148 		break;
9149 	default:
9150 		/* Nothing to update. */
9151 		break;
9152 	}
9153 	return item;
9154 }
9155 
9156 static struct rte_flow_item
9157 flow_hw_get_ctrl_rx_l3_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9158 {
9159 	struct rte_flow_item item = {
9160 		.type = RTE_FLOW_ITEM_TYPE_VOID,
9161 		.mask = NULL,
9162 	};
9163 
9164 	switch (rss_type) {
9165 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
9166 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
9167 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
9168 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
9169 		break;
9170 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
9171 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
9172 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
9173 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
9174 		break;
9175 	default:
9176 		/* Nothing to update. */
9177 		break;
9178 	}
9179 	return item;
9180 }
9181 
9182 static struct rte_flow_item
9183 flow_hw_get_ctrl_rx_l4_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9184 {
9185 	struct rte_flow_item item = {
9186 		.type = RTE_FLOW_ITEM_TYPE_VOID,
9187 		.mask = NULL,
9188 	};
9189 
9190 	switch (rss_type) {
9191 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
9192 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
9193 		item.type = RTE_FLOW_ITEM_TYPE_UDP;
9194 		break;
9195 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
9196 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
9197 		item.type = RTE_FLOW_ITEM_TYPE_TCP;
9198 		break;
9199 	default:
9200 		/* Nothing to update. */
9201 		break;
9202 	}
9203 	return item;
9204 }
9205 
9206 static struct rte_flow_pattern_template *
9207 flow_hw_create_ctrl_rx_pattern_template
9208 		(struct rte_eth_dev *dev,
9209 		 const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
9210 		 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9211 {
9212 	const struct rte_flow_pattern_template_attr attr = {
9213 		.relaxed_matching = 0,
9214 		.ingress = 1,
9215 	};
9216 	struct rte_flow_item items[] = {
9217 		/* Matching patterns */
9218 		flow_hw_get_ctrl_rx_eth_item(eth_pattern_type),
9219 		flow_hw_get_ctrl_rx_vlan_item(eth_pattern_type),
9220 		flow_hw_get_ctrl_rx_l3_item(rss_type),
9221 		flow_hw_get_ctrl_rx_l4_item(rss_type),
9222 		/* Terminate pattern */
9223 		{ .type = RTE_FLOW_ITEM_TYPE_END }
9224 	};
9225 
9226 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
9227 }
9228 
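/*
 * Editor's note (illustrative): the helper above simply concatenates the four
 * per-layer items; e.g. the (DMAC_VLAN, IPV4_UDP) combination yields the
 * pattern ETH(dmac mask) / VLAN / IPV4 / UDP / END, while layers that do not
 * apply collapse to VOID items, which matching ignores.
 */
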
9229 static int
9230 flow_hw_create_ctrl_rx_tables(struct rte_eth_dev *dev)
9231 {
9232 	struct mlx5_priv *priv = dev->data->dev_private;
9233 	unsigned int i;
9234 	unsigned int j;
9235 	int ret;
9236 
9237 	MLX5_ASSERT(!priv->hw_ctrl_rx);
9238 	priv->hw_ctrl_rx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*priv->hw_ctrl_rx),
9239 				       RTE_CACHE_LINE_SIZE, rte_socket_id());
9240 	if (!priv->hw_ctrl_rx) {
9241 		DRV_LOG(ERR, "Failed to allocate memory for Rx control flow tables");
9242 		rte_errno = ENOMEM;
9243 		return -rte_errno;
9244 	}
9245 	/* Create all pattern template variants. */
9246 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
9247 		enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
9248 
9249 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
9250 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
9251 			struct rte_flow_template_table_attr attr;
9252 			struct rte_flow_pattern_template *pt;
9253 
9254 			attr = flow_hw_get_ctrl_rx_table_attr(eth_pattern_type, rss_type);
9255 			pt = flow_hw_create_ctrl_rx_pattern_template(dev, eth_pattern_type,
9256 								     rss_type);
9257 			if (!pt)
9258 				goto err;
9259 			priv->hw_ctrl_rx->tables[i][j].attr = attr;
9260 			priv->hw_ctrl_rx->tables[i][j].pt = pt;
9261 		}
9262 	}
9263 	return 0;
9264 err:
9265 	ret = rte_errno;
9266 	flow_hw_cleanup_ctrl_rx_tables(dev);
9267 	rte_errno = ret;
9268 	return -ret;
9269 }
9270 
9271 void
9272 mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev)
9273 {
9274 	struct mlx5_priv *priv = dev->data->dev_private;
9275 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
9276 	unsigned int i;
9277 	unsigned int j;
9278 
9279 	if (!priv->dr_ctx)
9280 		return;
9281 	if (!priv->hw_ctrl_rx)
9282 		return;
9283 	hw_ctrl_rx = priv->hw_ctrl_rx;
9284 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
9285 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
9286 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
9287 
9288 			if (tmpls->tbl) {
9289 				claim_zero(flow_hw_table_destroy(dev, tmpls->tbl, NULL));
9290 				tmpls->tbl = NULL;
9291 			}
9292 		}
9293 	}
9294 	for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
9295 		if (hw_ctrl_rx->rss[j]) {
9296 			claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_rx->rss[j], NULL));
9297 			hw_ctrl_rx->rss[j] = NULL;
9298 		}
9299 	}
9300 }
9301 
9302 /**
9303  * Copy the provided HWS configuration to a newly allocated buffer.
9304  *
9305  * @param[in] port_attr
9306  *   Port configuration attributes.
9307  * @param[in] nb_queue
9308  *   Number of queues.
9309  * @param[in] queue_attr
9310  *   Array that holds attributes for each flow queue.
9311  *
9312  * @return
9313  *   Pointer to copied HWS configuration is returned on success.
9314  *   Otherwise, NULL is returned and rte_errno is set.
9315  */
9316 static struct mlx5_flow_hw_attr *
9317 flow_hw_alloc_copy_config(const struct rte_flow_port_attr *port_attr,
9318 			  const uint16_t nb_queue,
9319 			  const struct rte_flow_queue_attr *queue_attr[],
9320 			  struct rte_flow_error *error)
9321 {
9322 	struct mlx5_flow_hw_attr *hw_attr;
9323 	size_t hw_attr_size;
9324 	unsigned int i;
9325 
9326 	hw_attr_size = sizeof(*hw_attr) + nb_queue * sizeof(*hw_attr->queue_attr);
9327 	hw_attr = mlx5_malloc(MLX5_MEM_ZERO, hw_attr_size, 0, SOCKET_ID_ANY);
9328 	if (!hw_attr) {
9329 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9330 				   "Not enough memory to store configuration");
9331 		return NULL;
9332 	}
9333 	memcpy(&hw_attr->port_attr, port_attr, sizeof(*port_attr));
9334 	hw_attr->nb_queue = nb_queue;
9335 	/* Queue attributes are placed after the mlx5_flow_hw_attr. */
9336 	hw_attr->queue_attr = (struct rte_flow_queue_attr *)(hw_attr + 1);
9337 	for (i = 0; i < nb_queue; ++i)
9338 		memcpy(&hw_attr->queue_attr[i], queue_attr[i], sizeof(hw_attr->queue_attr[i]));
9339 	return hw_attr;
9340 }
9341 
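/*
 * Editor's note (illustrative): the copy above is a single allocation laid out
 * as | struct mlx5_flow_hw_attr | nb_queue x struct rte_flow_queue_attr |, so
 * hw_attr->queue_attr points directly past the header ((hw_attr + 1)) and the
 * whole preserved configuration can later be released with one mlx5_free().
 */
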
9342 /**
9343  * Compares the preserved HWS configuration with the provided one.
9344  *
9345  * @param[in] hw_attr
9346  *   Pointer to preserved HWS configuration.
9347  * @param[in] new_pa
9348  *   Port configuration attributes to compare.
9349  * @param[in] new_nbq
9350  *   Number of queues to compare.
9351  * @param[in] new_qa
9352  *   Array that holds attributes for each flow queue.
9353  *
9354  * @return
9355  *   True if configurations are the same, false otherwise.
9356  */
9357 static bool
9358 flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
9359 		       const struct rte_flow_port_attr *new_pa,
9360 		       const uint16_t new_nbq,
9361 		       const struct rte_flow_queue_attr *new_qa[])
9362 {
9363 	const struct rte_flow_port_attr *old_pa = &hw_attr->port_attr;
9364 	const uint16_t old_nbq = hw_attr->nb_queue;
9365 	const struct rte_flow_queue_attr *old_qa = hw_attr->queue_attr;
9366 	unsigned int i;
9367 
9368 	if (old_pa->nb_counters != new_pa->nb_counters ||
9369 	    old_pa->nb_aging_objects != new_pa->nb_aging_objects ||
9370 	    old_pa->nb_meters != new_pa->nb_meters ||
9371 	    old_pa->nb_conn_tracks != new_pa->nb_conn_tracks ||
9372 	    old_pa->flags != new_pa->flags)
9373 		return false;
9374 	if (old_nbq != new_nbq)
9375 		return false;
9376 	for (i = 0; i < old_nbq; ++i)
9377 		if (old_qa[i].size != new_qa[i]->size)
9378 			return false;
9379 	return true;
9380 }
9381 
9382 /**
9383  * Configure port HWS resources.
9384  *
9385  * @param[in] dev
9386  *   Pointer to the rte_eth_dev structure.
9387  * @param[in] port_attr
9388  *   Port configuration attributes.
9389  * @param[in] nb_queue
9390  *   Number of queues.
9391  * @param[in] queue_attr
9392  *   Array that holds attributes for each flow queue.
9393  * @param[out] error
9394  *   Pointer to error structure.
9395  *
9396  * @return
9397  *   0 on success, a negative errno value otherwise and rte_errno is set.
9398  */
9399 static int
9400 flow_hw_configure(struct rte_eth_dev *dev,
9401 		  const struct rte_flow_port_attr *port_attr,
9402 		  uint16_t nb_queue,
9403 		  const struct rte_flow_queue_attr *queue_attr[],
9404 		  struct rte_flow_error *error)
9405 {
9406 	struct mlx5_priv *priv = dev->data->dev_private;
9407 	struct mlx5_priv *host_priv = NULL;
9408 	struct mlx5dr_context *dr_ctx = NULL;
9409 	struct mlx5dr_context_attr dr_ctx_attr = {0};
9410 	struct mlx5_hw_q *hw_q;
9411 	struct mlx5_hw_q_job *job = NULL;
9412 	uint32_t mem_size, i, j;
9413 	struct mlx5_indexed_pool_config cfg = {
9414 		.size = sizeof(struct mlx5_action_construct_data),
9415 		.trunk_size = 4096,
9416 		.need_lock = 1,
9417 		.release_mem_en = !!priv->sh->config.reclaim_mode,
9418 		.malloc = mlx5_malloc,
9419 		.free = mlx5_free,
9420 		.type = "mlx5_hw_action_construct_data",
9421 	};
9422 	/*
9423 	 * One additional queue is appended for internal PMD usage;
9424 	 * this last queue is reserved for the PMD.
9425 	 */
9426 	uint16_t nb_q_updated = 0;
9427 	struct rte_flow_queue_attr **_queue_attr = NULL;
9428 	struct rte_flow_queue_attr ctrl_queue_attr = {0};
9429 	bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
9430 	int ret = 0;
9431 	uint32_t action_flags;
9432 
9433 	if (!port_attr || !nb_queue || !queue_attr) {
9434 		rte_errno = EINVAL;
9435 		goto err;
9436 	}
9437 	/*
9438 	 * Calling rte_flow_configure() again is allowed if and only if
9439 	 * provided configuration matches the initially provided one.
9440 	 */
9441 	if (priv->dr_ctx) {
9442 		MLX5_ASSERT(priv->hw_attr != NULL);
9443 		for (i = 0; i < priv->nb_queue; i++) {
9444 			hw_q = &priv->hw_q[i];
9445 			/* Make sure all queues are empty. */
9446 			if (hw_q->size != hw_q->job_idx) {
9447 				rte_errno = EBUSY;
9448 				goto err;
9449 			}
9450 		}
9451 		if (flow_hw_compare_config(priv->hw_attr, port_attr, nb_queue, queue_attr))
9452 			return 0;
9453 		else
9454 			return rte_flow_error_set(error, ENOTSUP,
9455 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9456 						  "Changing HWS configuration attributes "
9457 						  "is not supported");
9458 	}
9459 	priv->hw_attr = flow_hw_alloc_copy_config(port_attr, nb_queue, queue_attr, error);
9460 	if (!priv->hw_attr) {
9461 		ret = -rte_errno;
9462 		goto err;
9463 	}
9464 	ctrl_queue_attr.size = queue_attr[0]->size;
9465 	nb_q_updated = nb_queue + 1;
9466 	_queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
9467 				  nb_q_updated *
9468 				  sizeof(struct rte_flow_queue_attr *),
9469 				  64, SOCKET_ID_ANY);
9470 	if (!_queue_attr) {
9471 		rte_errno = ENOMEM;
9472 		goto err;
9473 	}
9474 
9475 	memcpy(_queue_attr, queue_attr, sizeof(void *) * nb_queue);
9476 	_queue_attr[nb_queue] = &ctrl_queue_attr;
9477 	priv->acts_ipool = mlx5_ipool_create(&cfg);
9478 	if (!priv->acts_ipool)
9479 		goto err;
9480 	/* Allocate the queue job descriptor LIFO. */
9481 	mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
9482 	for (i = 0; i < nb_q_updated; i++) {
9483 		/*
9484 		 * Check that all queue sizes are the same, as required
9485 		 * by the HWS layer.
9486 		 */
9487 		if (_queue_attr[i]->size != _queue_attr[0]->size) {
9488 			rte_errno = EINVAL;
9489 			goto err;
9490 		}
9491 		mem_size += (sizeof(struct mlx5_hw_q_job *) +
9492 			    sizeof(struct mlx5_hw_q_job) +
9493 			    sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
9494 			    sizeof(uint8_t) * MLX5_PUSH_MAX_LEN +
9495 			    sizeof(struct mlx5_modification_cmd) *
9496 			    MLX5_MHDR_MAX_CMD +
9497 			    sizeof(struct rte_flow_item) *
9498 			    MLX5_HW_MAX_ITEMS +
9499 				sizeof(struct rte_flow_hw)) *
9500 			    _queue_attr[i]->size;
9501 	}
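	/*
	 * Editor's note (illustrative): the single allocation below holds the
	 * mlx5_hw_q array followed, per queue, by a job pointer table, the job
	 * descriptors themselves and their per-job scratch buffers (modify
	 * header commands, encap data, VLAN push data, flow items and an
	 * update flow object), carved out in that order by the loop below.
	 */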
9502 	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
9503 				 64, SOCKET_ID_ANY);
9504 	if (!priv->hw_q) {
9505 		rte_errno = ENOMEM;
9506 		goto err;
9507 	}
9508 	for (i = 0; i < nb_q_updated; i++) {
9509 		char mz_name[RTE_MEMZONE_NAMESIZE];
9510 		uint8_t *encap = NULL, *push = NULL;
9511 		struct mlx5_modification_cmd *mhdr_cmd = NULL;
9512 		struct rte_flow_item *items = NULL;
9513 		struct rte_flow_hw *upd_flow = NULL;
9514 
9515 		priv->hw_q[i].job_idx = _queue_attr[i]->size;
9516 		priv->hw_q[i].size = _queue_attr[i]->size;
9517 		if (i == 0)
9518 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
9519 					    &priv->hw_q[nb_q_updated];
9520 		else
9521 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
9522 				&job[_queue_attr[i - 1]->size - 1].upd_flow[1];
9523 		job = (struct mlx5_hw_q_job *)
9524 		      &priv->hw_q[i].job[_queue_attr[i]->size];
9525 		mhdr_cmd = (struct mlx5_modification_cmd *)
9526 			   &job[_queue_attr[i]->size];
9527 		encap = (uint8_t *)
9528 			 &mhdr_cmd[_queue_attr[i]->size * MLX5_MHDR_MAX_CMD];
9529 		push = (uint8_t *)
9530 			 &encap[_queue_attr[i]->size * MLX5_ENCAP_MAX_LEN];
9531 		items = (struct rte_flow_item *)
9532 			 &push[_queue_attr[i]->size * MLX5_PUSH_MAX_LEN];
9533 		upd_flow = (struct rte_flow_hw *)
9534 			&items[_queue_attr[i]->size * MLX5_HW_MAX_ITEMS];
9535 		for (j = 0; j < _queue_attr[i]->size; j++) {
9536 			job[j].mhdr_cmd = &mhdr_cmd[j * MLX5_MHDR_MAX_CMD];
9537 			job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];
9538 			job[j].push_data = &push[j * MLX5_PUSH_MAX_LEN];
9539 			job[j].items = &items[j * MLX5_HW_MAX_ITEMS];
9540 			job[j].upd_flow = &upd_flow[j];
9541 			priv->hw_q[i].job[j] = &job[j];
9542 		}
9543 		snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_cq_%u",
9544 			 dev->data->port_id, i);
9545 		priv->hw_q[i].indir_cq = rte_ring_create(mz_name,
9546 				_queue_attr[i]->size, SOCKET_ID_ANY,
9547 				RING_F_SP_ENQ | RING_F_SC_DEQ |
9548 				RING_F_EXACT_SZ);
9549 		if (!priv->hw_q[i].indir_cq)
9550 			goto err;
9551 		snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_iq_%u",
9552 			 dev->data->port_id, i);
9553 		priv->hw_q[i].indir_iq = rte_ring_create(mz_name,
9554 				_queue_attr[i]->size, SOCKET_ID_ANY,
9555 				RING_F_SP_ENQ | RING_F_SC_DEQ |
9556 				RING_F_EXACT_SZ);
9557 		if (!priv->hw_q[i].indir_iq)
9558 			goto err;
9559 	}
9560 	dr_ctx_attr.pd = priv->sh->cdev->pd;
9561 	dr_ctx_attr.queues = nb_q_updated;
9562 	/* Queue sizes should all be the same. Take the first one. */
9563 	dr_ctx_attr.queue_size = _queue_attr[0]->size;
9564 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
9565 		struct rte_eth_dev *host_dev = NULL;
9566 		uint16_t port_id;
9567 
9568 		MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
9569 		if (is_proxy) {
9570 			DRV_LOG(ERR, "cross vHCA shared mode not supported "
9571 				"for E-Switch confgiurations");
9572 			rte_errno = ENOTSUP;
9573 			goto err;
9574 		}
9575 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
9576 			if (port_id == port_attr->host_port_id) {
9577 				host_dev = &rte_eth_devices[port_id];
9578 				break;
9579 			}
9580 		}
9581 		if (!host_dev || host_dev == dev ||
9582 		    !host_dev->data || !host_dev->data->dev_private) {
9583 			DRV_LOG(ERR, "Invalid cross vHCA host port %u",
9584 				port_attr->host_port_id);
9585 			rte_errno = EINVAL;
9586 			goto err;
9587 		}
9588 		host_priv = host_dev->data->dev_private;
9589 		if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
9590 			DRV_LOG(ERR, "Sibling ports %u and %u do not "
9591 				     "require cross vHCA sharing mode",
9592 				dev->data->port_id, port_attr->host_port_id);
9593 			rte_errno = EINVAL;
9594 			goto err;
9595 		}
9596 		if (host_priv->shared_host) {
9597 			DRV_LOG(ERR, "Host port %u is not the sharing base",
9598 				port_attr->host_port_id);
9599 			rte_errno = EINVAL;
9600 			goto err;
9601 		}
9602 		if (port_attr->nb_counters ||
9603 		    port_attr->nb_aging_objects ||
9604 		    port_attr->nb_meters ||
9605 		    port_attr->nb_conn_tracks) {
9606 			DRV_LOG(ERR,
9607 				"Object numbers on a guest port must be zero");
9608 			rte_errno = EINVAL;
9609 			goto err;
9610 		}
9611 		dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
9612 		priv->shared_host = host_dev;
9613 		__atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
9614 	}
9615 	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
9616 	/* rte_errno has been updated by HWS layer. */
9617 	if (!dr_ctx)
9618 		goto err;
9619 	priv->dr_ctx = dr_ctx;
9620 	priv->nb_queue = nb_q_updated;
9621 	rte_spinlock_init(&priv->hw_ctrl_lock);
9622 	LIST_INIT(&priv->hw_ctrl_flows);
9623 	LIST_INIT(&priv->hw_ext_ctrl_flows);
9624 	ret = flow_hw_create_ctrl_rx_tables(dev);
9625 	if (ret) {
9626 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9627 				   "Failed to set up Rx control flow templates");
9628 		goto err;
9629 	}
9630 	/* Initialize quotas */
9631 	if (port_attr->nb_quotas || (host_priv && host_priv->quota_ctx.devx_obj)) {
9632 		ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas);
9633 		if (ret) {
9634 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9635 					   "Failed to initialize quota.");
9636 			goto err;
9637 		}
9638 	}
9639 	/* Initialize meter library. */
9640 	if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
9641 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 0, 0, nb_q_updated))
9642 			goto err;
9643 	/* Add global actions. */
9644 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
9645 		uint32_t act_flags = 0;
9646 
9647 		act_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];
9648 		if (is_proxy)
9649 			act_flags |= mlx5_hw_act_flag[i][2];
9650 		priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
9651 		if (!priv->hw_drop[i])
9652 			goto err;
9653 		priv->hw_tag[i] = mlx5dr_action_create_tag
9654 			(priv->dr_ctx, mlx5_hw_act_flag[i][0]);
9655 		if (!priv->hw_tag[i])
9656 			goto err;
9657 	}
9658 	if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) {
9659 		ret = flow_hw_setup_tx_repr_tagging(dev, error);
9660 		if (ret)
9661 			goto err;
9662 	}
9663 	/*
9664 	 * The DEFAULT_MISS action has different behaviors in different domains.
9665 	 * In FDB, it steers packets to the E-Switch manager.
9666 	 * In the NIC Rx root table, it steers packets to the kernel driver stack.
9667 	 * An action with all flag bits set can be created and the HWS
9668 	 * layer will translate it properly when used in different rules.
9669 	 */
9670 	action_flags = MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX |
9671 		       MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX;
9672 	if (is_proxy)
9673 		action_flags |= (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB);
9674 	priv->hw_def_miss = mlx5dr_action_create_default_miss(priv->dr_ctx, action_flags);
9675 	if (!priv->hw_def_miss)
9676 		goto err;
9677 	if (is_proxy) {
9678 		ret = flow_hw_create_vport_actions(priv);
9679 		if (ret) {
9680 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9681 					   NULL, "Failed to create vport actions.");
9682 			goto err;
9683 		}
9684 		ret = flow_hw_create_ctrl_tables(dev, error);
9685 		if (ret)
9686 			goto err;
9687 	}
9688 	if (!priv->shared_host)
9689 		flow_hw_create_send_to_kernel_actions(priv);
9690 	if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
9691 		mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
9692 			   sizeof(*priv->ct_mng);
9693 		priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
9694 					   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
9695 		if (!priv->ct_mng)
9696 			goto err;
9697 		if (mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng, nb_q_updated))
9698 			goto err;
9699 		priv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);
9700 		if (!priv->hws_ctpool)
9701 			goto err;
9702 		priv->sh->ct_aso_en = 1;
9703 	}
9704 	if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
9705 		priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
9706 							   nb_queue);
9707 		if (priv->hws_cpool == NULL)
9708 			goto err;
9709 	}
9710 	if (port_attr->nb_aging_objects) {
9711 		if (port_attr->nb_counters == 0) {
9712 			/*
9713 			 * Aging management uses counters. The number of
9714 			 * requested counters should account for a counter for
9715 			 * each flow rule containing AGE without COUNT.
9716 			 */
9717 			DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
9718 				"but no counters are requested.",
9719 				dev->data->port_id,
9720 				port_attr->nb_aging_objects);
9721 			rte_errno = EINVAL;
9722 			goto err;
9723 		}
9724 		ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue);
9725 		if (ret < 0) {
9726 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9727 					   NULL, "Failed to init age pool.");
9728 			goto err;
9729 		}
9730 	}
9731 	ret = flow_hw_create_vlan(dev);
9732 	if (ret) {
9733 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9734 				   NULL, "Failed to create VLAN actions.");
9735 		goto err;
9736 	}
9737 	if (_queue_attr)
9738 		mlx5_free(_queue_attr);
9739 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
9740 		priv->hws_strict_queue = 1;
9741 	dev->flow_fp_ops = &mlx5_flow_hw_fp_ops;
9742 	return 0;
9743 err:
9744 	if (priv->hws_ctpool) {
9745 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
9746 		priv->hws_ctpool = NULL;
9747 	}
9748 	if (priv->ct_mng) {
9749 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
9750 		priv->ct_mng = NULL;
9751 	}
9752 	if (priv->hws_age_req)
9753 		mlx5_hws_age_pool_destroy(priv);
9754 	if (priv->hws_cpool) {
9755 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
9756 		priv->hws_cpool = NULL;
9757 	}
9758 	mlx5_flow_quota_destroy(dev);
9759 	flow_hw_destroy_send_to_kernel_action(priv);
9760 	flow_hw_free_vport_actions(priv);
9761 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
9762 		if (priv->hw_drop[i])
9763 			mlx5dr_action_destroy(priv->hw_drop[i]);
9764 		if (priv->hw_tag[i])
9765 			mlx5dr_action_destroy(priv->hw_tag[i]);
9766 	}
9767 	if (priv->hw_def_miss)
9768 		mlx5dr_action_destroy(priv->hw_def_miss);
9769 	flow_hw_destroy_vlan(dev);
9770 	if (dr_ctx)
9771 		claim_zero(mlx5dr_context_close(dr_ctx));
9772 	for (i = 0; i < nb_q_updated; i++) {
9773 		rte_ring_free(priv->hw_q[i].indir_iq);
9774 		rte_ring_free(priv->hw_q[i].indir_cq);
9775 	}
9776 	mlx5_free(priv->hw_q);
9777 	priv->hw_q = NULL;
9778 	if (priv->acts_ipool) {
9779 		mlx5_ipool_destroy(priv->acts_ipool);
9780 		priv->acts_ipool = NULL;
9781 	}
9782 	if (_queue_attr)
9783 		mlx5_free(_queue_attr);
9784 	if (priv->shared_host) {
9785 		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
9786 		priv->shared_host = NULL;
9787 	}
9788 	mlx5_free(priv->hw_attr);
9789 	priv->hw_attr = NULL;
9790 	/* Do not overwrite the internal errno information. */
9791 	if (ret)
9792 		return ret;
9793 	return rte_flow_error_set(error, rte_errno,
9794 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9795 				  "failed to configure port");
9796 }
9797 
9798 /**
9799  * Release HWS resources.
9800  *
9801  * @param[in] dev
9802  *   Pointer to the rte_eth_dev structure.
9803  */
9804 void
9805 flow_hw_resource_release(struct rte_eth_dev *dev)
9806 {
9807 	struct mlx5_priv *priv = dev->data->dev_private;
9808 	struct rte_flow_template_table *tbl;
9809 	struct rte_flow_pattern_template *it;
9810 	struct rte_flow_actions_template *at;
9811 	struct mlx5_flow_group *grp;
9812 	uint32_t i;
9813 
9814 	if (!priv->dr_ctx)
9815 		return;
9816 	dev->flow_fp_ops = &rte_flow_fp_default_ops;
9817 	flow_hw_rxq_flag_set(dev, false);
9818 	flow_hw_flush_all_ctrl_flows(dev);
9819 	flow_hw_cleanup_tx_repr_tagging(dev);
9820 	flow_hw_cleanup_ctrl_rx_tables(dev);
9821 	while (!LIST_EMPTY(&priv->flow_hw_grp)) {
9822 		grp = LIST_FIRST(&priv->flow_hw_grp);
9823 		flow_hw_group_unset_miss_group(dev, grp, NULL);
9824 	}
9825 	while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) {
9826 		tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
9827 		flow_hw_table_destroy(dev, tbl, NULL);
9828 	}
9829 	while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
9830 		tbl = LIST_FIRST(&priv->flow_hw_tbl);
9831 		flow_hw_table_destroy(dev, tbl, NULL);
9832 	}
9833 	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
9834 		it = LIST_FIRST(&priv->flow_hw_itt);
9835 		flow_hw_pattern_template_destroy(dev, it, NULL);
9836 	}
9837 	while (!LIST_EMPTY(&priv->flow_hw_at)) {
9838 		at = LIST_FIRST(&priv->flow_hw_at);
9839 		flow_hw_actions_template_destroy(dev, at, NULL);
9840 	}
9841 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
9842 		if (priv->hw_drop[i])
9843 			mlx5dr_action_destroy(priv->hw_drop[i]);
9844 		if (priv->hw_tag[i])
9845 			mlx5dr_action_destroy(priv->hw_tag[i]);
9846 	}
9847 	if (priv->hw_def_miss)
9848 		mlx5dr_action_destroy(priv->hw_def_miss);
9849 	flow_hw_destroy_vlan(dev);
9850 	flow_hw_destroy_send_to_kernel_action(priv);
9851 	flow_hw_free_vport_actions(priv);
9852 	if (priv->acts_ipool) {
9853 		mlx5_ipool_destroy(priv->acts_ipool);
9854 		priv->acts_ipool = NULL;
9855 	}
9856 	if (priv->hws_age_req)
9857 		mlx5_hws_age_pool_destroy(priv);
9858 	if (priv->hws_cpool) {
9859 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
9860 		priv->hws_cpool = NULL;
9861 	}
9862 	if (priv->hws_ctpool) {
9863 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
9864 		priv->hws_ctpool = NULL;
9865 	}
9866 	if (priv->ct_mng) {
9867 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
9868 		priv->ct_mng = NULL;
9869 	}
9870 	mlx5_flow_quota_destroy(dev);
9871 	for (i = 0; i < priv->nb_queue; i++) {
9872 		rte_ring_free(priv->hw_q[i].indir_iq);
9873 		rte_ring_free(priv->hw_q[i].indir_cq);
9874 	}
9875 	mlx5_free(priv->hw_q);
9876 	priv->hw_q = NULL;
9877 	claim_zero(mlx5dr_context_close(priv->dr_ctx));
9878 	if (priv->shared_host) {
9879 		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
9880 		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
9881 		priv->shared_host = NULL;
9882 	}
9883 	priv->dr_ctx = NULL;
9884 	mlx5_free(priv->hw_attr);
9885 	priv->hw_attr = NULL;
9886 	priv->nb_queue = 0;
9887 }
9888 
9889 /* Sets the vport tag and mask used in HWS rules for the given port. */
9890 void
9891 flow_hw_set_port_info(struct rte_eth_dev *dev)
9892 {
9893 	struct mlx5_priv *priv = dev->data->dev_private;
9894 	uint16_t port_id = dev->data->port_id;
9895 	struct flow_hw_port_info *info;
9896 
9897 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
9898 	info = &mlx5_flow_hw_port_infos[port_id];
9899 	info->regc_mask = priv->vport_meta_mask;
9900 	info->regc_value = priv->vport_meta_tag;
9901 	info->is_wire = mlx5_is_port_on_mpesw_device(priv) ? priv->mpesw_uplink : priv->master;
9902 }
9903 
9904 /* Clears vport tag and mask used for HWS rules. */
9905 void
9906 flow_hw_clear_port_info(struct rte_eth_dev *dev)
9907 {
9908 	uint16_t port_id = dev->data->port_id;
9909 	struct flow_hw_port_info *info;
9910 
9911 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
9912 	info = &mlx5_flow_hw_port_infos[port_id];
9913 	info->regc_mask = 0;
9914 	info->regc_value = 0;
9915 	info->is_wire = 0;
9916 }
9917 
9918 static int
9919 flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,
9920 			  uint32_t idx,
9921 			  struct rte_flow_error *error)
9922 {
9923 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
9924 	uint32_t ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
9925 	struct rte_eth_dev *owndev = &rte_eth_devices[owner];
9926 	struct mlx5_priv *priv = owndev->data->dev_private;
9927 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
9928 	struct mlx5_aso_ct_action *ct;
9929 
9930 	ct = mlx5_ipool_get(pool->cts, ct_idx);
9931 	if (!ct) {
9932 		return rte_flow_error_set(error, EINVAL,
9933 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9934 				NULL,
9935 				"Invalid CT destruction index");
9936 	}
9937 	__atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
9938 				 __ATOMIC_RELAXED);
9939 	mlx5_ipool_free(pool->cts, ct_idx);
9940 	return 0;
9941 }
9942 
9943 static int
9944 flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,
9945 			struct rte_flow_action_conntrack *profile,
9946 			void *user_data, bool push,
9947 			struct rte_flow_error *error)
9948 {
9949 	struct mlx5_priv *priv = dev->data->dev_private;
9950 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
9951 	struct mlx5_aso_ct_action *ct;
9952 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
9953 	uint32_t ct_idx;
9954 
9955 	if (owner != PORT_ID(priv))
9956 		return rte_flow_error_set(error, EACCES,
9957 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9958 				NULL,
9959 				"Can't query CT object owned by another port");
9960 	ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
9961 	ct = mlx5_ipool_get(pool->cts, ct_idx);
9962 	if (!ct) {
9963 		return rte_flow_error_set(error, EINVAL,
9964 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9965 				NULL,
9966 				"Invalid CT query index");
9967 	}
9968 	profile->peer_port = ct->peer;
9969 	profile->is_original_dir = ct->is_original;
9970 	if (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))
9971 		return rte_flow_error_set(error, EIO,
9972 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9973 				NULL,
9974 				"Failed to query CT context");
9975 	return 0;
9976 }
9977 
9978 
9979 static int
9980 flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
9981 			 const struct rte_flow_modify_conntrack *action_conf,
9982 			 uint32_t idx, void *user_data, bool push,
9983 			 struct rte_flow_error *error)
9984 {
9985 	struct mlx5_priv *priv = dev->data->dev_private;
9986 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
9987 	struct mlx5_aso_ct_action *ct;
9988 	const struct rte_flow_action_conntrack *new_prf;
9989 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
9990 	uint32_t ct_idx;
9991 	int ret = 0;
9992 
9993 	if (PORT_ID(priv) != owner)
9994 		return rte_flow_error_set(error, EACCES,
9995 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9996 					  NULL,
9997 					  "Can't update CT object owned by another port");
9998 	ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
9999 	ct = mlx5_ipool_get(pool->cts, ct_idx);
10000 	if (!ct) {
10001 		return rte_flow_error_set(error, EINVAL,
10002 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10003 				NULL,
10004 				"Invalid CT update index");
10005 	}
10006 	new_prf = &action_conf->new_ct;
10007 	if (action_conf->direction)
10008 		ct->is_original = !!new_prf->is_original_dir;
10009 	if (action_conf->state) {
10010 		/* Only validate the profile when it needs to be updated. */
10011 		ret = mlx5_validate_action_ct(dev, new_prf, error);
10012 		if (ret)
10013 			return ret;
10014 		ret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,
10015 						user_data, push);
10016 		if (ret)
10017 			return rte_flow_error_set(error, EIO,
10018 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10019 					NULL,
10020 					"Failed to send CT context update WQE");
10021 		if (queue != MLX5_HW_INV_QUEUE)
10022 			return 0;
10023 		/* Block until ready or a failure in synchronous mode. */
10024 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
10025 		if (ret)
10026 			rte_flow_error_set(error, rte_errno,
10027 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10028 					   NULL,
10029 					   "Timed out waiting for the CT update");
10030 	}
10031 	return ret;
10032 }
10033 
10034 static struct rte_flow_action_handle *
10035 flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
10036 			 const struct rte_flow_action_conntrack *pro,
10037 			 void *user_data, bool push,
10038 			 struct rte_flow_error *error)
10039 {
10040 	struct mlx5_priv *priv = dev->data->dev_private;
10041 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
10042 	struct mlx5_aso_ct_action *ct;
10043 	uint32_t ct_idx = 0;
10044 	int ret;
10045 	bool async = !!(queue != MLX5_HW_INV_QUEUE);
10046 
10047 	if (!pool) {
10048 		rte_flow_error_set(error, EINVAL,
10049 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10050 				   "CT is not enabled");
10051 		return NULL;
10052 	}
10053 	ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
10054 	if (!ct) {
10055 		rte_flow_error_set(error, rte_errno,
10056 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10057 				   "Failed to allocate CT object");
10058 		return NULL;
10059 	}
10060 	ct->offset = ct_idx - 1;
10061 	ct->is_original = !!pro->is_original_dir;
10062 	ct->peer = pro->peer_port;
10063 	ct->pool = pool;
10064 	if (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {
10065 		mlx5_ipool_free(pool->cts, ct_idx);
10066 		rte_flow_error_set(error, EBUSY,
10067 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10068 				   "Failed to update CT");
10069 		return NULL;
10070 	}
10071 	if (!async) {
10072 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
10073 		if (ret) {
10074 			mlx5_ipool_free(pool->cts, ct_idx);
10075 			rte_flow_error_set(error, rte_errno,
10076 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10077 					   NULL,
10078 					   "Timed out waiting for the CT update");
10079 			return NULL;
10080 		}
10081 	}
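	/* The returned handle encodes both the owner port ID and the CT pool index. */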
10082 	return (struct rte_flow_action_handle *)(uintptr_t)
10083 		MLX5_ACTION_CTX_CT_GEN_IDX(PORT_ID(priv), ct_idx);
10084 }
10085 
10086 /**
10087  * Validate shared action.
10088  *
10089  * @param[in] dev
10090  *   Pointer to the rte_eth_dev structure.
10091  * @param[in] queue
10092  *   Which queue to be used.
10093  * @param[in] attr
10094  *   Operation attribute.
10095  * @param[in] conf
10096  *   Indirect action configuration.
10097  * @param[in] action
10098  *   rte_flow action detail.
10099  * @param[in] user_data
10100  *   Pointer to the user_data.
10101  * @param[out] error
10102  *   Pointer to error structure.
10103  *
10104  * @return
10105  *   0 on success, otherwise negative errno value.
10106  */
10107 static int
10108 flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
10109 			       const struct rte_flow_op_attr *attr,
10110 			       const struct rte_flow_indir_action_conf *conf,
10111 			       const struct rte_flow_action *action,
10112 			       void *user_data,
10113 			       struct rte_flow_error *error)
10114 {
10115 	struct mlx5_priv *priv = dev->data->dev_private;
10116 
10117 	RTE_SET_USED(attr);
10118 	RTE_SET_USED(queue);
10119 	RTE_SET_USED(user_data);
10120 	switch (action->type) {
10121 	case RTE_FLOW_ACTION_TYPE_AGE:
10122 		if (!priv->hws_age_req)
10123 			return rte_flow_error_set(error, EINVAL,
10124 						  RTE_FLOW_ERROR_TYPE_ACTION,
10125 						  NULL,
10126 						  "aging pool not initialized");
10127 		break;
10128 	case RTE_FLOW_ACTION_TYPE_COUNT:
10129 		if (!priv->hws_cpool)
10130 			return rte_flow_error_set(error, EINVAL,
10131 						  RTE_FLOW_ERROR_TYPE_ACTION,
10132 						  NULL,
10133 						  "counters pool not initialized");
10134 		break;
10135 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
10136 		if (priv->hws_ctpool == NULL)
10137 			return rte_flow_error_set(error, EINVAL,
10138 						  RTE_FLOW_ERROR_TYPE_ACTION,
10139 						  NULL,
10140 						  "CT pool not initialized");
10141 		return mlx5_validate_action_ct(dev, action->conf, error);
10142 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
10143 		return flow_hw_validate_action_meter_mark(dev, action, error);
10144 	case RTE_FLOW_ACTION_TYPE_RSS:
10145 		return flow_dv_action_validate(dev, conf, action, error);
10146 	case RTE_FLOW_ACTION_TYPE_QUOTA:
10147 		return 0;
10148 	default:
10149 		return rte_flow_error_set(error, ENOTSUP,
10150 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10151 					  "action type not supported");
10152 	}
10153 	return 0;
10154 }
10155 
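/*
 * An operation is pushed immediately unless the caller asked to postpone it;
 * synchronous calls (attr == NULL) always push.
 */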
10156 static __rte_always_inline bool
10157 flow_hw_action_push(const struct rte_flow_op_attr *attr)
10158 {
10159 	return attr ? !attr->postpone : true;
10160 }
10161 
10162 static __rte_always_inline struct mlx5_hw_q_job *
10163 flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
10164 			const struct rte_flow_action_handle *handle,
10165 			void *user_data, void *query_data,
10166 			enum mlx5_hw_job_type type,
10167 			struct rte_flow_error *error)
10168 {
10169 	struct mlx5_hw_q_job *job;
10170 
10171 	MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
10172 	job = flow_hw_job_get(priv, queue);
10173 	if (!job) {
10174 		rte_flow_error_set(error, ENOMEM,
10175 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
10176 				   "Action job allocation failed: queue is full.");
10177 		return NULL;
10178 	}
10179 	job->type = type;
10180 	job->action = handle;
10181 	job->user_data = user_data;
10182 	job->query.user = query_data;
10183 	return job;
10184 }
10185 
10186 static __rte_always_inline void
10187 flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
10188 			struct mlx5_hw_q_job *job,
10189 			bool push, bool aso, bool status)
10190 {
10191 	struct mlx5_priv *priv = dev->data->dev_private;
10192 
10193 	if (queue == MLX5_HW_INV_QUEUE)
10194 		queue = CTRL_QUEUE_ID(priv);
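	/*
	 * Only non-ASO jobs are queued on the indirect rings here; ASO-based
	 * jobs are expected to complete through their own ASO queue.
	 */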
10195 	if (likely(status)) {
10196 		/* 1. add new job to a queue */
10197 		if (!aso)
10198 			rte_ring_enqueue(push ?
10199 					 priv->hw_q[queue].indir_cq :
10200 					 priv->hw_q[queue].indir_iq,
10201 					 job);
10202 		/* 2. send pending jobs */
10203 		if (push)
10204 			__flow_hw_push_action(dev, queue);
10205 	} else {
10206 		flow_hw_job_put(priv, job, queue);
10207 	}
10208 }
10209 
10210 /**
10211  * Create shared action.
10212  *
10213  * @param[in] dev
10214  *   Pointer to the rte_eth_dev structure.
10215  * @param[in] queue
10216  *   Which queue to be used.
10217  * @param[in] attr
10218  *   Operation attribute.
10219  * @param[in] conf
10220  *   Indirect action configuration.
10221  * @param[in] action
10222  *   rte_flow action detail.
10223  * @param[in] user_data
10224  *   Pointer to the user_data.
10225  * @param[out] error
10226  *   Pointer to error structure.
10227  *
10228  * @return
10229  *   Action handle on success, NULL otherwise and rte_errno is set.
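 *
 * A minimal usage sketch from the application side (illustrative, with
 * assumed variables port_id, queue and err; the port is expected to be
 * configured for the HWS async flow API beforehand):
 *
 * @code
 * const struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * const struct rte_flow_action_count cnt_conf = { 0 };
 * const struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	.conf = &cnt_conf,
 * };
 * struct rte_flow_action_handle *handle;
 *
 * handle = rte_flow_async_action_handle_create(port_id, queue, &op_attr,
 *					       &conf, &action, NULL, &err);
 * @endcode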
10230  */
10231 static struct rte_flow_action_handle *
10232 flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
10233 			     const struct rte_flow_op_attr *attr,
10234 			     const struct rte_flow_indir_action_conf *conf,
10235 			     const struct rte_flow_action *action,
10236 			     void *user_data,
10237 			     struct rte_flow_error *error)
10238 {
10239 	struct rte_flow_action_handle *handle = NULL;
10240 	struct mlx5_hw_q_job *job = NULL;
10241 	struct mlx5_priv *priv = dev->data->dev_private;
10242 	const struct rte_flow_action_age *age;
10243 	struct mlx5_aso_mtr *aso_mtr;
10244 	cnt_id_t cnt_id;
10245 	uint32_t mtr_id;
10246 	uint32_t age_idx;
10247 	bool push = flow_hw_action_push(attr);
10248 	bool aso = false;
10249 
10250 	if (attr) {
10251 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
10252 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
10253 					      error);
10254 		if (!job)
10255 			return NULL;
10256 	}
10257 	switch (action->type) {
10258 	case RTE_FLOW_ACTION_TYPE_AGE:
10259 		if (priv->hws_strict_queue) {
10260 			struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
10261 
10262 			if (queue >= info->hw_q_age->nb_rings) {
10263 				rte_flow_error_set(error, EINVAL,
10264 						   RTE_FLOW_ERROR_TYPE_ACTION,
10265 						   NULL,
10266 						   "Invalid queue ID for indirect AGE.");
10267 				rte_errno = EINVAL;
10268 				return NULL;
10269 			}
10270 		}
10271 		age = action->conf;
10272 		age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
10273 						     0, error);
10274 		if (age_idx == 0) {
10275 			rte_flow_error_set(error, ENODEV,
10276 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10277 					   "AGE is not configured!");
10278 		} else {
10279 			age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
10280 				   MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
10281 			handle =
10282 			    (struct rte_flow_action_handle *)(uintptr_t)age_idx;
10283 		}
10284 		break;
10285 	case RTE_FLOW_ACTION_TYPE_COUNT:
10286 		if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
10287 			rte_flow_error_set(error, ENODEV,
10288 					RTE_FLOW_ERROR_TYPE_ACTION,
10289 					NULL,
10290 					"counters are not configured!");
10291 		else
10292 			handle = (struct rte_flow_action_handle *)
10293 				 (uintptr_t)cnt_id;
10294 		break;
10295 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
10296 		aso = true;
10297 		handle = flow_hw_conntrack_create(dev, queue, action->conf, job,
10298 						  push, error);
10299 		break;
10300 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
10301 		aso = true;
10302 		aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push);
10303 		if (!aso_mtr)
10304 			break;
10305 		mtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
10306 			MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id);
10307 		handle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id;
10308 		break;
10309 	case RTE_FLOW_ACTION_TYPE_RSS:
10310 		handle = flow_dv_action_create(dev, conf, action, error);
10311 		break;
10312 	case RTE_FLOW_ACTION_TYPE_QUOTA:
10313 		aso = true;
10314 		handle = mlx5_quota_alloc(dev, queue, action->conf,
10315 					  job, push, error);
10316 		break;
10317 	default:
10318 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10319 				   NULL, "action type not supported");
10320 		break;
10321 	}
10322 	if (job) {
10323 		job->action = handle;
10324 		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY;
10325 		flow_hw_action_finalize(dev, queue, job, push, aso,
10326 					handle != NULL);
10327 	}
10328 	return handle;
10329 }
10330 
10331 static int
10332 mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,
10333 			    const struct rte_flow_update_meter_mark *upd_meter_mark,
10334 			    uint32_t idx, bool push,
10335 			    struct mlx5_hw_q_job *job, struct rte_flow_error *error)
10336 {
10337 	struct mlx5_priv *priv = dev->data->dev_private;
10338 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
10339 	const struct rte_flow_action_meter_mark *meter_mark = &upd_meter_mark->meter_mark;
10340 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
10341 	struct mlx5_flow_meter_info *fm;
10342 
10343 	if (!aso_mtr)
10344 		return rte_flow_error_set(error, EINVAL,
10345 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10346 					  NULL, "Invalid meter_mark update index");
10347 	fm = &aso_mtr->fm;
10348 	if (upd_meter_mark->profile_valid)
10349 		fm->profile = (struct mlx5_flow_meter_profile *)
10350 			(meter_mark->profile);
10351 	if (upd_meter_mark->color_mode_valid)
10352 		fm->color_aware = meter_mark->color_mode;
10353 	if (upd_meter_mark->state_valid)
10354 		fm->is_enable = meter_mark->state;
10355 	/* Update ASO flow meter by wqe. */
10356 	if (mlx5_aso_meter_update_by_wqe(priv->sh, queue,
10357 					 aso_mtr, &priv->mtr_bulk, job, push))
10358 		return rte_flow_error_set(error, EINVAL,
10359 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10360 					  NULL, "Unable to update ASO meter WQE");
10361 	/* Wait for ASO object completion. */
10362 	if (queue == MLX5_HW_INV_QUEUE &&
10363 	    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
10364 		return rte_flow_error_set(error, EINVAL,
10365 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10366 					  NULL, "Unable to wait for ASO meter CQE");
10367 	return 0;
10368 }
10369 
10370 /**
10371  * Update shared action.
10372  *
10373  * @param[in] dev
10374  *   Pointer to the rte_eth_dev structure.
10375  * @param[in] queue
10376  *   Which queue to be used.
10377  * @param[in] attr
10378  *   Operation attribute.
10379  * @param[in] handle
10380  *   Action handle to be updated.
10381  * @param[in] update
10382  *   Update value.
10383  * @param[in] user_data
10384  *   Pointer to the user_data.
10385  * @param[out] error
10386  *   Pointer to error structure.
10387  *
10388  * @return
10389  *   0 on success, negative value otherwise and rte_errno is set.
10390  */
10391 static int
10392 flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
10393 			     const struct rte_flow_op_attr *attr,
10394 			     struct rte_flow_action_handle *handle,
10395 			     const void *update,
10396 			     void *user_data,
10397 			     struct rte_flow_error *error)
10398 {
10399 	struct mlx5_priv *priv = dev->data->dev_private;
10400 	const struct rte_flow_modify_conntrack *ct_conf =
10401 		(const struct rte_flow_modify_conntrack *)update;
10402 	struct mlx5_hw_q_job *job = NULL;
10403 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
10404 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
10405 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
10406 	int ret = 0;
10407 	bool push = flow_hw_action_push(attr);
10408 	bool aso = false;
10409 
10410 	if (attr) {
10411 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
10412 					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
10413 					      error);
10414 		if (!job)
10415 			return -rte_errno;
10416 	}
10417 	switch (type) {
10418 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
10419 		ret = mlx5_hws_age_action_update(priv, idx, update, error);
10420 		break;
10421 	case MLX5_INDIRECT_ACTION_TYPE_CT:
10422 		if (ct_conf->state)
10423 			aso = true;
10424 		ret = flow_hw_conntrack_update(dev, queue, update, act_idx,
10425 					       job, push, error);
10426 		break;
10427 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
10428 		aso = true;
10429 		ret = mlx5_flow_update_meter_mark(dev, queue, update, idx, push,
10430 						  job, error);
10431 		break;
10432 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
10433 		ret = flow_dv_action_update(dev, handle, update, error);
10434 		break;
10435 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
10436 		aso = true;
10437 		ret = mlx5_quota_query_update(dev, queue, handle, update, NULL,
10438 					      job, push, error);
10439 		break;
10440 	default:
10441 		ret = -ENOTSUP;
10442 		rte_flow_error_set(error, ENOTSUP,
10443 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10444 					  "action type not supported");
10445 		break;
10446 	}
10447 	if (job)
10448 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
10449 	return ret;
10450 }
10451 
10452 /**
10453  * Destroy shared action.
10454  *
10455  * @param[in] dev
10456  *   Pointer to the rte_eth_dev structure.
10457  * @param[in] queue
10458  *   Which queue to be used.
10459  * @param[in] attr
10460  *   Operation attribute.
10461  * @param[in] handle
10462  *   Action handle to be destroyed.
10463  * @param[in] user_data
10464  *   Pointer to the user_data.
10465  * @param[out] error
10466  *   Pointer to error structure.
10467  *
10468  * @return
10469  *   0 on success, negative value otherwise and rte_errno is set.
10470  */
10471 static int
10472 flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
10473 			      const struct rte_flow_op_attr *attr,
10474 			      struct rte_flow_action_handle *handle,
10475 			      void *user_data,
10476 			      struct rte_flow_error *error)
10477 {
10478 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
10479 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
10480 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
10481 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
10482 	struct mlx5_priv *priv = dev->data->dev_private;
10483 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
10484 	struct mlx5_hw_q_job *job = NULL;
10485 	struct mlx5_aso_mtr *aso_mtr;
10486 	struct mlx5_flow_meter_info *fm;
10487 	bool push = flow_hw_action_push(attr);
10488 	bool aso = false;
10489 	int ret = 0;
10490 
10491 	if (attr) {
10492 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
10493 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
10494 					      error);
10495 		if (!job)
10496 			return -rte_errno;
10497 	}
10498 	switch (type) {
10499 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
10500 		ret = mlx5_hws_age_action_destroy(priv, age_idx, error);
10501 		break;
10502 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
10503 		age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
10504 		if (age_idx != 0)
10505 			/*
10506 			 * If this counter belongs to indirect AGE, here is the
10507 			 * time to update the AGE.
10508 			 */
10509 			mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
10510 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
10511 		break;
10512 	case MLX5_INDIRECT_ACTION_TYPE_CT:
10513 		ret = flow_hw_conntrack_destroy(dev, act_idx, error);
10514 		break;
10515 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
10516 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
10517 		if (!aso_mtr) {
10518 			ret = -EINVAL;
10519 			rte_flow_error_set(error, EINVAL,
10520 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10521 				NULL, "Invalid meter_mark destroy index");
10522 			break;
10523 		}
10524 		fm = &aso_mtr->fm;
10525 		fm->is_enable = 0;
10526 		/* Update ASO flow meter by wqe. */
10527 		if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,
10528 						 &priv->mtr_bulk, job, push)) {
10529 			ret = -EINVAL;
10530 			rte_flow_error_set(error, EINVAL,
10531 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10532 				NULL, "Unable to update ASO meter WQE");
10533 			break;
10534 		}
10535 		/* Wait for ASO object completion. */
10536 		if (queue == MLX5_HW_INV_QUEUE &&
10537 		    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
10538 			ret = -EINVAL;
10539 			rte_flow_error_set(error, EINVAL,
10540 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10541 				NULL, "Unable to wait for ASO meter CQE");
10542 			break;
10543 		}
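		/*
		 * In asynchronous mode the meter index is released when the
		 * job completes; free it immediately only in the synchronous
		 * case (no job allocated).
		 */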
10544 		if (!job)
10545 			mlx5_ipool_free(pool->idx_pool, idx);
10546 		else
10547 			aso = true;
10548 		break;
10549 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
10550 		ret = flow_dv_action_destroy(dev, handle, error);
10551 		break;
10552 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
10553 		break;
10554 	default:
10555 		ret = -ENOTSUP;
10556 		rte_flow_error_set(error, ENOTSUP,
10557 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10558 					  "action type not supported");
10559 		break;
10560 	}
10561 	if (job)
10562 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
10563 	return ret;
10564 }
10565 
10566 static int
10567 flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
10568 		      void *data, struct rte_flow_error *error)
10569 {
10570 	struct mlx5_hws_cnt_pool *hpool;
10571 	struct mlx5_priv *priv = dev->data->dev_private;
10572 	struct mlx5_hws_cnt *cnt;
10573 	struct rte_flow_query_count *qc = data;
10574 	uint32_t iidx;
10575 	uint64_t pkts, bytes;
10576 
10577 	if (!mlx5_hws_cnt_id_valid(counter))
10578 		return rte_flow_error_set(error, EINVAL,
10579 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10580 				"counter is not available");
10581 	hpool = mlx5_hws_cnt_host_pool(priv->hws_cpool);
10582 	iidx = mlx5_hws_cnt_iidx(hpool, counter);
10583 	cnt = &hpool->pool[iidx];
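	/*
	 * Report values relative to the last reset baseline and, on a reset
	 * request, move the baseline to the current raw readings.
	 */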
10584 	__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
10585 	qc->hits_set = 1;
10586 	qc->bytes_set = 1;
10587 	qc->hits = pkts - cnt->reset.hits;
10588 	qc->bytes = bytes - cnt->reset.bytes;
10589 	if (qc->reset) {
10590 		cnt->reset.bytes = bytes;
10591 		cnt->reset.hits = pkts;
10592 	}
10593 	return 0;
10594 }
10595 
10596 /**
10597  * Query a flow rule AGE action for aging information.
10598  *
10599  * @param[in] dev
10600  *   Pointer to Ethernet device.
10601  * @param[in] age_idx
10602  *   Index of AGE action parameter.
10603  * @param[out] data
10604  *   Data retrieved by the query.
10605  * @param[out] error
10606  *   Perform verbose error reporting if not NULL.
10607  *
10608  * @return
10609  *   0 on success, a negative errno value otherwise and rte_errno is set.
10610  */
10611 static int
10612 flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
10613 		  struct rte_flow_error *error)
10614 {
10615 	struct mlx5_priv *priv = dev->data->dev_private;
10616 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
10617 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
10618 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
10619 	struct rte_flow_query_age *resp = data;
10620 
10621 	if (!param || !param->timeout)
10622 		return rte_flow_error_set(error, EINVAL,
10623 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10624 					  NULL, "age data not available");
10625 	switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
10626 	case HWS_AGE_AGED_OUT_REPORTED:
10627 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
10628 		resp->aged = 1;
10629 		break;
10630 	case HWS_AGE_CANDIDATE:
10631 	case HWS_AGE_CANDIDATE_INSIDE_RING:
10632 		resp->aged = 0;
10633 		break;
10634 	case HWS_AGE_FREE:
10635 		/*
10636 		 * When state is FREE the flow itself should be invalid.
10637 		 * Fall-through.
10638 		 */
10639 	default:
10640 		MLX5_ASSERT(0);
10641 		break;
10642 	}
10643 	resp->sec_since_last_hit_valid = !resp->aged;
10644 	if (resp->sec_since_last_hit_valid)
10645 		resp->sec_since_last_hit = __atomic_load_n
10646 				 (&param->sec_since_last_hit, __ATOMIC_RELAXED);
10647 	return 0;
10648 }
10649 
10650 static int
10651 flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
10652 	      const struct rte_flow_action *actions, void *data,
10653 	      struct rte_flow_error *error)
10654 {
10655 	int ret = -EINVAL;
10656 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
10657 
10658 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
10659 		switch (actions->type) {
10660 		case RTE_FLOW_ACTION_TYPE_VOID:
10661 			break;
10662 		case RTE_FLOW_ACTION_TYPE_COUNT:
10663 			ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
10664 						    error);
10665 			break;
10666 		case RTE_FLOW_ACTION_TYPE_AGE:
10667 			ret = flow_hw_query_age(dev, hw_flow->age_idx, data,
10668 						error);
10669 			break;
10670 		default:
10671 			return rte_flow_error_set(error, ENOTSUP,
10672 						  RTE_FLOW_ERROR_TYPE_ACTION,
10673 						  actions,
10674 						  "action not supported");
10675 		}
10676 	}
10677 	return ret;
10678 }
10679 
10680 /**
10681  * Validate indirect action.
10682  *
10683  * @param[in] dev
10684  *   Pointer to the Ethernet device structure.
10685  * @param[in] conf
10686  *   Shared action configuration.
10687  * @param[in] action
10688  *   Action specification used to create indirect action.
10689  * @param[out] error
10690  *   Perform verbose error reporting if not NULL. Initialized in case of
10691  *   error only.
10692  *
10693  * @return
10694  *   0 on success, otherwise negative errno value.
10695  */
10696 static int
10697 flow_hw_action_validate(struct rte_eth_dev *dev,
10698 			const struct rte_flow_indir_action_conf *conf,
10699 			const struct rte_flow_action *action,
10700 			struct rte_flow_error *err)
10701 {
10702 	return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
10703 					      conf, action, NULL, err);
10704 }
10705 
10706 /**
10707  * Create indirect action.
10708  *
10709  * @param[in] dev
10710  *   Pointer to the Ethernet device structure.
10711  * @param[in] conf
10712  *   Shared action configuration.
10713  * @param[in] action
10714  *   Action specification used to create indirect action.
10715  * @param[out] error
10716  *   Perform verbose error reporting if not NULL. Initialized in case of
10717  *   error only.
10718  *
10719  * @return
10720  *   A valid shared action handle in case of success, NULL otherwise and
10721  *   rte_errno is set.
10722  */
10723 static struct rte_flow_action_handle *
10724 flow_hw_action_create(struct rte_eth_dev *dev,
10725 		       const struct rte_flow_indir_action_conf *conf,
10726 		       const struct rte_flow_action *action,
10727 		       struct rte_flow_error *err)
10728 {
10729 	return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
10730 					    NULL, conf, action, NULL, err);
10731 }
10732 
10733 /**
10734  * Destroy the indirect action.
10735  * Release action related resources on the NIC and the memory.
10736  * Lock free, (mutex should be acquired by caller).
10737  * Dispatcher for action type specific call.
10738  *
10739  * @param[in] dev
10740  *   Pointer to the Ethernet device structure.
10741  * @param[in] handle
10742  *   The indirect action object handle to be removed.
10743  * @param[out] error
10744  *   Perform verbose error reporting if not NULL. Initialized in case of
10745  *   error only.
10746  *
10747  * @return
10748  *   0 on success, otherwise negative errno value.
10749  */
10750 static int
10751 flow_hw_action_destroy(struct rte_eth_dev *dev,
10752 		       struct rte_flow_action_handle *handle,
10753 		       struct rte_flow_error *error)
10754 {
10755 	return flow_hw_action_handle_destroy(dev, MLX5_HW_INV_QUEUE,
10756 			NULL, handle, NULL, error);
10757 }
10758 
10759 /**
10760  * Updates in place shared action configuration.
10761  *
10762  * @param[in] dev
10763  *   Pointer to the Ethernet device structure.
10764  * @param[in] handle
10765  *   The indirect action object handle to be updated.
10766  * @param[in] update
10767  *   Action specification used to modify the action pointed by *handle*.
10768  *   *update* could be of same type with the action pointed by the *handle*
10769  *   handle argument, or some other structures like a wrapper, depending on
10770  *   the indirect action type.
10771  * @param[out] error
10772  *   Perform verbose error reporting if not NULL. Initialized in case of
10773  *   error only.
10774  *
10775  * @return
10776  *   0 on success, otherwise negative errno value.
10777  */
10778 static int
10779 flow_hw_action_update(struct rte_eth_dev *dev,
10780 		      struct rte_flow_action_handle *handle,
10781 		      const void *update,
10782 		      struct rte_flow_error *err)
10783 {
10784 	return flow_hw_action_handle_update(dev, MLX5_HW_INV_QUEUE,
10785 			NULL, handle, update, NULL, err);
10786 }
10787 
10788 static int
10789 flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
10790 			    const struct rte_flow_op_attr *attr,
10791 			    const struct rte_flow_action_handle *handle,
10792 			    void *data, void *user_data,
10793 			    struct rte_flow_error *error)
10794 {
10795 	struct mlx5_priv *priv = dev->data->dev_private;
10796 	struct mlx5_hw_q_job *job = NULL;
10797 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
10798 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
10799 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
10800 	int ret;
10801 	bool push = flow_hw_action_push(attr);
10802 	bool aso = false;
10803 
10804 	if (attr) {
10805 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
10806 					      data, MLX5_HW_Q_JOB_TYPE_QUERY,
10807 					      error);
10808 		if (!job)
10809 			return -rte_errno;
10810 	}
10811 	switch (type) {
10812 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
10813 		ret = flow_hw_query_age(dev, age_idx, data, error);
10814 		break;
10815 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
10816 		ret = flow_hw_query_counter(dev, act_idx, data, error);
10817 		break;
10818 	case MLX5_INDIRECT_ACTION_TYPE_CT:
10819 		aso = true;
10820 		if (job)
10821 			job->query.user = data;
10822 		ret = flow_hw_conntrack_query(dev, queue, act_idx, data,
10823 					      job, push, error);
10824 		break;
10825 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
10826 		aso = true;
10827 		ret = mlx5_quota_query(dev, queue, handle, data,
10828 				       job, push, error);
10829 		break;
10830 	default:
10831 		ret = -ENOTSUP;
10832 		rte_flow_error_set(error, ENOTSUP,
10833 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10834 					  "action type not supported");
10835 		break;
10836 	}
10837 	if (job)
10838 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
10839 	return ret;
10840 }
10841 
10842 static int
10843 flow_hw_async_action_handle_query_update
10844 			(struct rte_eth_dev *dev, uint32_t queue,
10845 			 const struct rte_flow_op_attr *attr,
10846 			 struct rte_flow_action_handle *handle,
10847 			 const void *update, void *query,
10848 			 enum rte_flow_query_update_mode qu_mode,
10849 			 void *user_data, struct rte_flow_error *error)
10850 {
10851 	struct mlx5_priv *priv = dev->data->dev_private;
10852 	bool push = flow_hw_action_push(attr);
10853 	bool aso = false;
10854 	struct mlx5_hw_q_job *job = NULL;
10855 	int ret = 0;
10856 
10857 	if (attr) {
10858 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
10859 					      query,
10860 					      MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
10861 					      error);
10862 		if (!job)
10863 			return -rte_errno;
10864 	}
10865 	switch (MLX5_INDIRECT_ACTION_TYPE_GET(handle)) {
10866 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
10867 		if (qu_mode != RTE_FLOW_QU_QUERY_FIRST) {
10868 			ret = rte_flow_error_set
10869 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
10870 				 NULL, "quota action must be queried before update");
10871 			break;
10872 		}
10873 		aso = true;
10874 		ret = mlx5_quota_query_update(dev, queue, handle,
10875 					      update, query, job, push, error);
10876 		break;
10877 	default:
10878 		ret = rte_flow_error_set(error, ENOTSUP,
10879 					 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "update and query not supported");
10880 	}
10881 	if (job)
10882 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
10883 	return ret;
10884 }
10885 
10886 static int
10887 flow_hw_action_query(struct rte_eth_dev *dev,
10888 		     const struct rte_flow_action_handle *handle, void *data,
10889 		     struct rte_flow_error *error)
10890 {
10891 	return flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,
10892 			handle, data, NULL, error);
10893 }
10894 
10895 static int
10896 flow_hw_action_query_update(struct rte_eth_dev *dev,
10897 			    struct rte_flow_action_handle *handle,
10898 			    const void *update, void *query,
10899 			    enum rte_flow_query_update_mode qu_mode,
10900 			    struct rte_flow_error *error)
10901 {
10902 	return flow_hw_async_action_handle_query_update(dev, MLX5_HW_INV_QUEUE,
10903 							NULL, handle, update,
10904 							query, qu_mode, NULL,
10905 							error);
10906 }
10907 
10908 /**
10909  * Get aged-out flows of a given port on the given HWS flow queue.
10910  *
10911  * @param[in] dev
10912  *   Pointer to the Ethernet device structure.
10913  * @param[in] queue_id
10914  *   Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is not set.
10915  * @param[in, out] contexts
10916  *   The address of an array of pointers to the aged-out flows contexts.
10917  * @param[in] nb_contexts
10918  *   The length of context array pointers.
10919  * @param[out] error
10920  *   Perform verbose error reporting if not NULL. Initialized in case of
10921  *   error only.
10922  *
10923  * @return
10924  *   If nb_contexts is 0, return the number of all aged contexts.
10925  *   If nb_contexts is not 0, return the number of aged flows reported
10926  *   in the context array, otherwise a negative errno value.
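 *
 * A minimal polling sketch (illustrative; port_id, queue_id and err are
 * assumed variables and handle_aged_flow() is an application-defined
 * placeholder):
 *
 * @code
 * void *contexts[64];
 * int n = rte_flow_get_q_aged_flows(port_id, queue_id, contexts,
 *				     RTE_DIM(contexts), &err);
 * for (int i = 0; i < n; i++)
 *	handle_aged_flow(contexts[i]);
 * @endcode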
10927  */
10928 static int
10929 flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
10930 			 void **contexts, uint32_t nb_contexts,
10931 			 struct rte_flow_error *error)
10932 {
10933 	struct mlx5_priv *priv = dev->data->dev_private;
10934 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
10935 	struct rte_ring *r;
10936 	int nb_flows = 0;
10937 
10938 	if (nb_contexts && !contexts)
10939 		return rte_flow_error_set(error, EINVAL,
10940 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10941 					  NULL, "empty context");
10942 	if (priv->hws_strict_queue) {
10943 		if (queue_id >= age_info->hw_q_age->nb_rings)
10944 			return rte_flow_error_set(error, EINVAL,
10945 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10946 						NULL, "invalid queue id");
10947 		r = age_info->hw_q_age->aged_lists[queue_id];
10948 	} else {
10949 		r = age_info->hw_age.aged_list;
10950 		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
10951 	}
10952 	if (nb_contexts == 0)
10953 		return rte_ring_count(r);
10954 	while ((uint32_t)nb_flows < nb_contexts) {
10955 		uint32_t age_idx;
10956 
10957 		if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
10958 			break;
10959 		/* get the AGE context if the aged-out index is still valid. */
10960 		contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
10961 		if (!contexts[nb_flows])
10962 			continue;
10963 		nb_flows++;
10964 	}
10965 	return nb_flows;
10966 }
10967 
10968 /**
10969  * Get aged-out flows.
10970  *
10971  * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
10972  *
10973  * @param[in] dev
10974  *   Pointer to the Ethernet device structure.
10975  * @param[in] contexts
10976  *   The address of an array of pointers to the aged-out flows contexts.
10977  * @param[in] nb_contexts
10978  *   The length of context array pointers.
10979  * @param[out] error
10980  *   Perform verbose error reporting if not NULL. Initialized in case of
10981  *   error only.
10982  *
10983  * @return
10984  *   The number of contexts retrieved on success, otherwise a negative errno value.
10985  *   If nb_contexts is 0, return the number of all aged contexts.
10986  *   If nb_contexts is not 0, return the number of aged flows reported
10987  *   in the context array.
10988  */
10989 static int
10990 flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
10991 		       uint32_t nb_contexts, struct rte_flow_error *error)
10992 {
10993 	struct mlx5_priv *priv = dev->data->dev_private;
10994 
10995 	if (priv->hws_strict_queue)
10996 		DRV_LOG(WARNING,
10997 			"port %u get aged flows called in strict queue mode.",
10998 			dev->data->port_id);
10999 	return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
11000 }
11001 
11002 static void
11003 mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
11004 			  struct mlx5_mirror_clone *clone)
11005 {
11006 	switch (clone->type) {
11007 	case RTE_FLOW_ACTION_TYPE_RSS:
11008 	case RTE_FLOW_ACTION_TYPE_QUEUE:
11009 		mlx5_hrxq_release(dev,
11010 				  ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
11011 		break;
11012 	case RTE_FLOW_ACTION_TYPE_JUMP:
11013 		flow_hw_jump_release(dev, clone->action_ctx);
11014 		break;
11015 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11016 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
11017 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11018 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11019 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11020 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11021 	default:
11022 		break;
11023 	}
11024 }
11025 
11026 void
11027 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
11028 {
11029 	uint32_t i;
11030 
11031 	mlx5_indirect_list_remove_entry(&mirror->indirect);
11032 	for (i = 0; i < mirror->clones_num; i++)
11033 		mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
11034 	if (mirror->mirror_action)
11035 		mlx5dr_action_destroy(mirror->mirror_action);
11036 	mlx5_free(mirror);
11037 }
11038 
11039 static __rte_always_inline bool
11040 mlx5_mirror_terminal_action(const struct rte_flow_action *action)
11041 {
11042 	switch (action->type) {
11043 	case RTE_FLOW_ACTION_TYPE_JUMP:
11044 	case RTE_FLOW_ACTION_TYPE_RSS:
11045 	case RTE_FLOW_ACTION_TYPE_QUEUE:
11046 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11047 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
11048 		return true;
11049 	default:
11050 		break;
11051 	}
11052 	return false;
11053 }
11054 
11055 static bool
11056 mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
11057 				   const struct rte_flow_attr *flow_attr,
11058 				   const struct rte_flow_action *action)
11059 {
11060 	struct mlx5_priv *priv = dev->data->dev_private;
11061 	const struct rte_flow_action_ethdev *port = NULL;
11062 	bool is_proxy = MLX5_HW_PORT_IS_PROXY(priv);
11063 
11064 	if (!action)
11065 		return false;
11066 	switch (action->type) {
11067 	case RTE_FLOW_ACTION_TYPE_QUEUE:
11068 	case RTE_FLOW_ACTION_TYPE_RSS:
11069 		if (flow_attr->transfer)
11070 			return false;
11071 		break;
11072 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
11073 		if (!is_proxy || !flow_attr->transfer)
11074 			return false;
11075 		port = action->conf;
11076 		if (!port || port->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
11077 			return false;
11078 		break;
11079 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11080 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11081 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11082 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11083 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11084 		if (!is_proxy || !flow_attr->transfer)
11085 			return false;
11086 		if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
11087 		    action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
11088 			return false;
11089 		break;
11090 	default:
11091 		return false;
11092 	}
11093 	return true;
11094 }
11095 
11096 /**
11097  * A valid mirror actions list includes one or two SAMPLE actions
11098  * followed by a terminal action such as JUMP.
11099  *
11100  * @return
11101  * Number of actions in the *actions* list if the list is valid,
11102  * -EINVAL otherwise.
11103  */
11104 static int
11105 mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
11106 				     const struct rte_flow_attr *flow_attr,
11107 				     const struct rte_flow_action *actions)
11108 {
11109 	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
11110 		int i = 1;
11111 		bool valid;
11112 		const struct rte_flow_action_sample *sample = actions[0].conf;
11113 		valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
11114 							   sample->actions);
11115 		if (!valid)
11116 			return -EINVAL;
11117 		if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
11118 			i = 2;
11119 			sample = actions[1].conf;
11120 			valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
11121 								   sample->actions);
11122 			if (!valid)
11123 				return -EINVAL;
11124 		}
11125 		return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
11126 	}
11127 	return -EINVAL;
11128 }
11129 
11130 static int
11131 mirror_format_tir(struct rte_eth_dev *dev,
11132 		  struct mlx5_mirror_clone *clone,
11133 		  const struct mlx5_flow_template_table_cfg *table_cfg,
11134 		  const struct rte_flow_action *action,
11135 		  struct mlx5dr_action_dest_attr *dest_attr,
11136 		  struct rte_flow_error *error)
11137 {
11138 	uint32_t hws_flags;
11139 	enum mlx5dr_table_type table_type;
11140 	struct mlx5_hrxq *tir_ctx;
11141 
11142 	table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
11143 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
11144 	tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
11145 	if (!tir_ctx)
11146 		return rte_flow_error_set(error, EINVAL,
11147 					  RTE_FLOW_ERROR_TYPE_ACTION,
11148 					  action, "failed to create RSS/QUEUE action for mirror clone");
11149 	dest_attr->dest = tir_ctx->action;
11150 	clone->action_ctx = tir_ctx;
11151 	return 0;
11152 }
11153 
11154 static int
11155 mirror_format_jump(struct rte_eth_dev *dev,
11156 		   struct mlx5_mirror_clone *clone,
11157 		   const struct mlx5_flow_template_table_cfg *table_cfg,
11158 		   const struct rte_flow_action *action,
11159 		   struct mlx5dr_action_dest_attr *dest_attr,
11160 		   struct rte_flow_error *error)
11161 {
11162 	const struct rte_flow_action_jump *jump_conf = action->conf;
11163 	struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
11164 						(dev, table_cfg,
11165 						 jump_conf->group, error);
11166 
11167 	if (!jump)
11168 		return rte_flow_error_set(error, EINVAL,
11169 					  RTE_FLOW_ERROR_TYPE_ACTION,
11170 					  action, "failed to create JUMP action for mirror clone");
11171 	dest_attr->dest = jump->hws_action;
11172 	clone->action_ctx = jump;
11173 	return 0;
11174 }
11175 
11176 static int
11177 mirror_format_port(struct rte_eth_dev *dev,
11178 		   const struct rte_flow_action *action,
11179 		   struct mlx5dr_action_dest_attr *dest_attr,
11180 		   struct rte_flow_error __rte_unused *error)
11181 {
11182 	struct mlx5_priv *priv = dev->data->dev_private;
11183 	const struct rte_flow_action_ethdev *port_action = action->conf;
11184 
11185 	dest_attr->dest = priv->hw_vport[port_action->port_id];
11186 	return 0;
11187 }
11188 
11189 static int
11190 hw_mirror_clone_reformat(const struct rte_flow_action *actions,
11191 			 struct mlx5dr_action_dest_attr *dest_attr,
11192 			 enum mlx5dr_action_type *action_type,
11193 			 uint8_t *reformat_buf, bool decap)
11194 {
11195 	int ret;
11196 	const struct rte_flow_item *encap_item = NULL;
11197 	const struct rte_flow_action_raw_encap *encap_conf = NULL;
11198 	typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
11199 
11200 	switch (actions[0].type) {
11201 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11202 		encap_conf = actions[0].conf;
11203 		break;
11204 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11205 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
11206 						   actions);
11207 		break;
11208 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11209 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
11210 						   actions);
11211 		break;
11212 	default:
11213 		return -EINVAL;
11214 	}
11215 	*action_type = decap ?
11216 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
11217 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
11218 	if (encap_item) {
11219 		ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
11220 						 &reformat->reformat_data_sz, NULL);
11221 		if (ret)
11222 			return -EINVAL;
11223 		reformat->reformat_data = reformat_buf;
11224 	} else {
11225 		reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
11226 		reformat->reformat_data_sz = encap_conf->size;
11227 	}
11228 	return 0;
11229 }
11230 
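/*
 * Translate the action list of a single mirror clone into an mlx5dr
 * destination attribute - action type array, destination object and
 * optional reformat data.
 */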
11231 static int
11232 hw_mirror_format_clone(struct rte_eth_dev *dev,
11233 			struct mlx5_mirror_clone *clone,
11234 			const struct mlx5_flow_template_table_cfg *table_cfg,
11235 			const struct rte_flow_action *actions,
11236 			struct mlx5dr_action_dest_attr *dest_attr,
11237 			uint8_t *reformat_buf, struct rte_flow_error *error)
11238 {
11239 	struct mlx5_priv *priv = dev->data->dev_private;
11240 	int ret;
11241 	uint32_t i;
11242 	bool decap_seen = false;
11243 
11244 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
11245 		dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
11246 		switch (actions[i].type) {
11247 		case RTE_FLOW_ACTION_TYPE_QUEUE:
11248 		case RTE_FLOW_ACTION_TYPE_RSS:
11249 			ret = mirror_format_tir(dev, clone, table_cfg,
11250 						&actions[i], dest_attr, error);
11251 			if (ret)
11252 				return ret;
11253 			break;
11254 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11255 			ret = mirror_format_port(dev, &actions[i],
11256 						 dest_attr, error);
11257 			if (ret)
11258 				return ret;
11259 			break;
11260 		case RTE_FLOW_ACTION_TYPE_JUMP:
11261 			ret = mirror_format_jump(dev, clone, table_cfg,
11262 						 &actions[i], dest_attr, error);
11263 			if (ret)
11264 				return ret;
11265 			break;
11266 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
11267 			dest_attr->dest = priv->hw_def_miss;
11268 			break;
11269 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11270 			decap_seen = true;
11271 			break;
11272 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11273 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11274 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11275 			ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
11276 						       &dest_attr->action_type[i],
11277 						       reformat_buf, decap_seen);
11278 			if (ret < 0)
11279 				return rte_flow_error_set(error, EINVAL,
11280 							  RTE_FLOW_ERROR_TYPE_ACTION,
11281 							  &actions[i],
11282 							  "failed to create reformat action");
11283 			break;
11284 		default:
11285 			return rte_flow_error_set(error, EINVAL,
11286 						  RTE_FLOW_ERROR_TYPE_ACTION,
11287 						  &actions[i], "unsupported sample action");
11288 		}
11289 		clone->type = actions->type;
11290 	}
11291 	dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
11292 	return 0;
11293 }
11294 
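/*
 * Create an HWS mirror action from a SAMPLE-based action list: validate the
 * list, format each clone destination, create a shared destination array
 * action and return it as an indirect action list handle registered on the
 * port's indirect list.
 */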
11295 static struct rte_flow_action_list_handle *
11296 mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
11297 			     const struct mlx5_flow_template_table_cfg *table_cfg,
11298 			     const struct rte_flow_action *actions,
11299 			     struct rte_flow_error *error)
11300 {
11301 	uint32_t hws_flags;
11302 	int ret = 0, i, clones_num;
11303 	struct mlx5_mirror *mirror;
11304 	enum mlx5dr_table_type table_type;
11305 	struct mlx5_priv *priv = dev->data->dev_private;
11306 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
11307 	uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
11308 	struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
11309 	enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
11310 						  [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
11311 
11312 	memset(mirror_attr, 0, sizeof(mirror_attr));
11313 	memset(array_action_types, 0, sizeof(array_action_types));
11314 	table_type = get_mlx5dr_table_type(flow_attr);
11315 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
11316 	clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
11317 							  actions);
11318 	if (clones_num < 0) {
11319 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11320 				   actions, "Invalid mirror list format");
11321 		return NULL;
11322 	}
11323 	mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
11324 			     0, SOCKET_ID_ANY);
11325 	if (!mirror) {
11326 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
11327 				   actions, "Failed to allocate mirror context");
11328 		return NULL;
11329 	}
11330 
11331 	mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
11332 	mirror->clones_num = clones_num;
11333 	for (i = 0; i < clones_num; i++) {
11334 		const struct rte_flow_action *clone_actions;
11335 
11336 		mirror_attr[i].action_type = array_action_types[i];
11337 		if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
11338 			const struct rte_flow_action_sample *sample = actions[i].conf;
11339 
11340 			clone_actions = sample->actions;
11341 		} else {
11342 			clone_actions = &actions[i];
11343 		}
11344 		ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
11345 					     clone_actions, &mirror_attr[i],
11346 					     reformat_buf[i], error);
11347 
11348 		if (ret)
11349 			goto error;
11350 	}
11351 	hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
11352 	mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
11353 								clones_num,
11354 								mirror_attr,
11355 								hws_flags);
11356 	if (!mirror->mirror_action) {
11357 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11358 				   actions, "Failed to create HWS mirror action");
11359 		goto error;
11360 	}
11361 
11362 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
11363 	return (struct rte_flow_action_list_handle *)mirror;
11364 
11365 error:
11366 	mlx5_hw_mirror_destroy(dev, mirror);
11367 	return NULL;
11368 }
11369 
11370 void
11371 mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
11372 			     struct mlx5_indirect_list *ptr)
11373 {
11374 	struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
11375 
11376 	switch (obj->legacy_type) {
11377 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
11378 		break; /* ASO meters were released in mlx5_flow_meter_flush() */
11379 	default:
11380 		break;
11381 	}
11382 	mlx5_free(obj);
11383 }
11384 
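/*
 * Wrap a single legacy indirect action in an indirect action list handle
 * and register it on the port's indirect list.
 */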
11385 static struct rte_flow_action_list_handle *
11386 mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
11387 			  const struct rte_flow_op_attr *attr,
11388 			  const struct rte_flow_indir_action_conf *conf,
11389 			  const struct rte_flow_action *actions,
11390 			  void *user_data, struct rte_flow_error *error)
11391 {
11392 	struct mlx5_priv *priv = dev->data->dev_private;
11393 	struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
11394 							    sizeof(*indlst_obj),
11395 							    0, SOCKET_ID_ANY);
11396 
11397 	if (!indlst_obj)
11398 		return NULL;
11399 	indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
11400 							  actions, user_data,
11401 							  error);
11402 	if (!indlst_obj->handle) {
11403 		mlx5_free(indlst_obj);
11404 		return NULL;
11405 	}
11406 	indlst_obj->legacy_type = actions[0].type;
11407 	indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
11408 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
11409 	return (struct rte_flow_action_list_handle *)indlst_obj;
11410 }
11411 
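/* Derive the indirect action list type from the first action(s) in the list. */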
11412 static __rte_always_inline enum mlx5_indirect_list_type
11413 flow_hw_inlist_type_get(const struct rte_flow_action *actions)
11414 {
11415 	switch (actions[0].type) {
11416 	case RTE_FLOW_ACTION_TYPE_SAMPLE:
11417 		return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
11418 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
11419 		return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
11420 		       MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
11421 		       MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
11422 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11423 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11424 		return MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
11425 	default:
11426 		break;
11427 	}
11428 	return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
11429 }
11430 
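/*
 * Validate a raw decap/encap indirect action list and create a REFORMAT
 * indirect action list handle from it.
 */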
11431 static struct rte_flow_action_list_handle*
11432 mlx5_hw_decap_encap_handle_create(struct rte_eth_dev *dev,
11433 				  const struct mlx5_flow_template_table_cfg *table_cfg,
11434 				  const struct rte_flow_action *actions,
11435 				  struct rte_flow_error *error)
11436 {
11437 	struct mlx5_priv *priv = dev->data->dev_private;
11438 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
11439 	const struct rte_flow_action *encap = NULL;
11440 	const struct rte_flow_action *decap = NULL;
11441 	struct rte_flow_indir_action_conf indirect_conf = {
11442 		.ingress = flow_attr->ingress,
11443 		.egress = flow_attr->egress,
11444 		.transfer = flow_attr->transfer,
11445 	};
11446 	struct mlx5_hw_encap_decap_action *handle;
11447 	uint64_t action_flags = 0;
11448 
11449 	/*
11450 	 * Allowed action list sequences:
11451 	 * 1. raw_decap / raw_encap / end
11452 	 * 2. raw_encap / end
11453 	 * 3. raw_decap / end
11454 	 */
11455 	while (actions->type != RTE_FLOW_ACTION_TYPE_END) {
11456 		if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
11457 			if (action_flags) {
11458 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11459 						   actions, "Invalid indirect action list sequence");
11460 				return NULL;
11461 			}
11462 			action_flags |= MLX5_FLOW_ACTION_DECAP;
11463 			decap = actions;
11464 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11465 			if (action_flags & MLX5_FLOW_ACTION_ENCAP) {
11466 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11467 						   actions, "Invalid indirect action list sequence");
11468 				return NULL;
11469 			}
11470 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
11471 			encap = actions;
11472 		} else {
11473 			rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11474 					   actions, "Invalid indirect action type in list");
11475 			return NULL;
11476 		}
11477 		actions++;
11478 	}
11479 	if (!decap && !encap) {
11480 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11481 				   actions, "Invalid indirect action combinations");
11482 		return NULL;
11483 	}
11484 	handle = mlx5_reformat_action_create(dev, &indirect_conf, encap, decap, error);
11485 	if (!handle) {
11486 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11487 				   actions, "Failed to create HWS decap_encap action");
11488 		return NULL;
11489 	}
11490 	handle->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
11491 	LIST_INSERT_HEAD(&priv->indirect_list_head, &handle->indirect, entry);
11492 	return (struct rte_flow_action_list_handle *)handle;
11493 }
11494 
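/*
 * Asynchronously create an indirect action list handle. Legacy single-action
 * lists reuse the legacy indirect action path; mirror and reformat lists are
 * dispatched to their dedicated constructors and tracked with an async job
 * when an operation attribute is provided.
 */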
11495 static struct rte_flow_action_list_handle *
11496 flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
11497 					const struct rte_flow_op_attr *attr,
11498 					const struct rte_flow_indir_action_conf *conf,
11499 					const struct rte_flow_action *actions,
11500 					void *user_data,
11501 					struct rte_flow_error *error)
11502 {
11503 	struct mlx5_hw_q_job *job = NULL;
11504 	bool push = flow_hw_action_push(attr);
11505 	enum mlx5_indirect_list_type list_type;
11506 	struct rte_flow_action_list_handle *handle;
11507 	struct mlx5_priv *priv = dev->data->dev_private;
11508 	const struct mlx5_flow_template_table_cfg table_cfg = {
11509 		.external = true,
11510 		.attr = {
11511 			.flow_attr = {
11512 				.ingress = conf->ingress,
11513 				.egress = conf->egress,
11514 				.transfer = conf->transfer
11515 			}
11516 		}
11517 	};
11518 
11519 	if (!actions) {
11520 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11521 				   NULL, "No action list");
11522 		return NULL;
11523 	}
11524 	list_type = flow_hw_inlist_type_get(actions);
11525 	if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
11526 		/*
11527 		 * Legacy indirect actions already manage their async
11528 		 * resources. There is no need to do it twice.
11529 		 */
11530 		handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
11531 						   actions, user_data, error);
11532 		goto end;
11533 	}
11534 	if (attr) {
11535 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
11536 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
11537 					      error);
11538 		if (!job)
11539 			return NULL;
11540 	}
11541 	switch (list_type) {
11542 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
11543 		handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
11544 						      actions, error);
11545 		break;
11546 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
11547 		handle = mlx5_hw_decap_encap_handle_create(dev, &table_cfg,
11548 							   actions, error);
11549 		break;
11550 	default:
11551 		handle = NULL;
11552 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11553 				   actions, "Invalid list");
11554 	}
11555 	if (job) {
11556 		job->action = handle;
11557 		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LIST;
11558 		flow_hw_action_finalize(dev, queue, job, push, false,
11559 					handle != NULL);
11560 	}
11561 end:
11562 	return handle;
11563 }
11564 
11565 static struct rte_flow_action_list_handle *
11566 flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
11567 				  const struct rte_flow_indir_action_conf *conf,
11568 				  const struct rte_flow_action *actions,
11569 				  struct rte_flow_error *error)
11570 {
11571 	return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
11572 						       NULL, conf, actions,
11573 						       NULL, error);
11574 }
11575 
11576 static int
11577 flow_hw_async_action_list_handle_destroy
11578 			(struct rte_eth_dev *dev, uint32_t queue,
11579 			 const struct rte_flow_op_attr *attr,
11580 			 struct rte_flow_action_list_handle *handle,
11581 			 void *user_data, struct rte_flow_error *error)
11582 {
11583 	int ret = 0;
11584 	struct mlx5_hw_q_job *job = NULL;
11585 	bool push = flow_hw_action_push(attr);
11586 	struct mlx5_priv *priv = dev->data->dev_private;
11587 	enum mlx5_indirect_list_type type =
11588 		mlx5_get_indirect_list_type((void *)handle);
11589 
11590 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
11591 		struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
11592 
11593 		ret = flow_hw_action_handle_destroy(dev, queue, attr,
11594 						    legacy->handle,
11595 						    user_data, error);
11596 		mlx5_indirect_list_remove_entry(&legacy->indirect);
11597 		goto end;
11598 	}
11599 	if (attr) {
11600 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
11601 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
11602 					      error);
11603 		if (!job)
11604 			return rte_errno;
11605 	}
11606 	switch (type) {
11607 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
11608 		mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
11609 		break;
11610 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
11611 		LIST_REMOVE(&((struct mlx5_hw_encap_decap_action *)handle)->indirect,
11612 			    entry);
11613 		mlx5_reformat_action_destroy(dev, handle, error);
11614 		break;
11615 	default:
11616 		ret = rte_flow_error_set(error, EINVAL,
11617 					  RTE_FLOW_ERROR_TYPE_ACTION,
11618 					  NULL, "Invalid indirect list handle");
11619 	}
11620 	if (job) {
11621 		flow_hw_action_finalize(dev, queue, job, push, false, true);
11622 	}
11623 end:
11624 	return ret;
11625 }
11626 
11627 static int
11628 flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
11629 				   struct rte_flow_action_list_handle *handle,
11630 				   struct rte_flow_error *error)
11631 {
11632 	return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
11633 							NULL, handle, NULL,
11634 							error);
11635 }
11636 
11637 static int
11638 flow_hw_async_action_list_handle_query_update
11639 		(struct rte_eth_dev *dev, uint32_t queue_id,
11640 		 const struct rte_flow_op_attr *attr,
11641 		 const struct rte_flow_action_list_handle *handle,
11642 		 const void **update, void **query,
11643 		 enum rte_flow_query_update_mode mode,
11644 		 void *user_data, struct rte_flow_error *error)
11645 {
11646 	enum mlx5_indirect_list_type type =
11647 		mlx5_get_indirect_list_type((const void *)handle);
11648 
11649 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
11650 		struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
11651 
11652 		if (update && query)
11653 			return flow_hw_async_action_handle_query_update
11654 				(dev, queue_id, attr, legacy->handle,
11655 				 update, query, mode, user_data, error);
11656 		else if (update && update[0])
11657 			return flow_hw_action_handle_update(dev, queue_id, attr,
11658 							    legacy->handle, update[0],
11659 							    user_data, error);
11660 		else if (query && query[0])
11661 			return flow_hw_action_handle_query(dev, queue_id, attr,
11662 							   legacy->handle, query[0],
11663 							   user_data, error);
11664 		else
11665 			return rte_flow_error_set(error, EINVAL,
11666 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11667 						  NULL, "invalid legacy handle query_update parameters");
11668 	}
11669 	return -ENOTSUP;
11670 }
11671 
11672 static int
11673 flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
11674 					const struct rte_flow_action_list_handle *handle,
11675 					const void **update, void **query,
11676 					enum rte_flow_query_update_mode mode,
11677 					struct rte_flow_error *error)
11678 {
11679 	return flow_hw_async_action_list_handle_query_update
11680 					(dev, MLX5_HW_INV_QUEUE, NULL, handle,
11681 					 update, query, mode, NULL, error);
11682 }
11683 
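/*
 * Calculate the raw matcher hash of @p pattern against @p table,
 * completing any items missing from the pattern template first.
 */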
11684 static int
11685 flow_hw_calc_table_hash(struct rte_eth_dev *dev,
11686 			 const struct rte_flow_template_table *table,
11687 			 const struct rte_flow_item pattern[],
11688 			 uint8_t pattern_template_index,
11689 			 uint32_t *hash, struct rte_flow_error *error)
11690 {
11691 	const struct rte_flow_item *items;
11692 	/* Temp job to allow adding missing items */
11693 	static struct rte_flow_item tmp_items[MLX5_HW_MAX_ITEMS];
11694 	static struct mlx5_hw_q_job job = {.items = tmp_items};
11695 	int res;
11696 
11697 	items = flow_hw_get_rule_items(dev, table, pattern,
11698 				       pattern_template_index,
11699 				       &job);
11700 	res = mlx5dr_rule_hash_calculate(table->matcher, items,
11701 					 pattern_template_index,
11702 					 MLX5DR_RULE_HASH_CALC_MODE_RAW,
11703 					 hash);
11704 	if (res)
11705 		return rte_flow_error_set(error, res,
11706 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11707 					  NULL,
11708 					  "hash could not be calculated");
11709 	return 0;
11710 }
11711 
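/*
 * Calculate the CRC entropy hash over IP addresses and L4 ports taken from
 * @p pattern. A 16-bit hash is produced for the source port destination
 * field and an 8-bit hash otherwise.
 */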
11712 static int
11713 flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
11714 			const struct rte_flow_item pattern[],
11715 			enum rte_flow_encap_hash_field dest_field,
11716 			uint8_t *hash,
11717 			struct rte_flow_error *error)
11718 {
11719 	struct mlx5_priv *priv = dev->data->dev_private;
11720 	struct mlx5dr_crc_encap_entropy_hash_fields data;
11721 	enum mlx5dr_crc_encap_entropy_hash_size res_size =
11722 			dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT ?
11723 				MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_16 :
11724 				MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_8;
11725 	int res;
11726 
11727 	memset(&data, 0, sizeof(struct mlx5dr_crc_encap_entropy_hash_fields));
11728 
11729 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
11730 		switch (pattern->type) {
11731 		case RTE_FLOW_ITEM_TYPE_IPV4:
11732 			data.dst.ipv4_addr =
11733 				((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.dst_addr;
11734 			data.src.ipv4_addr =
11735 				((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.src_addr;
11736 			break;
11737 		case RTE_FLOW_ITEM_TYPE_IPV6:
11738 			memcpy(data.dst.ipv6_addr,
11739 			       ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
11740 			       sizeof(data.dst.ipv6_addr));
11741 			memcpy(data.src.ipv6_addr,
11742 			       ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
11743 			       sizeof(data.src.ipv6_addr));
11744 			break;
11745 		case RTE_FLOW_ITEM_TYPE_UDP:
11746 			data.next_protocol = IPPROTO_UDP;
11747 			data.dst_port =
11748 				((const struct rte_flow_item_udp *)(pattern->spec))->hdr.dst_port;
11749 			data.src_port =
11750 				((const struct rte_flow_item_udp *)(pattern->spec))->hdr.src_port;
11751 			break;
11752 		case RTE_FLOW_ITEM_TYPE_TCP:
11753 			data.next_protocol = IPPROTO_TCP;
11754 			data.dst_port =
11755 				((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.dst_port;
11756 			data.src_port =
11757 				((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.src_port;
11758 			break;
11759 		case RTE_FLOW_ITEM_TYPE_ICMP:
11760 			data.next_protocol = IPPROTO_ICMP;
11761 			break;
11762 		case RTE_FLOW_ITEM_TYPE_ICMP6:
11763 			data.next_protocol = IPPROTO_ICMPV6;
11764 			break;
11765 		default:
11766 			break;
11767 		}
11768 	}
11769 	res = mlx5dr_crc_encap_entropy_hash_calc(priv->dr_ctx, &data, hash, res_size);
11770 	if (res)
11771 		return rte_flow_error_set(error, res,
11772 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11773 					  NULL, "error while calculating encap hash");
11774 	return 0;
11775 }
11776 
11777 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
11778 	.info_get = flow_hw_info_get,
11779 	.configure = flow_hw_configure,
11780 	.pattern_validate = flow_hw_pattern_validate,
11781 	.pattern_template_create = flow_hw_pattern_template_create,
11782 	.pattern_template_destroy = flow_hw_pattern_template_destroy,
11783 	.actions_validate = flow_hw_actions_validate,
11784 	.actions_template_create = flow_hw_actions_template_create,
11785 	.actions_template_destroy = flow_hw_actions_template_destroy,
11786 	.template_table_create = flow_hw_template_table_create,
11787 	.template_table_destroy = flow_hw_table_destroy,
11788 	.group_set_miss_actions = flow_hw_group_set_miss_actions,
11789 	.async_flow_create = flow_hw_async_flow_create,
11790 	.async_flow_create_by_index = flow_hw_async_flow_create_by_index,
11791 	.async_flow_update = flow_hw_async_flow_update,
11792 	.async_flow_destroy = flow_hw_async_flow_destroy,
11793 	.pull = flow_hw_pull,
11794 	.push = flow_hw_push,
11795 	.async_action_create = flow_hw_action_handle_create,
11796 	.async_action_destroy = flow_hw_action_handle_destroy,
11797 	.async_action_update = flow_hw_action_handle_update,
11798 	.async_action_query_update = flow_hw_async_action_handle_query_update,
11799 	.async_action_query = flow_hw_action_handle_query,
11800 	.action_validate = flow_hw_action_validate,
11801 	.action_create = flow_hw_action_create,
11802 	.action_destroy = flow_hw_action_destroy,
11803 	.action_update = flow_hw_action_update,
11804 	.action_query = flow_hw_action_query,
11805 	.action_query_update = flow_hw_action_query_update,
11806 	.action_list_handle_create = flow_hw_action_list_handle_create,
11807 	.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
11808 	.action_list_handle_query_update =
11809 		flow_hw_action_list_handle_query_update,
11810 	.async_action_list_handle_create =
11811 		flow_hw_async_action_list_handle_create,
11812 	.async_action_list_handle_destroy =
11813 		flow_hw_async_action_list_handle_destroy,
11814 	.async_action_list_handle_query_update =
11815 		flow_hw_async_action_list_handle_query_update,
11816 	.query = flow_hw_query,
11817 	.get_aged_flows = flow_hw_get_aged_flows,
11818 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
11819 	.item_create = flow_dv_item_create,
11820 	.item_release = flow_dv_item_release,
11821 	.flow_calc_table_hash = flow_hw_calc_table_hash,
11822 	.flow_calc_encap_hash = flow_hw_calc_encap_hash,
11823 };
11824 
11825 /**
11826  * Creates a control flow using flow template API on @p proxy_dev device,
11827  * on behalf of @p owner_dev device.
11828  *
11829  * This function uses locks internally to synchronize access to the
11830  * flow queue.
11831  *
11832  * The created flow rule is stored in a private list associated with @p proxy_dev device.
11833  *
11834  * @param owner_dev
11835  *   Pointer to Ethernet device on behalf of which flow is created.
11836  * @param proxy_dev
11837  *   Pointer to Ethernet device on which flow is created.
11838  * @param table
11839  *   Pointer to flow table.
11840  * @param items
11841  *   Pointer to flow rule items.
11842  * @param item_template_idx
11843  *   Index of an item template associated with @p table.
11844  * @param actions
11845  *   Pointer to flow rule actions.
11846  * @param action_template_idx
11847  *   Index of an action template associated with @p table.
11848  * @param info
11849  *   Additional info about control flow rule.
11850  * @param external
11851  *   If true, the flow rule is stored on the external control flows list.
11852  *
11853  * @return
11854  *   0 on success, negative errno value otherwise and rte_errno set.
11855  */
11856 static __rte_unused int
11857 flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
11858 			 struct rte_eth_dev *proxy_dev,
11859 			 struct rte_flow_template_table *table,
11860 			 struct rte_flow_item items[],
11861 			 uint8_t item_template_idx,
11862 			 struct rte_flow_action actions[],
11863 			 uint8_t action_template_idx,
11864 			 struct mlx5_hw_ctrl_flow_info *info,
11865 			 bool external)
11866 {
11867 	struct mlx5_priv *priv = proxy_dev->data->dev_private;
11868 	uint32_t queue = CTRL_QUEUE_ID(priv);
11869 	struct rte_flow_op_attr op_attr = {
11870 		.postpone = 0,
11871 	};
11872 	struct rte_flow *flow = NULL;
11873 	struct mlx5_hw_ctrl_flow *entry = NULL;
11874 	int ret;
11875 
11876 	rte_spinlock_lock(&priv->hw_ctrl_lock);
11877 	entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
11878 			    0, SOCKET_ID_ANY);
11879 	if (!entry) {
11880 		DRV_LOG(ERR, "port %u not enough memory to create control flows",
11881 			proxy_dev->data->port_id);
11882 		rte_errno = ENOMEM;
11883 		ret = -rte_errno;
11884 		goto error;
11885 	}
11886 	flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
11887 					 items, item_template_idx,
11888 					 actions, action_template_idx,
11889 					 NULL, NULL);
11890 	if (!flow) {
11891 		DRV_LOG(ERR, "port %u failed to enqueue create control"
11892 			" flow operation", proxy_dev->data->port_id);
11893 		ret = -rte_errno;
11894 		goto error;
11895 	}
11896 	ret = __flow_hw_pull_comp(proxy_dev, queue, NULL);
11897 	if (ret) {
11898 		DRV_LOG(ERR, "port %u failed to insert control flow",
11899 			proxy_dev->data->port_id);
11900 		rte_errno = EINVAL;
11901 		ret = -rte_errno;
11902 		goto error;
11903 	}
11904 	entry->owner_dev = owner_dev;
11905 	entry->flow = flow;
11906 	if (info)
11907 		entry->info = *info;
11908 	else
11909 		entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL;
11910 	if (external)
11911 		LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
11912 	else
11913 		LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
11914 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
11915 	return 0;
11916 error:
11917 	if (entry)
11918 		mlx5_free(entry);
11919 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
11920 	return ret;
11921 }
11922 
11923 /**
11924  * Destroys a control flow @p flow using flow template API on @p dev device.
11925  *
11926  * This function uses locks internally to synchronize access to the
11927  * flow queue.
11928  *
11929  * If the @p flow is stored on any private list/pool, then the caller must free
11930  * the relevant resources.
11931  *
11932  * @param dev
11933  *   Pointer to Ethernet device.
11934  * @param flow
11935  *   Pointer to flow rule.
11936  *
11937  * @return
11938  *   0 on success, non-zero value otherwise.
11939  */
11940 static int
11941 flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
11942 {
11943 	struct mlx5_priv *priv = dev->data->dev_private;
11944 	uint32_t queue = CTRL_QUEUE_ID(priv);
11945 	struct rte_flow_op_attr op_attr = {
11946 		.postpone = 0,
11947 	};
11948 	int ret;
11949 
11950 	rte_spinlock_lock(&priv->hw_ctrl_lock);
11951 	ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
11952 	if (ret) {
11953 		DRV_LOG(ERR, "port %u failed to enqueue destroy control"
11954 			" flow operation", dev->data->port_id);
11955 		goto exit;
11956 	}
11957 	ret = __flow_hw_pull_comp(dev, queue, NULL);
11958 	if (ret) {
11959 		DRV_LOG(ERR, "port %u failed to destroy control flow",
11960 			dev->data->port_id);
11961 		rte_errno = EINVAL;
11962 		ret = -rte_errno;
11963 		goto exit;
11964 	}
11965 exit:
11966 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
11967 	return ret;
11968 }
11969 
11970 /**
11971  * Destroys control flows created on behalf of @p owner device on @p dev device.
11972  *
11973  * @param dev
11974  *   Pointer to Ethernet device on which control flows were created.
11975  * @param owner
11976  *   Pointer to Ethernet device owning control flows.
11977  *
11978  * @return
11979  *   0 on success, otherwise negative error code is returned and
11980  *   rte_errno is set.
11981  */
11982 static int
11983 flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
11984 {
11985 	struct mlx5_priv *priv = dev->data->dev_private;
11986 	struct mlx5_hw_ctrl_flow *cf;
11987 	struct mlx5_hw_ctrl_flow *cf_next;
11988 	int ret;
11989 
11990 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
11991 	while (cf != NULL) {
11992 		cf_next = LIST_NEXT(cf, next);
11993 		if (cf->owner_dev == owner) {
11994 			ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
11995 			if (ret) {
11996 				rte_errno = ret;
11997 				return -ret;
11998 			}
11999 			LIST_REMOVE(cf, next);
12000 			mlx5_free(cf);
12001 		}
12002 		cf = cf_next;
12003 	}
12004 	return 0;
12005 }
12006 
12007 /**
12008  * Destroys control flows created for @p owner_dev device.
12009  *
12010  * @param owner_dev
12011  *   Pointer to Ethernet device owning control flows.
12012  *
12013  * @return
12014  *   0 on success, otherwise negative error code is returned and
12015  *   rte_errno is set.
12016  */
12017 int
12018 mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
12019 {
12020 	struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
12021 	struct rte_eth_dev *proxy_dev;
12022 	uint16_t owner_port_id = owner_dev->data->port_id;
12023 	uint16_t proxy_port_id = owner_dev->data->port_id;
12024 	int ret;
12025 
12026 	/* Flush all flows created by this port for itself. */
12027 	ret = flow_hw_flush_ctrl_flows_owned_by(owner_dev, owner_dev);
12028 	if (ret)
12029 		return ret;
12030 	/* Flush all flows created for this port on proxy port. */
12031 	if (owner_priv->sh->config.dv_esw_en) {
12032 		ret = rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL);
12033 		if (ret == -ENODEV) {
12034 			DRV_LOG(DEBUG, "Unable to find transfer proxy port for port %u. It was "
12035 				       "probably closed. Control flows were cleared.",
12036 				       owner_port_id);
12037 			rte_errno = 0;
12038 			return 0;
12039 		} else if (ret) {
12040 			DRV_LOG(ERR, "Unable to find proxy port for port %u (ret = %d)",
12041 				owner_port_id, ret);
12042 			return ret;
12043 		}
12044 		proxy_dev = &rte_eth_devices[proxy_port_id];
12045 	} else {
12046 		proxy_dev = owner_dev;
12047 	}
12048 	return flow_hw_flush_ctrl_flows_owned_by(proxy_dev, owner_dev);
12049 }
12050 
12051 /**
12052  * Destroys all control flows created on @p dev device.
12053  *
12054  * @param dev
12055  *   Pointer to Ethernet device.
12056  *
12057  * @return
12058  *   0 on success, otherwise negative error code is returned and
12059  *   rte_errno is set.
12060  */
12061 static int
12062 flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
12063 {
12064 	struct mlx5_priv *priv = dev->data->dev_private;
12065 	struct mlx5_hw_ctrl_flow *cf;
12066 	struct mlx5_hw_ctrl_flow *cf_next;
12067 	int ret;
12068 
12069 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
12070 	while (cf != NULL) {
12071 		cf_next = LIST_NEXT(cf, next);
12072 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
12073 		if (ret) {
12074 			rte_errno = ret;
12075 			return -ret;
12076 		}
12077 		LIST_REMOVE(cf, next);
12078 		mlx5_free(cf);
12079 		cf = cf_next;
12080 	}
12081 	cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
12082 	while (cf != NULL) {
12083 		cf_next = LIST_NEXT(cf, next);
12084 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
12085 		if (ret) {
12086 			rte_errno = ret;
12087 			return -ret;
12088 		}
12089 		LIST_REMOVE(cf, next);
12090 		mlx5_free(cf);
12091 		cf = cf_next;
12092 	}
12093 	return 0;
12094 }
12095 
12096 int
12097 mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
12098 {
12099 	uint16_t port_id = dev->data->port_id;
12100 	struct rte_flow_item_ethdev esw_mgr_spec = {
12101 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
12102 	};
12103 	struct rte_flow_item_ethdev esw_mgr_mask = {
12104 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
12105 	};
12106 	struct rte_flow_item_tag reg_c0_spec = {
12107 		.index = (uint8_t)REG_C_0,
12108 		.data = flow_hw_esw_mgr_regc_marker(dev),
12109 	};
12110 	struct rte_flow_item_tag reg_c0_mask = {
12111 		.index = 0xff,
12112 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
12113 	};
12114 	struct mlx5_rte_flow_item_sq sq_spec = {
12115 		.queue = sqn,
12116 	};
12117 	struct rte_flow_action_ethdev port = {
12118 		.port_id = port_id,
12119 	};
12120 	struct rte_flow_item items[3] = { { 0 } };
12121 	struct rte_flow_action actions[3] = { { 0 } };
12122 	struct mlx5_hw_ctrl_flow_info flow_info = {
12123 		.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
12124 		.esw_mgr_sq = sqn,
12125 	};
12126 	struct rte_eth_dev *proxy_dev;
12127 	struct mlx5_priv *proxy_priv;
12128 	uint16_t proxy_port_id = dev->data->port_id;
12129 	int ret;
12130 
12131 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
12132 	if (ret) {
12133 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
12134 			     "port must be present to create default SQ miss flows.",
12135 			     port_id);
12136 		return ret;
12137 	}
12138 	proxy_dev = &rte_eth_devices[proxy_port_id];
12139 	proxy_priv = proxy_dev->data->dev_private;
12140 	if (!proxy_priv->dr_ctx) {
12141 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
12142 			       "for HWS to create default SQ miss flows. Default flows will "
12143 			       "not be created.",
12144 			       proxy_port_id, port_id);
12145 		return 0;
12146 	}
12147 	if (!proxy_priv->hw_esw_sq_miss_root_tbl ||
12148 	    !proxy_priv->hw_esw_sq_miss_tbl) {
12149 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
12150 			     "default flow tables were not created.",
12151 			     proxy_port_id, port_id);
12152 		rte_errno = ENOMEM;
12153 		return -rte_errno;
12154 	}
12155 	/*
12156 	 * Create a root SQ miss flow rule - match E-Switch Manager and SQ,
12157 	 * and jump to group 1.
12158 	 */
12159 	items[0] = (struct rte_flow_item){
12160 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
12161 		.spec = &esw_mgr_spec,
12162 		.mask = &esw_mgr_mask,
12163 	};
12164 	items[1] = (struct rte_flow_item){
12165 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
12166 		.spec = &sq_spec,
12167 	};
12168 	items[2] = (struct rte_flow_item){
12169 		.type = RTE_FLOW_ITEM_TYPE_END,
12170 	};
12171 	actions[0] = (struct rte_flow_action){
12172 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
12173 	};
12174 	actions[1] = (struct rte_flow_action){
12175 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
12176 	};
12177 	actions[2] = (struct rte_flow_action) {
12178 		.type = RTE_FLOW_ACTION_TYPE_END,
12179 	};
12180 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl,
12181 				       items, 0, actions, 0, &flow_info, external);
12182 	if (ret) {
12183 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
12184 			port_id, sqn, ret);
12185 		return ret;
12186 	}
12187 	/*
12188 	 * Create a non-root SQ miss flow rule - match REG_C_0 marker and SQ,
12189 	 * and forward to port.
12190 	 */
12191 	items[0] = (struct rte_flow_item){
12192 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
12193 		.spec = &reg_c0_spec,
12194 		.mask = &reg_c0_mask,
12195 	};
12196 	items[1] = (struct rte_flow_item){
12197 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
12198 		.spec = &sq_spec,
12199 	};
12200 	items[2] = (struct rte_flow_item){
12201 		.type = RTE_FLOW_ITEM_TYPE_END,
12202 	};
12203 	actions[0] = (struct rte_flow_action){
12204 		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
12205 		.conf = &port,
12206 	};
12207 	actions[1] = (struct rte_flow_action){
12208 		.type = RTE_FLOW_ACTION_TYPE_END,
12209 	};
12210 	flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
12211 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl,
12212 				       items, 0, actions, 0, &flow_info, external);
12213 	if (ret) {
12214 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
12215 			port_id, sqn, ret);
12216 		return ret;
12217 	}
12218 	return 0;
12219 }
12220 
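/*
 * Check whether a control flow entry is a root or non-root SQ miss rule
 * created on behalf of @p dev for the given SQ number.
 */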
12221 static bool
12222 flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf,
12223 				 struct rte_eth_dev *dev,
12224 				 uint32_t sqn)
12225 {
12226 	if (cf->owner_dev != dev)
12227 		return false;
12228 	if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
12229 		return true;
12230 	if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
12231 		return true;
12232 	return false;
12233 }
12234 
12235 int
12236 mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
12237 {
12238 	uint16_t port_id = dev->data->port_id;
12239 	uint16_t proxy_port_id = dev->data->port_id;
12240 	struct rte_eth_dev *proxy_dev;
12241 	struct mlx5_priv *proxy_priv;
12242 	struct mlx5_hw_ctrl_flow *cf;
12243 	struct mlx5_hw_ctrl_flow *cf_next;
12244 	int ret;
12245 
12246 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
12247 	if (ret) {
12248 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
12249 			     "port must be present for default SQ miss flow rules to exist.",
12250 			     port_id);
12251 		return ret;
12252 	}
12253 	proxy_dev = &rte_eth_devices[proxy_port_id];
12254 	proxy_priv = proxy_dev->data->dev_private;
12255 	if (!proxy_priv->dr_ctx)
12256 		return 0;
12257 	if (!proxy_priv->hw_esw_sq_miss_root_tbl ||
12258 	    !proxy_priv->hw_esw_sq_miss_tbl)
12259 		return 0;
12260 	cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
12261 	while (cf != NULL) {
12262 		cf_next = LIST_NEXT(cf, next);
12263 		if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) {
12264 			claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
12265 			LIST_REMOVE(cf, next);
12266 			mlx5_free(cf);
12267 		}
12268 		cf = cf_next;
12269 	}
12270 	return 0;
12271 }
12272 
12273 int
12274 mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
12275 {
12276 	uint16_t port_id = dev->data->port_id;
12277 	struct rte_flow_item_ethdev port_spec = {
12278 		.port_id = port_id,
12279 	};
12280 	struct rte_flow_item items[] = {
12281 		{
12282 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
12283 			.spec = &port_spec,
12284 		},
12285 		{
12286 			.type = RTE_FLOW_ITEM_TYPE_END,
12287 		},
12288 	};
12289 	struct rte_flow_action_jump jump = {
12290 		.group = 1,
12291 	};
12292 	struct rte_flow_action actions[] = {
12293 		{
12294 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
12295 			.conf = &jump,
12296 		},
12297 		{
12298 			.type = RTE_FLOW_ACTION_TYPE_END,
12299 		}
12300 	};
12301 	struct mlx5_hw_ctrl_flow_info flow_info = {
12302 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP,
12303 	};
12304 	struct rte_eth_dev *proxy_dev;
12305 	struct mlx5_priv *proxy_priv;
12306 	uint16_t proxy_port_id = dev->data->port_id;
12307 	int ret;
12308 
12309 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
12310 	if (ret) {
12311 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
12312 			     "port must be present to create default FDB jump rule.",
12313 			     port_id);
12314 		return ret;
12315 	}
12316 	proxy_dev = &rte_eth_devices[proxy_port_id];
12317 	proxy_priv = proxy_dev->data->dev_private;
12318 	if (!proxy_priv->dr_ctx) {
12319 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
12320 			       "for HWS to create default FDB jump rule. Default rule will "
12321 			       "not be created.",
12322 			       proxy_port_id, port_id);
12323 		return 0;
12324 	}
12325 	if (!proxy_priv->hw_esw_zero_tbl) {
12326 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
12327 			     "default flow tables were not created.",
12328 			     proxy_port_id, port_id);
12329 		rte_errno = EINVAL;
12330 		return -rte_errno;
12331 	}
12332 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
12333 					proxy_priv->hw_esw_zero_tbl,
12334 					items, 0, actions, 0, &flow_info, false);
12335 }
12336 
12337 int
12338 mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
12339 {
12340 	struct mlx5_priv *priv = dev->data->dev_private;
12341 	struct rte_flow_item_eth promisc = {
12342 		.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
12343 		.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
12344 		.hdr.ether_type = 0,
12345 	};
12346 	struct rte_flow_item eth_all[] = {
12347 		[0] = {
12348 			.type = RTE_FLOW_ITEM_TYPE_ETH,
12349 			.spec = &promisc,
12350 			.mask = &promisc,
12351 		},
12352 		[1] = {
12353 			.type = RTE_FLOW_ITEM_TYPE_END,
12354 		},
12355 	};
12356 	struct rte_flow_action_modify_field mreg_action = {
12357 		.operation = RTE_FLOW_MODIFY_SET,
12358 		.dst = {
12359 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
12360 			.tag_index = REG_C_1,
12361 		},
12362 		.src = {
12363 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
12364 			.tag_index = REG_A,
12365 		},
12366 		.width = 32,
12367 	};
12368 	struct rte_flow_action copy_reg_action[] = {
12369 		[0] = {
12370 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
12371 			.conf = &mreg_action,
12372 		},
12373 		[1] = {
12374 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
12375 		},
12376 		[2] = {
12377 			.type = RTE_FLOW_ACTION_TYPE_END,
12378 		},
12379 	};
12380 	struct mlx5_hw_ctrl_flow_info flow_info = {
12381 		.type = MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY,
12382 	};
12383 
12384 	MLX5_ASSERT(priv->master);
12385 	if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl)
12386 		return 0;
12387 	return flow_hw_create_ctrl_flow(dev, dev,
12388 					priv->hw_tx_meta_cpy_tbl,
12389 					eth_all, 0, copy_reg_action, 0, &flow_info, false);
12390 }
12391 
12392 int
12393 mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
12394 {
12395 	struct mlx5_priv *priv = dev->data->dev_private;
12396 	struct mlx5_rte_flow_item_sq sq_spec = {
12397 		.queue = sqn,
12398 	};
12399 	struct rte_flow_item items[] = {
12400 		{
12401 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
12402 			.spec = &sq_spec,
12403 		},
12404 		{
12405 			.type = RTE_FLOW_ITEM_TYPE_END,
12406 		},
12407 	};
12408 	/*
12409 	 * Allocate actions array suitable for all cases - extended metadata enabled or not.
12410 	 * With extended metadata there will be an additional MODIFY_FIELD action before JUMP.
12411 	 */
12412 	struct rte_flow_action actions[] = {
12413 		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD },
12414 		{ .type = RTE_FLOW_ACTION_TYPE_JUMP },
12415 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12416 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12417 	};
12418 	struct mlx5_hw_ctrl_flow_info flow_info = {
12419 		.type = MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
12420 		.tx_repr_sq = sqn,
12421 	};
12422 
12423 	/* It is assumed that caller checked for representor matching. */
12424 	MLX5_ASSERT(priv->sh->config.repr_matching);
12425 	if (!priv->dr_ctx) {
12426 		DRV_LOG(DEBUG, "Port %u must be configured for HWS, before creating "
12427 			       "default egress flow rules. Omitting creation.",
12428 			       dev->data->port_id);
12429 		return 0;
12430 	}
12431 	if (!priv->hw_tx_repr_tagging_tbl) {
12432 		DRV_LOG(ERR, "Port %u is configured for HWS, but table for default "
12433 			     "egress flow rules does not exist.",
12434 			     dev->data->port_id);
12435 		rte_errno = EINVAL;
12436 		return -rte_errno;
12437 	}
12438 	/*
12439 	 * If extended metadata mode is enabled, then an additional MODIFY_FIELD action must be
12440 	 * placed before terminating JUMP action.
12441 	 */
12442 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
12443 		actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
12444 		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
12445 	}
12446 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
12447 					items, 0, actions, 0, &flow_info, external);
12448 }
12449 
12450 int
12451 mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
12452 {
12453 	struct mlx5_priv *priv = dev->data->dev_private;
12454 	struct rte_flow_item_eth lacp_item = {
12455 		.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
12456 	};
12457 	struct rte_flow_item eth_lacp[] = {
12458 		[0] = {
12459 			.type = RTE_FLOW_ITEM_TYPE_ETH,
12460 			.spec = &lacp_item,
12461 			.mask = &lacp_item,
12462 		},
12463 		[1] = {
12464 			.type = RTE_FLOW_ITEM_TYPE_END,
12465 		},
12466 	};
12467 	struct rte_flow_action miss_action[] = {
12468 		[0] = {
12469 			.type = (enum rte_flow_action_type)
12470 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
12471 		},
12472 		[1] = {
12473 			.type = RTE_FLOW_ACTION_TYPE_END,
12474 		},
12475 	};
12476 	struct mlx5_hw_ctrl_flow_info flow_info = {
12477 		.type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
12478 	};
12479 
12480 	MLX5_ASSERT(priv->master);
12481 	if (!priv->dr_ctx || !priv->hw_lacp_rx_tbl)
12482 		return 0;
12483 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_lacp_rx_tbl, eth_lacp, 0,
12484 					miss_action, 0, &flow_info, false);
12485 }
12486 
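/* Map a control Rx ethernet pattern type to its MLX5_CTRL_* request flag. */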
12487 static uint32_t
12488 __calc_pattern_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
12489 {
12490 	switch (eth_pattern_type) {
12491 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
12492 		return MLX5_CTRL_PROMISCUOUS;
12493 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
12494 		return MLX5_CTRL_ALL_MULTICAST;
12495 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
12496 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
12497 		return MLX5_CTRL_BROADCAST;
12498 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
12499 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
12500 		return MLX5_CTRL_IPV4_MULTICAST;
12501 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
12502 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
12503 		return MLX5_CTRL_IPV6_MULTICAST;
12504 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
12505 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
12506 		return MLX5_CTRL_DMAC;
12507 	default:
12508 		/* Should not reach here. */
12509 		MLX5_ASSERT(false);
12510 		return 0;
12511 	}
12512 }
12513 
12514 static uint32_t
12515 __calc_vlan_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
12516 {
12517 	switch (eth_pattern_type) {
12518 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
12519 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
12520 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
12521 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
12522 		return MLX5_CTRL_VLAN_FILTER;
12523 	default:
12524 		return 0;
12525 	}
12526 }
12527 
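/*
 * Check whether @p flags request the given ethernet pattern type.
 * When VLAN filtering is involved - either the pattern is a VLAN variant or
 * the VLAN filter flag is set - the VLAN flag must be requested as well.
 */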
12528 static bool
12529 eth_pattern_type_is_requested(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
12530 			      uint32_t flags)
12531 {
12532 	uint32_t pattern_flags = __calc_pattern_flags(eth_pattern_type);
12533 	uint32_t vlan_flags = __calc_vlan_flags(eth_pattern_type);
12534 	bool pattern_requested = !!(pattern_flags & flags);
12535 	bool consider_vlan = vlan_flags || (MLX5_CTRL_VLAN_FILTER & flags);
12536 	bool vlan_requested = !!(vlan_flags & flags);
12537 
12538 	if (consider_vlan)
12539 		return pattern_requested && vlan_requested;
12540 	else
12541 		return pattern_requested;
12542 }
12543 
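/*
 * Check that all RSS hash types used by the control Rx RSS actions template
 * are enabled in the current device RSS configuration.
 */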
12544 static bool
12545 rss_type_is_requested(struct mlx5_priv *priv,
12546 		      const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12547 {
12548 	struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[rss_type];
12549 	unsigned int i;
12550 
12551 	for (i = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
12552 		if (at->actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
12553 			const struct rte_flow_action_rss *rss = at->actions[i].conf;
12554 			uint64_t rss_types = rss->types;
12555 
12556 			if ((rss_types & priv->rss_conf.rss_hf) != rss_types)
12557 				return false;
12558 		}
12559 	}
12560 	return true;
12561 }
12562 
12563 static const struct rte_flow_item_eth *
12564 __get_eth_spec(const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern)
12565 {
12566 	switch (pattern) {
12567 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
12568 		return &ctrl_rx_eth_promisc_spec;
12569 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
12570 		return &ctrl_rx_eth_mcast_spec;
12571 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
12572 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
12573 		return &ctrl_rx_eth_bcast_spec;
12574 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
12575 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
12576 		return &ctrl_rx_eth_ipv4_mcast_spec;
12577 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
12578 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
12579 		return &ctrl_rx_eth_ipv6_mcast_spec;
12580 	default:
12581 		/* This case should not be reached. */
12582 		MLX5_ASSERT(false);
12583 		return NULL;
12584 	}
12585 }
12586 
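/*
 * Create a single control Rx flow rule - no VLAN expansion - matching the
 * given ethernet pattern with L3/L4 items derived from the expanded RSS type.
 */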
12587 static int
12588 __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
12589 			    struct rte_flow_template_table *tbl,
12590 			    const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12591 			    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12592 {
12593 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
12594 	struct rte_flow_item items[5];
12595 	struct rte_flow_action actions[] = {
12596 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
12597 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12598 	};
12599 	struct mlx5_hw_ctrl_flow_info flow_info = {
12600 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
12601 	};
12602 
12603 	if (!eth_spec)
12604 		return -EINVAL;
12605 	memset(items, 0, sizeof(items));
12606 	items[0] = (struct rte_flow_item){
12607 		.type = RTE_FLOW_ITEM_TYPE_ETH,
12608 		.spec = eth_spec,
12609 	};
12610 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
12611 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
12612 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
12613 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
12614 	/* Without VLAN filtering, only a single flow rule needs to be created. */
12615 	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
12616 }
12617 
12618 static int
12619 __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
12620 				 struct rte_flow_template_table *tbl,
12621 				 const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12622 				 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12623 {
12624 	struct mlx5_priv *priv = dev->data->dev_private;
12625 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
12626 	struct rte_flow_item items[5];
12627 	struct rte_flow_action actions[] = {
12628 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
12629 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12630 	};
12631 	struct mlx5_hw_ctrl_flow_info flow_info = {
12632 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
12633 	};
12634 	unsigned int i;
12635 
12636 	if (!eth_spec)
12637 		return -EINVAL;
12638 	memset(items, 0, sizeof(items));
12639 	items[0] = (struct rte_flow_item){
12640 		.type = RTE_FLOW_ITEM_TYPE_ETH,
12641 		.spec = eth_spec,
12642 	};
12643 	/* Optional VLAN item spec is left empty here and filled per VLAN ID below. */
12644 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
12645 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
12646 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
12647 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
12648 	/* VLAN filtering is enabled, so create a single flow rule for each registered VLAN ID. */
12649 	for (i = 0; i < priv->vlan_filter_n; ++i) {
12650 		uint16_t vlan = priv->vlan_filter[i];
12651 		struct rte_flow_item_vlan vlan_spec = {
12652 			.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
12653 		};
12654 
12655 		items[1].spec = &vlan_spec;
12656 		if (flow_hw_create_ctrl_flow(dev, dev,
12657 					     tbl, items, 0, actions, 0, &flow_info, false))
12658 			return -rte_errno;
12659 	}
12660 	return 0;
12661 }
12662 
12663 static int
12664 __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
12665 			     struct rte_flow_template_table *tbl,
12666 			     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12667 			     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12668 {
12669 	struct rte_flow_item_eth eth_spec;
12670 	struct rte_flow_item items[5];
12671 	struct rte_flow_action actions[] = {
12672 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
12673 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12674 	};
12675 	struct mlx5_hw_ctrl_flow_info flow_info = {
12676 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
12677 	};
12678 	const struct rte_ether_addr cmp = {
12679 		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
12680 	};
12681 	unsigned int i;
12682 
12683 	RTE_SET_USED(pattern_type);
12684 
12685 	memset(&eth_spec, 0, sizeof(eth_spec));
12686 	memset(items, 0, sizeof(items));
12687 	items[0] = (struct rte_flow_item){
12688 		.type = RTE_FLOW_ITEM_TYPE_ETH,
12689 		.spec = &eth_spec,
12690 	};
12691 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
12692 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
12693 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
12694 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
12695 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
12696 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
12697 
12698 		if (!memcmp(mac, &cmp, sizeof(*mac)))
12699 			continue;
12700 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
12701 		if (flow_hw_create_ctrl_flow(dev, dev,
12702 					     tbl, items, 0, actions, 0, &flow_info, false))
12703 			return -rte_errno;
12704 	}
12705 	return 0;
12706 }
12707 
12708 static int
12709 __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
12710 				  struct rte_flow_template_table *tbl,
12711 				  const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12712 				  const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12713 {
12714 	struct mlx5_priv *priv = dev->data->dev_private;
12715 	struct rte_flow_item_eth eth_spec;
12716 	struct rte_flow_item items[5];
12717 	struct rte_flow_action actions[] = {
12718 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
12719 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12720 	};
12721 	struct mlx5_hw_ctrl_flow_info flow_info = {
12722 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
12723 	};
12724 	const struct rte_ether_addr cmp = {
12725 		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
12726 	};
12727 	unsigned int i;
12728 	unsigned int j;
12729 
12730 	RTE_SET_USED(pattern_type);
12731 
12732 	memset(&eth_spec, 0, sizeof(eth_spec));
12733 	memset(items, 0, sizeof(items));
12734 	items[0] = (struct rte_flow_item){
12735 		.type = RTE_FLOW_ITEM_TYPE_ETH,
12736 		.spec = &eth_spec,
12737 	};
12738 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
12739 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
12740 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
12741 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
12742 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
12743 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
12744 
12745 		if (!memcmp(mac, &cmp, sizeof(*mac)))
12746 			continue;
12747 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
12748 		for (j = 0; j < priv->vlan_filter_n; ++j) {
12749 			uint16_t vlan = priv->vlan_filter[j];
12750 			struct rte_flow_item_vlan vlan_spec = {
12751 				.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
12752 			};
12753 
12754 			items[1].spec = &vlan_spec;
12755 			if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
12756 						     &flow_info, false))
12757 				return -rte_errno;
12758 		}
12759 	}
12760 	return 0;
12761 }
12762 
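/**
 * Create control flow rules for a single combination of Ethernet pattern type
 * and expanded RSS type by dispatching to the matching helper above.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */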
12763 static int
12764 __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
12765 		     struct rte_flow_template_table *tbl,
12766 		     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12767 		     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12768 {
12769 	switch (pattern_type) {
12770 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
12771 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
12772 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
12773 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
12774 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
12775 		return __flow_hw_ctrl_flows_single(dev, tbl, pattern_type, rss_type);
12776 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
12777 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
12778 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
12779 		return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
12780 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
12781 		return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
12782 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
12783 		return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
12784 	default:
12785 		/* Should not reach here. */
12786 		MLX5_ASSERT(false);
12787 		rte_errno = EINVAL;
12788 		return -EINVAL;
12789 	}
12790 }
12791 
12792 
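/**
 * Create the default Rx control flow rules requested through @p flags.
 *
 * RSS action templates and template tables are created on demand and cached
 * in the control flow structures, then a set of rules is created for each
 * requested Ethernet pattern type and expanded RSS type.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flags
 *   Bitmask of requested control flow rule types.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */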
12793 int
12794 mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
12795 {
12796 	struct mlx5_priv *priv = dev->data->dev_private;
12797 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
12798 	unsigned int i;
12799 	unsigned int j;
12800 	int ret = 0;
12801 
12804 	if (!priv->dr_ctx) {
12805 		DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
12806 			       "HWS needs to be configured beforehand.",
12807 			       dev->data->port_id);
12808 		return 0;
12809 	}
12810 	if (!priv->hw_ctrl_rx) {
12811 		DRV_LOG(ERR, "port %u Control flow rule templates were not created.",
12812 			dev->data->port_id);
12813 		rte_errno = EINVAL;
12814 		return -rte_errno;
12815 	}
12816 	hw_ctrl_rx = priv->hw_ctrl_rx;
12817 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
12818 		const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
12819 
12820 		if (!eth_pattern_type_is_requested(eth_pattern_type, flags))
12821 			continue;
12822 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
12823 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
12824 			struct rte_flow_actions_template *at;
12825 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
12826 			const struct mlx5_flow_template_table_cfg cfg = {
12827 				.attr = tmpls->attr,
12828 				.external = 0,
12829 			};
12830 
12831 			if (!hw_ctrl_rx->rss[rss_type]) {
12832 				at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
12833 				if (!at)
12834 					return -rte_errno;
12835 				hw_ctrl_rx->rss[rss_type] = at;
12836 			} else {
12837 				at = hw_ctrl_rx->rss[rss_type];
12838 			}
12839 			if (!rss_type_is_requested(priv, rss_type))
12840 				continue;
12841 			if (!tmpls->tbl) {
12842 				tmpls->tbl = flow_hw_table_create(dev, &cfg,
12843 								  &tmpls->pt, 1, &at, 1, NULL);
12844 				if (!tmpls->tbl) {
12845 					DRV_LOG(ERR, "port %u Failed to create template table "
12846 						     "for control flow rules. Unable to create "
12847 						     "control flow rules.",
12848 						     dev->data->port_id);
12849 					return -rte_errno;
12850 				}
12851 			}
12852 
12853 			ret = __flow_hw_ctrl_flows(dev, tmpls->tbl, eth_pattern_type, rss_type);
12854 			if (ret) {
12855 				DRV_LOG(ERR, "port %u Failed to create control flow rule.",
12856 					dev->data->port_id);
12857 				return ret;
12858 			}
12859 		}
12860 	}
12861 	return 0;
12862 }
12863 
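/**
 * Release HWS flow meter resources: the meter policy and profile arrays,
 * the meter ASO pool with its queues, and the bulk meter ASO action and
 * DevX object.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */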
12864 void
12865 mlx5_flow_meter_uninit(struct rte_eth_dev *dev)
12866 {
12867 	struct mlx5_priv *priv = dev->data->dev_private;
12868 
12869 	if (priv->mtr_policy_arr) {
12870 		mlx5_free(priv->mtr_policy_arr);
12871 		priv->mtr_policy_arr = NULL;
12872 	}
12873 	if (priv->mtr_profile_arr) {
12874 		mlx5_free(priv->mtr_profile_arr);
12875 		priv->mtr_profile_arr = NULL;
12876 	}
12877 	if (priv->hws_mpool) {
12878 		mlx5_aso_mtr_queue_uninit(priv->sh, priv->hws_mpool, NULL);
12879 		mlx5_ipool_destroy(priv->hws_mpool->idx_pool);
12880 		mlx5_free(priv->hws_mpool);
12881 		priv->hws_mpool = NULL;
12882 	}
12883 	if (priv->mtr_bulk.aso) {
12884 		mlx5_free(priv->mtr_bulk.aso);
12885 		priv->mtr_bulk.aso = NULL;
12886 		priv->mtr_bulk.size = 0;
12887 		mlx5_aso_queue_uninit(priv->sh, ASO_OPC_MOD_POLICER);
12888 	}
12889 	if (priv->mtr_bulk.action) {
12890 		mlx5dr_action_destroy(priv->mtr_bulk.action);
12891 		priv->mtr_bulk.action = NULL;
12892 	}
12893 	if (priv->mtr_bulk.devx_obj) {
12894 		claim_zero(mlx5_devx_cmd_destroy(priv->mtr_bulk.devx_obj));
12895 		priv->mtr_bulk.devx_obj = NULL;
12896 	}
12897 }
12898 
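/**
 * Allocate HWS flow meter resources: the bulk meter ASO DevX object and its
 * mlx5dr ASO meter action, the bulk meter array, the meter ASO pool with its
 * queues and index pool and, optionally, the meter profile and policy arrays.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] nb_meters
 *   Number of meters to allocate.
 * @param[in] nb_meter_profiles
 *   Number of meter profiles to allocate, 0 to skip the profile array.
 * @param[in] nb_meter_policies
 *   Number of meter policies to allocate, 0 to skip the policy array.
 * @param[in] nb_queues
 *   Number of ASO queues to initialize.
 *
 * @return
 *   0 on success, a positive errno value otherwise. All partially created
 *   resources are released on failure.
 */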
12899 int
12900 mlx5_flow_meter_init(struct rte_eth_dev *dev,
12901 		     uint32_t nb_meters,
12902 		     uint32_t nb_meter_profiles,
12903 		     uint32_t nb_meter_policies,
12904 		     uint32_t nb_queues)
12905 {
12906 	struct mlx5_priv *priv = dev->data->dev_private;
12907 	struct mlx5_devx_obj *dcs = NULL;
12908 	uint32_t log_obj_size;
12909 	int ret = 0;
12910 	int reg_id;
12911 	struct mlx5_aso_mtr *aso;
12912 	uint32_t i;
12913 	struct rte_flow_error error;
12914 	uint32_t flags;
12915 	uint32_t nb_mtrs = rte_align32pow2(nb_meters);
12916 	struct mlx5_indexed_pool_config cfg = {
12917 		.size = sizeof(struct mlx5_aso_mtr),
12918 		.trunk_size = 1 << 12,
12919 		.per_core_cache = 1 << 13,
12920 		.need_lock = 1,
12921 		.release_mem_en = !!priv->sh->config.reclaim_mode,
12922 		.malloc = mlx5_malloc,
12923 		.max_idx = nb_meters,
12924 		.free = mlx5_free,
12925 		.type = "mlx5_hw_mtr_mark_action",
12926 	};
12927 
12928 	if (!nb_meters) {
12929 		ret = ENOTSUP;
12930 		rte_flow_error_set(&error, ENOMEM,
12931 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12932 				  NULL, "Meter configuration is invalid.");
12933 		goto err;
12934 	}
12935 	if (!priv->mtr_en || !priv->sh->meter_aso_en) {
12936 		ret = ENOTSUP;
12937 		rte_flow_error_set(&error, ENOTSUP,
12938 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12939 				  NULL, "Meter ASO is not supported.");
12940 		goto err;
12941 	}
12942 	priv->mtr_config.nb_meters = nb_meters;
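	/* Each flow meter ASO object holds two meters, hence the halved size. */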
12943 	log_obj_size = rte_log2_u32(nb_meters >> 1);
12944 	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj
12945 		(priv->sh->cdev->ctx, priv->sh->cdev->pdn,
12946 			log_obj_size);
12947 	if (!dcs) {
12948 		ret = ENOMEM;
12949 		rte_flow_error_set(&error, ENOMEM,
12950 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12951 				  NULL, "Meter ASO object allocation failed.");
12952 		goto err;
12953 	}
12954 	priv->mtr_bulk.devx_obj = dcs;
12955 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL);
12956 	if (reg_id < 0) {
12957 		ret = ENOTSUP;
12958 		rte_flow_error_set(&error, ENOTSUP,
12959 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12960 				  NULL, "Meter register is not available.");
12961 		goto err;
12962 	}
12963 	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
12964 	if (priv->sh->config.dv_esw_en && priv->master)
12965 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
12966 	priv->mtr_bulk.action = mlx5dr_action_create_aso_meter
12967 			(priv->dr_ctx, (struct mlx5dr_devx_obj *)dcs,
12968 				reg_id - REG_C_0, flags);
12969 	if (!priv->mtr_bulk.action) {
12970 		ret = ENOMEM;
12971 		rte_flow_error_set(&error, ENOMEM,
12972 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12973 				  NULL, "Meter action creation failed.");
12974 		goto err;
12975 	}
12976 	priv->mtr_bulk.aso = mlx5_malloc(MLX5_MEM_ZERO,
12977 					 sizeof(struct mlx5_aso_mtr) *
12978 					 nb_meters,
12979 					 RTE_CACHE_LINE_SIZE,
12980 					 SOCKET_ID_ANY);
12981 	if (!priv->mtr_bulk.aso) {
12982 		ret = ENOMEM;
12983 		rte_flow_error_set(&error, ENOMEM,
12984 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12985 				  NULL, "Meter bulk ASO allocation failed.");
12986 		goto err;
12987 	}
12988 	priv->mtr_bulk.size = nb_meters;
12989 	aso = priv->mtr_bulk.aso;
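	/* Pre-initialize every bulk meter as a direct ASO meter in WAIT state. */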
12990 	for (i = 0; i < priv->mtr_bulk.size; i++) {
12991 		aso->type = ASO_METER_DIRECT;
12992 		aso->state = ASO_METER_WAIT;
12993 		aso->offset = i;
12994 		aso++;
12995 	}
12996 	priv->hws_mpool = mlx5_malloc(MLX5_MEM_ZERO,
12997 				sizeof(struct mlx5_aso_mtr_pool),
12998 				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
12999 	if (!priv->hws_mpool) {
13000 		ret = ENOMEM;
13001 		rte_flow_error_set(&error, ENOMEM,
13002 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13003 				  NULL, "Meter ipool allocation failed.");
13004 		goto err;
13005 	}
13006 	priv->hws_mpool->devx_obj = priv->mtr_bulk.devx_obj;
13007 	priv->hws_mpool->action = priv->mtr_bulk.action;
13008 	priv->hws_mpool->nb_sq = nb_queues;
13009 	if (mlx5_aso_mtr_queue_init(priv->sh, priv->hws_mpool,
13010 				    &priv->sh->mtrmng->pools_mng, nb_queues)) {
13011 		ret = ENOMEM;
13012 		rte_flow_error_set(&error, ENOMEM,
13013 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13014 				  NULL, "Meter ASO queue allocation failed.");
13015 		goto err;
13016 	}
13017 	/*
13018 	 * A per-core cache is not needed when the number of meters is small,
13019 	 * since the flow insertion rate will be very limited in that case.
13020 	 * Shrink the trunk to the meter count when it fits in the default 4K trunk.
13021 	 */
13022 	if (nb_mtrs <= cfg.trunk_size) {
13023 		cfg.per_core_cache = 0;
13024 		cfg.trunk_size = nb_mtrs;
13025 	} else if (nb_mtrs <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
13026 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
13027 	}
13028 	priv->hws_mpool->idx_pool = mlx5_ipool_create(&cfg);
13029 	if (nb_meter_profiles) {
13030 		priv->mtr_config.nb_meter_profiles = nb_meter_profiles;
13031 		priv->mtr_profile_arr =
13032 			mlx5_malloc(MLX5_MEM_ZERO,
13033 				    sizeof(struct mlx5_flow_meter_profile) *
13034 				    nb_meter_profiles,
13035 				    RTE_CACHE_LINE_SIZE,
13036 				    SOCKET_ID_ANY);
13037 		if (!priv->mtr_profile_arr) {
13038 			ret = ENOMEM;
13039 			rte_flow_error_set(&error, ENOMEM,
13040 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13041 					   NULL, "Meter profile allocation failed.");
13042 			goto err;
13043 		}
13044 	}
13045 	if (nb_meter_policies) {
13046 		priv->mtr_config.nb_meter_policies = nb_meter_policies;
13047 		priv->mtr_policy_arr =
13048 			mlx5_malloc(MLX5_MEM_ZERO,
13049 				    sizeof(struct mlx5_flow_meter_policy) *
13050 				    nb_meter_policies,
13051 				    RTE_CACHE_LINE_SIZE,
13052 				    SOCKET_ID_ANY);
13053 		if (!priv->mtr_policy_arr) {
13054 			ret = ENOMEM;
13055 			rte_flow_error_set(&error, ENOMEM,
13056 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13057 					   NULL, "Meter policy allocation failed.");
13058 			goto err;
13059 		}
13060 	}
13061 	return 0;
13062 err:
13063 	mlx5_flow_meter_uninit(dev);
13064 	return ret;
13065 }
13066 
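/**
 * Convert an indirect action domain configuration into mlx5dr action flags.
 *
 * @param[in] domain
 *   Indirect action configuration (ingress/egress/transfer).
 *
 * @return
 *   The matching MLX5DR_ACTION_FLAG_HWS_* flag, UINT32_MAX if no domain
 *   is set.
 */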
13067 static __rte_always_inline uint32_t
13068 mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
13069 {
13070 	uint32_t tbl_type;
13071 
13072 	if (domain->transfer)
13073 		tbl_type = MLX5DR_ACTION_FLAG_HWS_FDB;
13074 	else if (domain->egress)
13075 		tbl_type = MLX5DR_ACTION_FLAG_HWS_TX;
13076 	else if (domain->ingress)
13077 		tbl_type = MLX5DR_ACTION_FLAG_HWS_RX;
13078 	else
13079 		tbl_type = UINT32_MAX;
13080 	return tbl_type;
13081 }
13082 
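/**
 * Allocate a shared reformat action handle and create the underlying mlx5dr
 * reformat action of the given type for the requested domain.
 *
 * @return
 *   Pointer to the new handle on success, NULL otherwise.
 */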
13083 static struct mlx5_hw_encap_decap_action *
13084 __mlx5_reformat_create(struct rte_eth_dev *dev,
13085 		       const struct rte_flow_action_raw_encap *encap_conf,
13086 		       const struct rte_flow_indir_action_conf *domain,
13087 		       enum mlx5dr_action_type type)
13088 {
13089 	struct mlx5_priv *priv = dev->data->dev_private;
13090 	struct mlx5_hw_encap_decap_action *handle;
13091 	struct mlx5dr_action_reformat_header hdr;
13092 	uint32_t flags;
13093 
13094 	flags = mlx5_reformat_domain_to_tbl_type(domain);
13095 	if (flags == UINT32_MAX) {
13096 		DRV_LOG(ERR, "Reformat: invalid indirect action configuration");
13097 		return NULL;
13098 	}
13099 	flags |= (uint32_t)MLX5DR_ACTION_FLAG_SHARED;
13100 	/* Allocate new list entry. */
13101 	handle = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*handle), 0, SOCKET_ID_ANY);
13102 	if (!handle) {
13103 		DRV_LOG(ERR, "Reformat: failed to allocate reformat entry");
13104 		return NULL;
13105 	}
13106 	handle->action_type = type;
13107 	hdr.sz = encap_conf ? encap_conf->size : 0;
13108 	hdr.data = encap_conf ? encap_conf->data : NULL;
13109 	handle->action = mlx5dr_action_create_reformat(priv->dr_ctx,
13110 					type, 1, &hdr, 0, flags);
13111 	if (!handle->action) {
13112 		DRV_LOG(ERR, "Reformat: failed to create reformat action");
13113 		mlx5_free(handle);
13114 		return NULL;
13115 	}
13116 	return handle;
13117 }
13118 
13119 /**
13120  * Create mlx5 reformat action.
13121  *
13122  * @param[in] dev
13123  *   Pointer to rte_eth_dev structure.
13124  * @param[in] conf
13125  *   Pointer to the indirect action parameters.
13126  * @param[in] encap_action
13127  *   Pointer to the raw_encap action configuration.
13128  * @param[in] decap_action
13129  *   Pointer to the raw_decap action configuration.
13130  * @param[out] error
13131  *   Pointer to error structure.
13132  *
13133  * @return
13134  *   A valid shared action handle in case of success, NULL otherwise and
13135  *   rte_errno is set.
13136  */
13137 struct mlx5_hw_encap_decap_action *
13138 mlx5_reformat_action_create(struct rte_eth_dev *dev,
13139 			    const struct rte_flow_indir_action_conf *conf,
13140 			    const struct rte_flow_action *encap_action,
13141 			    const struct rte_flow_action *decap_action,
13142 			    struct rte_flow_error *error)
13143 {
13144 	struct mlx5_priv *priv = dev->data->dev_private;
13145 	struct mlx5_hw_encap_decap_action *handle;
13146 	const struct rte_flow_action_raw_encap *encap = NULL;
13147 	const struct rte_flow_action_raw_decap *decap = NULL;
13148 	enum mlx5dr_action_type type = MLX5DR_ACTION_TYP_LAST;
13149 
13150 	MLX5_ASSERT(!encap_action || encap_action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP);
13151 	MLX5_ASSERT(!decap_action || decap_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP);
13152 	if (priv->sh->config.dv_flow_en != 2) {
13153 		rte_flow_error_set(error, ENOTSUP,
13154 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
13155 				   "Reformat: HW steering is not enabled");
13156 		return NULL;
13157 	}
13158 	if (!conf || (conf->transfer + conf->egress + conf->ingress != 1)) {
13159 		rte_flow_error_set(error, EINVAL,
13160 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
13161 				   "Reformat: exactly one domain must be specified");
13162 		return NULL;
13163 	}
13164 	if ((encap_action && !encap_action->conf) || (decap_action && !decap_action->conf)) {
13165 		rte_flow_error_set(error, EINVAL,
13166 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
13167 				   "Reformat: missing action configuration");
13168 		return NULL;
13169 	}
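	/*
	 * Pick the reformat type from the provided action combination:
	 *   encap only               -> L2_TO_TNL_L2 (L2 encapsulation)
	 *   decap only               -> TNL_L2_TO_L2 (L2 decapsulation)
	 *   short decap + long encap -> L2_TO_TNL_L3
	 *   long decap + short encap -> TNL_L3_TO_L2
	 * where "short" and "long" are relative to the encapsulation decision
	 * size.
	 */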
13170 	if (encap_action && !decap_action) {
13171 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
13172 		if (!encap->size || encap->size > MLX5_ENCAP_MAX_LEN ||
13173 		    encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
13174 			rte_flow_error_set(error, EINVAL,
13175 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
13176 					   "Reformat: Invalid encap length");
13177 			return NULL;
13178 		}
13179 		type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
13180 	} else if (decap_action && !encap_action) {
13181 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
13182 		if (!decap->size || decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
13183 			rte_flow_error_set(error, EINVAL,
13184 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
13185 					   "Reformat: Invalid decap length");
13186 			return NULL;
13187 		}
13188 		type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
13189 	} else if (encap_action && decap_action) {
13190 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
13191 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
13192 		if (decap->size < MLX5_ENCAPSULATION_DECISION_SIZE &&
13193 		    encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
13194 		    encap->size <= MLX5_ENCAP_MAX_LEN) {
13195 			type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
13196 		} else if (decap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
13197 			   encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
13198 			type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
13199 		} else {
13200 			rte_flow_error_set(error, EINVAL,
13201 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
13202 					   "Reformat: Invalid decap & encap length");
13203 			return NULL;
13204 		}
13205 	} else if (!encap_action && !decap_action) {
13206 		rte_flow_error_set(error, EINVAL,
13207 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
13208 				   "Reformat: an encap and/or decap action is required");
13209 		return NULL;
13210 	}
13211 	if (!priv->dr_ctx) {
13212 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
13213 				   encap_action, "Reformat: HWS not supported");
13214 		return NULL;
13215 	}
13216 	handle = __mlx5_reformat_create(dev, encap, conf, type);
13217 	if (!handle) {
13218 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
13219 				   "Reformat: failed to create indirect action");
13220 		return NULL;
13221 	}
13222 	return handle;
13223 }
13224 
13225 /**
13226  * Destroy the indirect reformat action.
13227  * Release action related resources on the NIC and the memory.
13228  * Lock free (the mutex should be acquired by the caller).
13229  *
13230  * @param[in] dev
13231  *   Pointer to the Ethernet device structure.
13232  * @param[in] handle
13233  *   The indirect action list handle to be removed.
13234  * @param[out] error
13235  *   Perform verbose error reporting if not NULL. Initialized in case of
13236  *   error only.
13237  *
13238  * @return
13239  *   0 on success, otherwise negative errno value.
13240  */
13241 int
13242 mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
13243 			     struct rte_flow_action_list_handle *handle,
13244 			     struct rte_flow_error *error)
13245 {
13246 	struct mlx5_priv *priv = dev->data->dev_private;
13247 	struct mlx5_hw_encap_decap_action *action;
13248 
13249 	action = (struct mlx5_hw_encap_decap_action *)handle;
13250 	if (!priv->dr_ctx || !action)
13251 		return rte_flow_error_set(error, ENOTSUP,
13252 					  RTE_FLOW_ERROR_TYPE_ACTION, handle,
13253 					  "Reformat: invalid action handle");
13254 	mlx5dr_action_destroy(action->action);
13255 	mlx5_free(handle);
13256 	return 0;
13257 }
13258 
13259 static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops = {
13260 	.async_create = flow_hw_async_flow_create,
13261 	.async_create_by_index = flow_hw_async_flow_create_by_index,
13262 	.async_actions_update = flow_hw_async_flow_update,
13263 	.async_destroy = flow_hw_async_flow_destroy,
13264 	.push = flow_hw_push,
13265 	.pull = flow_hw_pull,
13266 	.async_action_handle_create = flow_hw_action_handle_create,
13267 	.async_action_handle_destroy = flow_hw_action_handle_destroy,
13268 	.async_action_handle_update = flow_hw_action_handle_update,
13269 	.async_action_handle_query = flow_hw_action_handle_query,
13270 	.async_action_handle_query_update = flow_hw_async_action_handle_query_update,
13271 	.async_action_list_handle_create = flow_hw_async_action_list_handle_create,
13272 	.async_action_list_handle_destroy = flow_hw_async_action_list_handle_destroy,
13273 	.async_action_list_handle_query_update =
13274 		flow_hw_async_action_list_handle_query_update,
13275 };
13276 
13277 #endif
13278