xref: /dpdk/drivers/net/mlx5/mlx5_flow_hw.c (revision 3da59f30a23f2e795d2315f3d949e1b3e0ce0c3d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include <rte_flow.h>
6 
7 #include <mlx5_malloc.h>
8 
9 #include "mlx5.h"
10 #include "mlx5_defs.h"
11 #include "mlx5_flow.h"
12 #include "mlx5_rx.h"
13 
14 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
15 #include "mlx5_hws_cnt.h"
16 
17 /* The maximum actions support in the flow. */
18 #define MLX5_HW_MAX_ACTS 16
19 
20 /*
21  * The default ipool threshold value indicates which per_core_cache
22  * value to set.
23  */
24 #define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
25 /* The default min local cache size. */
26 #define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
27 
28 /* Default push burst threshold. */
29 #define BURST_THR 32u
30 
31 /* Default queue to flush the flows. */
32 #define MLX5_DEFAULT_FLUSH_QUEUE 0
33 
34 /* Maximum number of rules in control flow tables. */
35 #define MLX5_HW_CTRL_FLOW_NB_RULES (4096)
36 
37 /* Lowest flow group usable by an application if group translation is done. */
38 #define MLX5_HW_LOWEST_USABLE_GROUP (1)
39 
40 /* Maximum group index usable by user applications for transfer flows. */
41 #define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)
42 
43 /* Maximum group index usable by user applications for egress flows. */
44 #define MLX5_HW_MAX_EGRESS_GROUP (UINT32_MAX - 1)
45 
46 /* Lowest priority for HW root table. */
47 #define MLX5_HW_LOWEST_PRIO_ROOT 15
48 
49 /* Lowest priority for HW non-root table. */
50 #define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)
51 
52 /* Priorities for Rx control flow rules. */
53 #define MLX5_HW_CTRL_RX_PRIO_L2 (MLX5_HW_LOWEST_PRIO_ROOT)
54 #define MLX5_HW_CTRL_RX_PRIO_L3 (MLX5_HW_LOWEST_PRIO_ROOT - 1)
55 #define MLX5_HW_CTRL_RX_PRIO_L4 (MLX5_HW_LOWEST_PRIO_ROOT - 2)
56 
57 #define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
58 #define MLX5_HW_VLAN_PUSH_VID_IDX 1
59 #define MLX5_HW_VLAN_PUSH_PCP_IDX 2
60 
61 #define MLX5_MIRROR_MAX_CLONES_NUM 3
62 #define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
63 
64 #define MLX5_HW_PORT_IS_PROXY(priv) \
65 	(!!((priv)->sh->esw_mode && (priv)->master))
66 
67 
68 struct mlx5_indlst_legacy {
69 	struct mlx5_indirect_list indirect;
70 	struct rte_flow_action_handle *handle;
71 	enum rte_flow_action_type legacy_type;
72 };
73 
74 #define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
75 (((const struct encap_type *)(ptr))->definition)
76 
77 struct mlx5_multi_pattern_ctx {
78 	union {
79 		struct mlx5dr_action_reformat_header reformat_hdr;
80 		struct mlx5dr_action_mh_pattern mh_pattern;
81 	};
82 	union {
83 		/* action template auxiliary structures for object destruction */
84 		struct mlx5_hw_encap_decap_action *encap;
85 		struct mlx5_hw_modify_header_action *mhdr;
86 	};
87 	/* multi pattern action */
88 	struct mlx5dr_rule_action *rule_action;
89 };
90 
91 #define MLX5_MULTIPATTERN_ENCAP_NUM 4
92 
93 struct mlx5_tbl_multi_pattern_ctx {
94 	struct {
95 		uint32_t elements_num;
96 		struct mlx5_multi_pattern_ctx ctx[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
97 	} reformat[MLX5_MULTIPATTERN_ENCAP_NUM];
98 
99 	struct {
100 		uint32_t elements_num;
101 		struct mlx5_multi_pattern_ctx ctx[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
102 	} mh;
103 };
104 
105 #define MLX5_EMPTY_MULTI_PATTERN_CTX {{{0,}},}
106 
107 static int
108 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
109 			       struct rte_flow_template_table *tbl,
110 			       struct mlx5_tbl_multi_pattern_ctx *mpat,
111 			       struct rte_flow_error *error);
112 
113 static __rte_always_inline int
114 mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
115 {
116 	switch (type) {
117 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
118 		return 0;
119 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
120 		return 1;
121 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
122 		return 2;
123 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
124 		return 3;
125 	default:
126 		break;
127 	}
128 	return -1;
129 }
130 
131 static __rte_always_inline enum mlx5dr_action_type
132 mlx5_multi_pattern_reformat_index_to_type(uint32_t ix)
133 {
134 	switch (ix) {
135 	case 0:
136 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
137 	case 1:
138 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
139 	case 2:
140 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
141 	case 3:
142 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
143 	default:
144 		break;
145 	}
146 	return MLX5DR_ACTION_TYP_MAX;
147 }
148 
149 static inline enum mlx5dr_table_type
150 get_mlx5dr_table_type(const struct rte_flow_attr *attr)
151 {
152 	enum mlx5dr_table_type type;
153 
154 	if (attr->transfer)
155 		type = MLX5DR_TABLE_TYPE_FDB;
156 	else if (attr->egress)
157 		type = MLX5DR_TABLE_TYPE_NIC_TX;
158 	else
159 		type = MLX5DR_TABLE_TYPE_NIC_RX;
160 	return type;
161 }
162 
163 struct mlx5_mirror_clone {
164 	enum rte_flow_action_type type;
165 	void *action_ctx;
166 };
167 
168 struct mlx5_mirror {
169 	struct mlx5_indirect_list indirect;
170 	uint32_t clones_num;
171 	struct mlx5dr_action *mirror_action;
172 	struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
173 };
174 
175 static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
176 static int flow_hw_translate_group(struct rte_eth_dev *dev,
177 				   const struct mlx5_flow_template_table_cfg *cfg,
178 				   uint32_t group,
179 				   uint32_t *table_group,
180 				   struct rte_flow_error *error);
181 static __rte_always_inline int
182 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
183 			       struct mlx5_hw_q_job *job,
184 			       struct mlx5_action_construct_data *act_data,
185 			       const struct mlx5_hw_actions *hw_acts,
186 			       const struct rte_flow_action *action);
187 static void
188 flow_hw_construct_quota(struct mlx5_priv *priv,
189 			struct mlx5dr_rule_action *rule_act, uint32_t qid);
190 
191 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
192 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
193 
194 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
195 
196 /* DR action flags with different table. */
197 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
198 				[MLX5DR_TABLE_TYPE_MAX] = {
199 	{
200 		MLX5DR_ACTION_FLAG_ROOT_RX,
201 		MLX5DR_ACTION_FLAG_ROOT_TX,
202 		MLX5DR_ACTION_FLAG_ROOT_FDB,
203 	},
204 	{
205 		MLX5DR_ACTION_FLAG_HWS_RX,
206 		MLX5DR_ACTION_FLAG_HWS_TX,
207 		MLX5DR_ACTION_FLAG_HWS_FDB,
208 	},
209 };
210 
211 /* Ethernet item spec for promiscuous mode. */
212 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_spec = {
213 	.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
214 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
215 	.hdr.ether_type = 0,
216 };
217 /* Ethernet item mask for promiscuous mode. */
218 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_mask = {
219 	.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
220 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
221 	.hdr.ether_type = 0,
222 };
223 
224 /* Ethernet item spec for all multicast mode. */
225 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_spec = {
226 	.hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
227 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
228 	.hdr.ether_type = 0,
229 };
230 /* Ethernet item mask for all multicast mode. */
231 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_mask = {
232 	.hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
233 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
234 	.hdr.ether_type = 0,
235 };
236 
237 /* Ethernet item spec for IPv4 multicast traffic. */
238 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_spec = {
239 	.hdr.dst_addr.addr_bytes = "\x01\x00\x5e\x00\x00\x00",
240 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
241 	.hdr.ether_type = 0,
242 };
243 /* Ethernet item mask for IPv4 multicast traffic. */
244 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_mask = {
245 	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\x00\x00\x00",
246 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
247 	.hdr.ether_type = 0,
248 };
249 
250 /* Ethernet item spec for IPv6 multicast traffic. */
251 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_spec = {
252 	.hdr.dst_addr.addr_bytes = "\x33\x33\x00\x00\x00\x00",
253 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
254 	.hdr.ether_type = 0,
255 };
256 /* Ethernet item mask for IPv6 multicast traffic. */
257 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_mask = {
258 	.hdr.dst_addr.addr_bytes = "\xff\xff\x00\x00\x00\x00",
259 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
260 	.hdr.ether_type = 0,
261 };
262 
263 /* Ethernet item mask for unicast traffic. */
264 static const struct rte_flow_item_eth ctrl_rx_eth_dmac_mask = {
265 	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
266 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
267 	.hdr.ether_type = 0,
268 };
269 
270 /* Ethernet item spec for broadcast. */
271 static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
272 	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
273 	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
274 	.hdr.ether_type = 0,
275 };
276 
277 static __rte_always_inline struct mlx5_hw_q_job *
278 flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)
279 {
280 	MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size);
281 	return priv->hw_q[queue].job_idx ?
282 	       priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL;
283 }
284 
285 static __rte_always_inline void
286 flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue)
287 {
288 	MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size);
289 	priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
290 }
291 
292 static inline enum mlx5dr_matcher_insert_mode
293 flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)
294 {
295 	if (insert_type == RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
296 		return MLX5DR_MATCHER_INSERT_BY_HASH;
297 	else
298 		return MLX5DR_MATCHER_INSERT_BY_INDEX;
299 }
300 
301 static inline enum mlx5dr_matcher_distribute_mode
302 flow_hw_matcher_distribute_mode_get(enum rte_flow_table_hash_func hash_func)
303 {
304 	if (hash_func == RTE_FLOW_TABLE_HASH_FUNC_LINEAR)
305 		return MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR;
306 	else
307 		return MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
308 }
309 
310 /**
311  * Set the hash fields according to the @p rss_desc information.
312  *
313  * @param[in] rss_desc
314  *   Pointer to the mlx5_flow_rss_desc.
315  * @param[out] hash_fields
316  *   Pointer to the RSS hash fields.
317  */
318 static void
319 flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
320 		       uint64_t *hash_fields)
321 {
322 	uint64_t fields = 0;
323 	int rss_inner = 0;
324 	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
325 
326 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
327 	if (rss_desc->level >= 2)
328 		rss_inner = 1;
329 #endif
330 	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
331 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
332 			fields |= IBV_RX_HASH_SRC_IPV4;
333 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
334 			fields |= IBV_RX_HASH_DST_IPV4;
335 		else
336 			fields |= MLX5_IPV4_IBV_RX_HASH;
337 	} else if (rss_types & MLX5_IPV6_LAYER_TYPES) {
338 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
339 			fields |= IBV_RX_HASH_SRC_IPV6;
340 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
341 			fields |= IBV_RX_HASH_DST_IPV6;
342 		else
343 			fields |= MLX5_IPV6_IBV_RX_HASH;
344 	}
345 	if (rss_types & RTE_ETH_RSS_UDP) {
346 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
347 			fields |= IBV_RX_HASH_SRC_PORT_UDP;
348 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
349 			fields |= IBV_RX_HASH_DST_PORT_UDP;
350 		else
351 			fields |= MLX5_UDP_IBV_RX_HASH;
352 	} else if (rss_types & RTE_ETH_RSS_TCP) {
353 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
354 			fields |= IBV_RX_HASH_SRC_PORT_TCP;
355 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
356 			fields |= IBV_RX_HASH_DST_PORT_TCP;
357 		else
358 			fields |= MLX5_TCP_IBV_RX_HASH;
359 	}
360 	if (rss_types & RTE_ETH_RSS_ESP)
361 		fields |= IBV_RX_HASH_IPSEC_SPI;
362 	if (rss_inner)
363 		fields |= IBV_RX_HASH_INNER;
364 	*hash_fields = fields;
365 }
366 
367 /**
368  * Generate the matching pattern item flags.
369  *
370  * @param[in] items
371  *   Pointer to the list of items.
372  *
373  * @return
374  *   Matching item flags. RSS hash field function
375  *   silently ignores the flags which are unsupported.
376  */
377 static uint64_t
378 flow_hw_matching_item_flags_get(const struct rte_flow_item items[])
379 {
380 	uint64_t item_flags = 0;
381 	uint64_t last_item = 0;
382 
383 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
384 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
385 		int item_type = items->type;
386 
387 		switch (item_type) {
388 		case RTE_FLOW_ITEM_TYPE_IPV4:
389 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
390 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
391 			break;
392 		case RTE_FLOW_ITEM_TYPE_IPV6:
393 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
394 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
395 			break;
396 		case RTE_FLOW_ITEM_TYPE_TCP:
397 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
398 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
399 			break;
400 		case RTE_FLOW_ITEM_TYPE_UDP:
401 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
402 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
403 			break;
404 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
405 			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
406 					     MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
407 			break;
408 		case RTE_FLOW_ITEM_TYPE_GRE:
409 			last_item = MLX5_FLOW_LAYER_GRE;
410 			break;
411 		case RTE_FLOW_ITEM_TYPE_NVGRE:
412 			last_item = MLX5_FLOW_LAYER_GRE;
413 			break;
414 		case RTE_FLOW_ITEM_TYPE_VXLAN:
415 			last_item = MLX5_FLOW_LAYER_VXLAN;
416 			break;
417 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
418 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
419 			break;
420 		case RTE_FLOW_ITEM_TYPE_GENEVE:
421 			last_item = MLX5_FLOW_LAYER_GENEVE;
422 			break;
423 		case RTE_FLOW_ITEM_TYPE_MPLS:
424 			last_item = MLX5_FLOW_LAYER_MPLS;
425 			break;
426 		case RTE_FLOW_ITEM_TYPE_GTP:
427 			last_item = MLX5_FLOW_LAYER_GTP;
428 			break;
429 		default:
430 			break;
431 		}
432 		item_flags |= last_item;
433 	}
434 	return item_flags;
435 }
436 
437 /**
438  * Register destination table DR jump action.
439  *
440  * @param[in] dev
441  *   Pointer to the rte_eth_dev structure.
442  * @param[in] table_attr
443  *   Pointer to the flow attributes.
444  * @param[in] dest_group
445  *   The destination group ID.
446  * @param[out] error
447  *   Pointer to error structure.
448  *
449  * @return
450  *    Table on success, NULL otherwise and rte_errno is set.
451  */
452 static struct mlx5_hw_jump_action *
453 flow_hw_jump_action_register(struct rte_eth_dev *dev,
454 			     const struct mlx5_flow_template_table_cfg *cfg,
455 			     uint32_t dest_group,
456 			     struct rte_flow_error *error)
457 {
458 	struct mlx5_priv *priv = dev->data->dev_private;
459 	struct rte_flow_attr jattr = cfg->attr.flow_attr;
460 	struct mlx5_flow_group *grp;
461 	struct mlx5_flow_cb_ctx ctx = {
462 		.dev = dev,
463 		.error = error,
464 		.data = &jattr,
465 	};
466 	struct mlx5_list_entry *ge;
467 	uint32_t target_group;
468 
469 	target_group = dest_group;
470 	if (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))
471 		return NULL;
472 	jattr.group = target_group;
473 	ge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);
474 	if (!ge)
475 		return NULL;
476 	grp = container_of(ge, struct mlx5_flow_group, entry);
477 	return &grp->jump;
478 }
479 
480 /**
481  * Release jump action.
482  *
483  * @param[in] dev
484  *   Pointer to the rte_eth_dev structure.
485  * @param[in] jump
486  *   Pointer to the jump action.
487  */
488 
489 static void
490 flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
491 {
492 	struct mlx5_priv *priv = dev->data->dev_private;
493 	struct mlx5_flow_group *grp;
494 
495 	grp = container_of
496 		(jump, struct mlx5_flow_group, jump);
497 	mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
498 }
499 
500 /**
501  * Register queue/RSS action.
502  *
503  * @param[in] dev
504  *   Pointer to the rte_eth_dev structure.
505  * @param[in] hws_flags
506  *   DR action flags.
507  * @param[in] action
508  *   rte flow action.
509  *
510  * @return
511  *    Table on success, NULL otherwise and rte_errno is set.
512  */
513 static inline struct mlx5_hrxq*
514 flow_hw_tir_action_register(struct rte_eth_dev *dev,
515 			    uint32_t hws_flags,
516 			    const struct rte_flow_action *action)
517 {
518 	struct mlx5_flow_rss_desc rss_desc = {
519 		.hws_flags = hws_flags,
520 	};
521 	struct mlx5_hrxq *hrxq;
522 
523 	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
524 		const struct rte_flow_action_queue *queue = action->conf;
525 
526 		rss_desc.const_q = &queue->index;
527 		rss_desc.queue_num = 1;
528 	} else {
529 		const struct rte_flow_action_rss *rss = action->conf;
530 
531 		rss_desc.queue_num = rss->queue_num;
532 		rss_desc.const_q = rss->queue;
533 		memcpy(rss_desc.key,
534 		       !rss->key ? rss_hash_default_key : rss->key,
535 		       MLX5_RSS_HASH_KEY_LEN);
536 		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
537 		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
538 		rss_desc.symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
539 		flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
540 		flow_dv_action_rss_l34_hash_adjust(rss->types,
541 						   &rss_desc.hash_fields);
542 		if (rss->level > 1) {
543 			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
544 			rss_desc.tunnel = 1;
545 		}
546 	}
547 	hrxq = mlx5_hrxq_get(dev, &rss_desc);
548 	return hrxq;
549 }
550 
551 static __rte_always_inline int
552 flow_hw_ct_compile(struct rte_eth_dev *dev,
553 		   uint32_t queue, uint32_t idx,
554 		   struct mlx5dr_rule_action *rule_act)
555 {
556 	struct mlx5_priv *priv = dev->data->dev_private;
557 	struct mlx5_aso_ct_action *ct;
558 
559 	ct = mlx5_ipool_get(priv->hws_ctpool->cts, MLX5_ACTION_CTX_CT_GET_IDX(idx));
560 	if (!ct || mlx5_aso_ct_available(priv->sh, queue, ct))
561 		return -1;
562 	rule_act->action = priv->hws_ctpool->dr_action;
563 	rule_act->aso_ct.offset = ct->offset;
564 	rule_act->aso_ct.direction = ct->is_original ?
565 		MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
566 		MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
567 	return 0;
568 }
569 
570 static void
571 flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
572 {
573 	if (encap_decap->multi_pattern) {
574 		uint32_t refcnt = __atomic_sub_fetch(encap_decap->multi_pattern_refcnt,
575 						     1, __ATOMIC_RELAXED);
576 		if (refcnt)
577 			return;
578 		mlx5_free((void *)(uintptr_t)encap_decap->multi_pattern_refcnt);
579 	}
580 	if (encap_decap->action)
581 		mlx5dr_action_destroy(encap_decap->action);
582 }
583 
584 static void
585 flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
586 {
587 	if (mhdr->multi_pattern) {
588 		uint32_t refcnt = __atomic_sub_fetch(mhdr->multi_pattern_refcnt,
589 						     1, __ATOMIC_RELAXED);
590 		if (refcnt)
591 			return;
592 		mlx5_free((void *)(uintptr_t)mhdr->multi_pattern_refcnt);
593 	}
594 	if (mhdr->action)
595 		mlx5dr_action_destroy(mhdr->action);
596 }
597 
598 /**
599  * Destroy DR actions created by action template.
600  *
601  * For DR actions created during table creation's action translate.
602  * Need to destroy the DR action when destroying the table.
603  *
604  * @param[in] dev
605  *   Pointer to the rte_eth_dev structure.
606  * @param[in] acts
607  *   Pointer to the template HW steering DR actions.
608  */
609 static void
610 __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
611 				 struct mlx5_hw_actions *acts)
612 {
613 	struct mlx5_priv *priv = dev->data->dev_private;
614 	struct mlx5_action_construct_data *data;
615 
616 	while (!LIST_EMPTY(&acts->act_list)) {
617 		data = LIST_FIRST(&acts->act_list);
618 		LIST_REMOVE(data, next);
619 		mlx5_ipool_free(priv->acts_ipool, data->idx);
620 	}
621 
622 	if (acts->mark)
623 		if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
624 			flow_hw_rxq_flag_set(dev, false);
625 
626 	if (acts->jump) {
627 		struct mlx5_flow_group *grp;
628 
629 		grp = container_of
630 			(acts->jump, struct mlx5_flow_group, jump);
631 		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
632 		acts->jump = NULL;
633 	}
634 	if (acts->tir) {
635 		mlx5_hrxq_release(dev, acts->tir->idx);
636 		acts->tir = NULL;
637 	}
638 	if (acts->encap_decap) {
639 		flow_hw_template_destroy_reformat_action(acts->encap_decap);
640 		mlx5_free(acts->encap_decap);
641 		acts->encap_decap = NULL;
642 	}
643 	if (acts->push_remove) {
644 		if (acts->push_remove->action)
645 			mlx5dr_action_destroy(acts->push_remove->action);
646 		mlx5_free(acts->push_remove);
647 		acts->push_remove = NULL;
648 	}
649 	if (acts->mhdr) {
650 		flow_hw_template_destroy_mhdr_action(acts->mhdr);
651 		mlx5_free(acts->mhdr);
652 		acts->mhdr = NULL;
653 	}
654 	if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
655 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
656 		acts->cnt_id = 0;
657 	}
658 	if (acts->mtr_id) {
659 		mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
660 		acts->mtr_id = 0;
661 	}
662 }
663 
664 /**
665  * Append dynamic action to the dynamic action list.
666  *
667  * @param[in] priv
668  *   Pointer to the port private data structure.
669  * @param[in] acts
670  *   Pointer to the template HW steering DR actions.
671  * @param[in] type
672  *   Action type.
673  * @param[in] action_src
674  *   Offset of source rte flow action.
675  * @param[in] action_dst
676  *   Offset of destination DR action.
677  *
678  * @return
679  *    0 on success, negative value otherwise and rte_errno is set.
680  */
681 static __rte_always_inline struct mlx5_action_construct_data *
682 __flow_hw_act_data_alloc(struct mlx5_priv *priv,
683 			 enum rte_flow_action_type type,
684 			 uint16_t action_src,
685 			 uint16_t action_dst)
686 {
687 	struct mlx5_action_construct_data *act_data;
688 	uint32_t idx = 0;
689 
690 	act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
691 	if (!act_data)
692 		return NULL;
693 	act_data->idx = idx;
694 	act_data->type = type;
695 	act_data->action_src = action_src;
696 	act_data->action_dst = action_dst;
697 	return act_data;
698 }
699 
700 /**
701  * Append dynamic action to the dynamic action list.
702  *
703  * @param[in] priv
704  *   Pointer to the port private data structure.
705  * @param[in] acts
706  *   Pointer to the template HW steering DR actions.
707  * @param[in] type
708  *   Action type.
709  * @param[in] action_src
710  *   Offset of source rte flow action.
711  * @param[in] action_dst
712  *   Offset of destination DR action.
713  *
714  * @return
715  *    0 on success, negative value otherwise and rte_errno is set.
716  */
717 static __rte_always_inline int
718 __flow_hw_act_data_general_append(struct mlx5_priv *priv,
719 				  struct mlx5_hw_actions *acts,
720 				  enum rte_flow_action_type type,
721 				  uint16_t action_src,
722 				  uint16_t action_dst)
723 {
724 	struct mlx5_action_construct_data *act_data;
725 
726 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
727 	if (!act_data)
728 		return -1;
729 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
730 	return 0;
731 }
732 
733 static __rte_always_inline int
734 flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
735 				      struct mlx5_hw_actions *acts,
736 				      enum rte_flow_action_type type,
737 				      uint16_t action_src, uint16_t action_dst,
738 				      indirect_list_callback_t cb)
739 {
740 	struct mlx5_action_construct_data *act_data;
741 
742 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
743 	if (!act_data)
744 		return -1;
745 	act_data->indirect_list_cb = cb;
746 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
747 	return 0;
748 }
749 /**
750  * Append dynamic encap action to the dynamic action list.
751  *
752  * @param[in] priv
753  *   Pointer to the port private data structure.
754  * @param[in] acts
755  *   Pointer to the template HW steering DR actions.
756  * @param[in] type
757  *   Action type.
758  * @param[in] action_src
759  *   Offset of source rte flow action.
760  * @param[in] action_dst
761  *   Offset of destination DR action.
762  * @param[in] len
763  *   Length of the data to be updated.
764  *
765  * @return
766  *    0 on success, negative value otherwise and rte_errno is set.
767  */
768 static __rte_always_inline int
769 __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
770 				struct mlx5_hw_actions *acts,
771 				enum rte_flow_action_type type,
772 				uint16_t action_src,
773 				uint16_t action_dst,
774 				uint16_t len)
775 {
776 	struct mlx5_action_construct_data *act_data;
777 
778 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
779 	if (!act_data)
780 		return -1;
781 	act_data->encap.len = len;
782 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
783 	return 0;
784 }
785 
786 /**
787  * Append dynamic push action to the dynamic action list.
788  *
789  * @param[in] dev
790  *   Pointer to the port.
791  * @param[in] acts
792  *   Pointer to the template HW steering DR actions.
793  * @param[in] type
794  *   Action type.
795  * @param[in] action_src
796  *   Offset of source rte flow action.
797  * @param[in] action_dst
798  *   Offset of destination DR action.
799  * @param[in] len
800  *   Length of the data to be updated.
801  *
802  * @return
803  *    Data pointer on success, NULL otherwise and rte_errno is set.
804  */
805 static __rte_always_inline void *
806 __flow_hw_act_data_push_append(struct rte_eth_dev *dev,
807 			       struct mlx5_hw_actions *acts,
808 			       enum rte_flow_action_type type,
809 			       uint16_t action_src,
810 			       uint16_t action_dst,
811 			       uint16_t len)
812 {
813 	struct mlx5_action_construct_data *act_data;
814 	struct mlx5_priv *priv = dev->data->dev_private;
815 
816 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
817 	if (!act_data)
818 		return NULL;
819 	act_data->ipv6_ext.len = len;
820 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
821 	return act_data;
822 }
823 
824 static __rte_always_inline int
825 __flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
826 				     struct mlx5_hw_actions *acts,
827 				     enum rte_flow_action_type type,
828 				     uint16_t action_src,
829 				     uint16_t action_dst,
830 				     uint16_t mhdr_cmds_off,
831 				     uint16_t mhdr_cmds_end,
832 				     bool shared,
833 				     struct field_modify_info *field,
834 				     struct field_modify_info *dcopy,
835 				     uint32_t *mask)
836 {
837 	struct mlx5_action_construct_data *act_data;
838 
839 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
840 	if (!act_data)
841 		return -1;
842 	act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
843 	act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
844 	act_data->modify_header.shared = shared;
845 	rte_memcpy(act_data->modify_header.field, field,
846 		   sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
847 	rte_memcpy(act_data->modify_header.dcopy, dcopy,
848 		   sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
849 	rte_memcpy(act_data->modify_header.mask, mask,
850 		   sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
851 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
852 	return 0;
853 }
854 
855 /**
856  * Append shared RSS action to the dynamic action list.
857  *
858  * @param[in] priv
859  *   Pointer to the port private data structure.
860  * @param[in] acts
861  *   Pointer to the template HW steering DR actions.
862  * @param[in] type
863  *   Action type.
864  * @param[in] action_src
865  *   Offset of source rte flow action.
866  * @param[in] action_dst
867  *   Offset of destination DR action.
868  * @param[in] idx
869  *   Shared RSS index.
870  * @param[in] rss
871  *   Pointer to the shared RSS info.
872  *
873  * @return
874  *    0 on success, negative value otherwise and rte_errno is set.
875  */
876 static __rte_always_inline int
877 __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
878 				     struct mlx5_hw_actions *acts,
879 				     enum rte_flow_action_type type,
880 				     uint16_t action_src,
881 				     uint16_t action_dst,
882 				     uint32_t idx,
883 				     struct mlx5_shared_action_rss *rss)
884 {
885 	struct mlx5_action_construct_data *act_data;
886 
887 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
888 	if (!act_data)
889 		return -1;
890 	act_data->shared_rss.level = rss->origin.level;
891 	act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
892 				     rss->origin.types;
893 	act_data->shared_rss.idx = idx;
894 	act_data->shared_rss.symmetric_hash_function =
895 		MLX5_RSS_IS_SYMM(rss->origin.func);
896 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
897 	return 0;
898 }
899 
900 /**
901  * Append shared counter action to the dynamic action list.
902  *
903  * @param[in] priv
904  *   Pointer to the port private data structure.
905  * @param[in] acts
906  *   Pointer to the template HW steering DR actions.
907  * @param[in] type
908  *   Action type.
909  * @param[in] action_src
910  *   Offset of source rte flow action.
911  * @param[in] action_dst
912  *   Offset of destination DR action.
913  * @param[in] cnt_id
914  *   Shared counter id.
915  *
916  * @return
917  *    0 on success, negative value otherwise and rte_errno is set.
918  */
919 static __rte_always_inline int
920 __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
921 				     struct mlx5_hw_actions *acts,
922 				     enum rte_flow_action_type type,
923 				     uint16_t action_src,
924 				     uint16_t action_dst,
925 				     cnt_id_t cnt_id)
926 {
927 	struct mlx5_action_construct_data *act_data;
928 
929 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
930 	if (!act_data)
931 		return -1;
932 	act_data->type = type;
933 	act_data->shared_counter.id = cnt_id;
934 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
935 	return 0;
936 }
937 
938 /**
939  * Append shared meter_mark action to the dynamic action list.
940  *
941  * @param[in] priv
942  *   Pointer to the port private data structure.
943  * @param[in] acts
944  *   Pointer to the template HW steering DR actions.
945  * @param[in] type
946  *   Action type.
947  * @param[in] action_src
948  *   Offset of source rte flow action.
949  * @param[in] action_dst
950  *   Offset of destination DR action.
951  * @param[in] mtr_id
952  *   Shared meter id.
953  *
954  * @return
955  *    0 on success, negative value otherwise and rte_errno is set.
956  */
957 static __rte_always_inline int
958 __flow_hw_act_data_shared_mtr_append(struct mlx5_priv *priv,
959 				     struct mlx5_hw_actions *acts,
960 				     enum rte_flow_action_type type,
961 				     uint16_t action_src,
962 				     uint16_t action_dst,
963 				     cnt_id_t mtr_id)
964 {	struct mlx5_action_construct_data *act_data;
965 
966 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
967 	if (!act_data)
968 		return -1;
969 	act_data->type = type;
970 	act_data->shared_meter.id = mtr_id;
971 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
972 	return 0;
973 }
974 
975 /**
976  * Translate shared indirect action.
977  *
978  * @param[in] dev
979  *   Pointer to the rte_eth_dev data structure.
980  * @param[in] action
981  *   Pointer to the shared indirect rte_flow action.
982  * @param[in] acts
983  *   Pointer to the template HW steering DR actions.
984  * @param[in] action_src
985  *   Offset of source rte flow action.
986  * @param[in] action_dst
987  *   Offset of destination DR action.
988  *
989  * @return
990  *    0 on success, negative value otherwise and rte_errno is set.
991  */
992 static __rte_always_inline int
993 flow_hw_shared_action_translate(struct rte_eth_dev *dev,
994 				const struct rte_flow_action *action,
995 				struct mlx5_hw_actions *acts,
996 				uint16_t action_src,
997 				uint16_t action_dst)
998 {
999 	struct mlx5_priv *priv = dev->data->dev_private;
1000 	struct mlx5_shared_action_rss *shared_rss;
1001 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
1002 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1003 	uint32_t idx = act_idx &
1004 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1005 
1006 	switch (type) {
1007 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
1008 		shared_rss = mlx5_ipool_get
1009 		  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
1010 		if (!shared_rss || __flow_hw_act_data_shared_rss_append
1011 		    (priv, acts,
1012 		    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
1013 		    action_src, action_dst, idx, shared_rss))
1014 			return -1;
1015 		break;
1016 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
1017 		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
1018 			(enum rte_flow_action_type)
1019 			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
1020 			action_src, action_dst, act_idx))
1021 			return -1;
1022 		break;
1023 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
1024 		/* Not supported, prevent by validate function. */
1025 		MLX5_ASSERT(0);
1026 		break;
1027 	case MLX5_INDIRECT_ACTION_TYPE_CT:
1028 		if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
1029 				       idx, &acts->rule_acts[action_dst]))
1030 			return -1;
1031 		break;
1032 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1033 		if (__flow_hw_act_data_shared_mtr_append(priv, acts,
1034 			(enum rte_flow_action_type)
1035 			MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
1036 			action_src, action_dst, idx))
1037 			return -1;
1038 		break;
1039 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
1040 		flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
1041 		break;
1042 	default:
1043 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
1044 		break;
1045 	}
1046 	return 0;
1047 }
1048 
1049 static __rte_always_inline bool
1050 flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
1051 				      const struct rte_flow_action *mask)
1052 {
1053 	const struct rte_flow_action_modify_field *v = action->conf;
1054 	const struct rte_flow_action_modify_field *m = mask->conf;
1055 
1056 	if (v->src.field == RTE_FLOW_FIELD_VALUE) {
1057 		uint32_t j;
1058 
1059 		for (j = 0; j < RTE_DIM(m->src.value); ++j) {
1060 			/*
1061 			 * Immediate value is considered to be masked
1062 			 * (and thus shared by all flow rules), if mask
1063 			 * is non-zero. Partial mask over immediate value
1064 			 * is not allowed.
1065 			 */
1066 			if (m->src.value[j])
1067 				return true;
1068 		}
1069 		return false;
1070 	}
1071 	if (v->src.field == RTE_FLOW_FIELD_POINTER)
1072 		return m->src.pvalue != NULL;
1073 	/*
1074 	 * Source field types other than VALUE and
1075 	 * POINTER are always shared.
1076 	 */
1077 	return true;
1078 }
1079 
1080 static __rte_always_inline bool
1081 flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
1082 			  const struct mlx5_modification_cmd *cmd)
1083 {
1084 	struct mlx5_modification_cmd last_cmd = { { 0 } };
1085 	struct mlx5_modification_cmd new_cmd = { { 0 } };
1086 	const uint32_t cmds_num = mhdr->mhdr_cmds_num;
1087 	unsigned int last_type;
1088 	bool should_insert = false;
1089 
1090 	if (cmds_num == 0)
1091 		return false;
1092 	last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
1093 	last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
1094 	last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
1095 	last_type = last_cmd.action_type;
1096 	new_cmd = *cmd;
1097 	new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
1098 	new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
1099 	switch (new_cmd.action_type) {
1100 	case MLX5_MODIFICATION_TYPE_SET:
1101 	case MLX5_MODIFICATION_TYPE_ADD:
1102 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1103 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1104 			should_insert = new_cmd.field == last_cmd.field;
1105 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY)
1106 			should_insert = new_cmd.field == last_cmd.dst_field;
1107 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1108 			should_insert = false;
1109 		else
1110 			MLX5_ASSERT(false); /* Other types are not supported. */
1111 		break;
1112 	case MLX5_MODIFICATION_TYPE_COPY:
1113 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1114 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1115 			should_insert = (new_cmd.field == last_cmd.field ||
1116 					 new_cmd.dst_field == last_cmd.field);
1117 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY)
1118 			should_insert = (new_cmd.field == last_cmd.dst_field ||
1119 					 new_cmd.dst_field == last_cmd.dst_field);
1120 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1121 			should_insert = false;
1122 		else
1123 			MLX5_ASSERT(false); /* Other types are not supported. */
1124 		break;
1125 	default:
1126 		/* Other action types should be rejected on AT validation. */
1127 		MLX5_ASSERT(false);
1128 		break;
1129 	}
1130 	return should_insert;
1131 }
1132 
1133 static __rte_always_inline int
1134 flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
1135 {
1136 	struct mlx5_modification_cmd *nop;
1137 	uint32_t num = mhdr->mhdr_cmds_num;
1138 
1139 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1140 		return -ENOMEM;
1141 	nop = mhdr->mhdr_cmds + num;
1142 	nop->data0 = 0;
1143 	nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
1144 	nop->data0 = rte_cpu_to_be_32(nop->data0);
1145 	nop->data1 = 0;
1146 	mhdr->mhdr_cmds_num = num + 1;
1147 	return 0;
1148 }
1149 
1150 static __rte_always_inline int
1151 flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
1152 			struct mlx5_modification_cmd *cmd)
1153 {
1154 	uint32_t num = mhdr->mhdr_cmds_num;
1155 
1156 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1157 		return -ENOMEM;
1158 	mhdr->mhdr_cmds[num] = *cmd;
1159 	mhdr->mhdr_cmds_num = num + 1;
1160 	return 0;
1161 }
1162 
1163 static __rte_always_inline int
1164 flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
1165 				   struct mlx5_flow_dv_modify_hdr_resource *resource)
1166 {
1167 	uint32_t idx;
1168 	int ret;
1169 
1170 	for (idx = 0; idx < resource->actions_num; ++idx) {
1171 		struct mlx5_modification_cmd *src = &resource->actions[idx];
1172 
1173 		if (flow_hw_should_insert_nop(mhdr, src)) {
1174 			ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1175 			if (ret)
1176 				return ret;
1177 		}
1178 		ret = flow_hw_mhdr_cmd_append(mhdr, src);
1179 		if (ret)
1180 			return ret;
1181 	}
1182 	return 0;
1183 }
1184 
1185 static __rte_always_inline void
1186 flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
1187 			  struct rte_flow_actions_template *at)
1188 {
1189 	memset(mhdr, 0, sizeof(*mhdr));
1190 	/* Modify header action without any commands is shared by default. */
1191 	mhdr->shared = true;
1192 	mhdr->pos = at->mhdr_off;
1193 }
1194 
1195 static __rte_always_inline int
1196 flow_hw_modify_field_compile(struct rte_eth_dev *dev,
1197 			     const struct rte_flow_attr *attr,
1198 			     const struct rte_flow_action *action, /* Current action from AT. */
1199 			     const struct rte_flow_action *action_mask, /* Current mask from AT. */
1200 			     struct mlx5_hw_actions *acts,
1201 			     struct mlx5_hw_modify_header_action *mhdr,
1202 			     uint16_t src_pos,
1203 			     struct rte_flow_error *error)
1204 {
1205 	struct mlx5_priv *priv = dev->data->dev_private;
1206 	const struct rte_flow_action_modify_field *conf = action->conf;
1207 	union {
1208 		struct mlx5_flow_dv_modify_hdr_resource resource;
1209 		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
1210 			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
1211 	} dummy;
1212 	struct mlx5_flow_dv_modify_hdr_resource *resource;
1213 	struct rte_flow_item item = {
1214 		.spec = NULL,
1215 		.mask = NULL
1216 	};
1217 	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1218 						{0, 0, MLX5_MODI_OUT_NONE} };
1219 	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1220 						{0, 0, MLX5_MODI_OUT_NONE} };
1221 	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
1222 	uint32_t type, value = 0;
1223 	uint16_t cmds_start, cmds_end;
1224 	bool shared;
1225 	int ret;
1226 
1227 	/*
1228 	 * Modify header action is shared if previous modify_field actions
1229 	 * are shared and currently compiled action is shared.
1230 	 */
1231 	shared = flow_hw_action_modify_field_is_shared(action, action_mask);
1232 	mhdr->shared &= shared;
1233 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1234 	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
1235 		type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
1236 								MLX5_MODIFICATION_TYPE_ADD;
1237 		/* For SET/ADD fill the destination field (field) first. */
1238 		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1239 						  conf->width, dev,
1240 						  attr, error);
1241 		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1242 				(void *)(uintptr_t)conf->src.pvalue :
1243 				(void *)(uintptr_t)&conf->src.value;
1244 		if (conf->dst.field == RTE_FLOW_FIELD_META ||
1245 		    conf->dst.field == RTE_FLOW_FIELD_TAG ||
1246 		    conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
1247 		    conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
1248 			uint8_t tag_index = flow_tag_index_get(&conf->dst);
1249 
1250 			value = *(const unaligned_uint32_t *)item.spec;
1251 			if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
1252 			    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1253 				value = rte_cpu_to_be_32(value << 16);
1254 			else
1255 				value = rte_cpu_to_be_32(value);
1256 			item.spec = &value;
1257 		} else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {
1258 			/*
1259 			 * QFI is passed as an uint8_t integer, but it is accessed through
1260 			 * a 2nd least significant byte of a 32-bit field in modify header command.
1261 			 */
1262 			value = *(const uint8_t *)item.spec;
1263 			value = rte_cpu_to_be_32(value << 8);
1264 			item.spec = &value;
1265 		}
1266 	} else {
1267 		type = MLX5_MODIFICATION_TYPE_COPY;
1268 		/* For COPY fill the destination field (dcopy) without mask. */
1269 		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1270 						  conf->width, dev,
1271 						  attr, error);
1272 		/* Then construct the source field (field) with mask. */
1273 		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1274 						  conf->width, dev,
1275 						  attr, error);
1276 	}
1277 	item.mask = &mask;
1278 	memset(&dummy, 0, sizeof(dummy));
1279 	resource = &dummy.resource;
1280 	ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
1281 	if (ret)
1282 		return ret;
1283 	MLX5_ASSERT(resource->actions_num > 0);
1284 	/*
1285 	 * If previous modify field action collide with this one, then insert NOP command.
1286 	 * This NOP command will not be a part of action's command range used to update commands
1287 	 * on rule creation.
1288 	 */
1289 	if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
1290 		ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1291 		if (ret)
1292 			return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1293 						  NULL, "too many modify field operations specified");
1294 	}
1295 	cmds_start = mhdr->mhdr_cmds_num;
1296 	ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
1297 	if (ret)
1298 		return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1299 					  NULL, "too many modify field operations specified");
1300 
1301 	cmds_end = mhdr->mhdr_cmds_num;
1302 	if (shared)
1303 		return 0;
1304 	ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
1305 						   src_pos, mhdr->pos,
1306 						   cmds_start, cmds_end, shared,
1307 						   field, dcopy, mask);
1308 	if (ret)
1309 		return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1310 					  NULL, "not enough memory to store modify field metadata");
1311 	return 0;
1312 }
1313 
1314 static uint32_t
1315 flow_hw_count_nop_modify_field(struct mlx5_hw_modify_header_action *mhdr)
1316 {
1317 	uint32_t i;
1318 	uint32_t nops = 0;
1319 
1320 	for (i = 0; i < mhdr->mhdr_cmds_num; ++i) {
1321 		struct mlx5_modification_cmd cmd = mhdr->mhdr_cmds[i];
1322 
1323 		cmd.data0 = rte_be_to_cpu_32(cmd.data0);
1324 		if (cmd.action_type == MLX5_MODIFICATION_TYPE_NOP)
1325 			++nops;
1326 	}
1327 	return nops;
1328 }
1329 
1330 static int
1331 flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
1332 				       const struct mlx5_flow_template_table_cfg *cfg,
1333 				       struct mlx5_hw_modify_header_action *mhdr,
1334 				       struct rte_flow_error *error)
1335 {
1336 	struct mlx5_priv *priv = dev->data->dev_private;
1337 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
1338 
1339 	/*
1340 	 * Header modify pattern length limitation is only valid for HWS groups, i.e. groups > 0.
1341 	 * In group 0, MODIFY_FIELD actions are handled with header modify actions
1342 	 * managed by rdma-core.
1343 	 */
1344 	if (cfg->attr.flow_attr.group != 0 &&
1345 	    mhdr->mhdr_cmds_num > hca_attr->max_header_modify_pattern_length) {
1346 		uint32_t nops = flow_hw_count_nop_modify_field(mhdr);
1347 
1348 		DRV_LOG(ERR, "Too many modify header commands generated from "
1349 			     "MODIFY_FIELD actions. "
1350 			     "Generated HW commands = %u (amount of NOP commands = %u). "
1351 			     "Maximum supported = %u.",
1352 			     mhdr->mhdr_cmds_num, nops,
1353 			     hca_attr->max_header_modify_pattern_length);
1354 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1355 					  "Number of MODIFY_FIELD actions exceeds maximum "
1356 					  "supported limit of actions");
1357 	}
1358 	return 0;
1359 }
1360 
1361 static int
1362 flow_hw_represented_port_compile(struct rte_eth_dev *dev,
1363 				 const struct rte_flow_attr *attr,
1364 				 const struct rte_flow_action *action,
1365 				 const struct rte_flow_action *action_mask,
1366 				 struct mlx5_hw_actions *acts,
1367 				 uint16_t action_src, uint16_t action_dst,
1368 				 struct rte_flow_error *error)
1369 {
1370 	struct mlx5_priv *priv = dev->data->dev_private;
1371 	const struct rte_flow_action_ethdev *v = action->conf;
1372 	const struct rte_flow_action_ethdev *m = action_mask->conf;
1373 	int ret;
1374 
1375 	if (!attr->group)
1376 		return rte_flow_error_set(error, EINVAL,
1377 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1378 					  "represented_port action cannot"
1379 					  " be used on group 0");
1380 	if (!attr->transfer)
1381 		return rte_flow_error_set(error, EINVAL,
1382 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1383 					  NULL,
1384 					  "represented_port action requires"
1385 					  " transfer attribute");
1386 	if (attr->ingress || attr->egress)
1387 		return rte_flow_error_set(error, EINVAL,
1388 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1389 					  "represented_port action cannot"
1390 					  " be used with direction attributes");
1391 	if (!priv->master)
1392 		return rte_flow_error_set(error, EINVAL,
1393 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1394 					  "represented_port action must"
1395 					  " be used on proxy port");
1396 	if (m && !!m->port_id) {
1397 		struct mlx5_priv *port_priv;
1398 
1399 		if (!v)
1400 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1401 						  action, "port index was not provided");
1402 		port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
1403 		if (port_priv == NULL)
1404 			return rte_flow_error_set
1405 					(error, EINVAL,
1406 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1407 					 "port does not exist or unable to"
1408 					 " obtain E-Switch info for port");
1409 		MLX5_ASSERT(priv->hw_vport != NULL);
1410 		if (priv->hw_vport[v->port_id]) {
1411 			acts->rule_acts[action_dst].action =
1412 					priv->hw_vport[v->port_id];
1413 		} else {
1414 			return rte_flow_error_set
1415 					(error, EINVAL,
1416 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1417 					 "cannot use represented_port action"
1418 					 " with this port");
1419 		}
1420 	} else {
1421 		ret = __flow_hw_act_data_general_append
1422 				(priv, acts, action->type,
1423 				 action_src, action_dst);
1424 		if (ret)
1425 			return rte_flow_error_set
1426 					(error, ENOMEM,
1427 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1428 					 "not enough memory to store"
1429 					 " vport action");
1430 	}
1431 	return 0;
1432 }
1433 
1434 static __rte_always_inline int
1435 flow_hw_meter_compile(struct rte_eth_dev *dev,
1436 		      const struct mlx5_flow_template_table_cfg *cfg,
1437 		      uint16_t aso_mtr_pos,
1438 		      uint16_t jump_pos,
1439 		      const struct rte_flow_action *action,
1440 		      struct mlx5_hw_actions *acts,
1441 		      struct rte_flow_error *error)
1442 {
1443 	struct mlx5_priv *priv = dev->data->dev_private;
1444 	struct mlx5_aso_mtr *aso_mtr;
1445 	const struct rte_flow_action_meter *meter = action->conf;
1446 	uint32_t group = cfg->attr.flow_attr.group;
1447 
1448 	aso_mtr = mlx5_aso_meter_by_idx(priv, meter->mtr_id);
1449 	acts->rule_acts[aso_mtr_pos].action = priv->mtr_bulk.action;
1450 	acts->rule_acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1451 	acts->jump = flow_hw_jump_action_register
1452 		(dev, cfg, aso_mtr->fm.group, error);
1453 	if (!acts->jump)
1454 		return -ENOMEM;
1455 	acts->rule_acts[jump_pos].action = (!!group) ?
1456 				    acts->jump->hws_action :
1457 				    acts->jump->root_action;
1458 	if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
1459 		return -ENOMEM;
1460 	return 0;
1461 }
1462 
1463 static __rte_always_inline int
1464 flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t  start_pos,
1465 		      struct mlx5_hw_actions *acts)
1466 {
1467 	struct mlx5_priv *priv = dev->data->dev_private;
1468 	uint32_t pos = start_pos;
1469 	cnt_id_t cnt_id;
1470 	int ret;
1471 
1472 	ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
1473 	if (ret != 0)
1474 		return ret;
1475 	ret = mlx5_hws_cnt_pool_get_action_offset
1476 				(priv->hws_cpool,
1477 				 cnt_id,
1478 				 &acts->rule_acts[pos].action,
1479 				 &acts->rule_acts[pos].counter.offset);
1480 	if (ret != 0)
1481 		return ret;
1482 	acts->cnt_id = cnt_id;
1483 	return 0;
1484 }
1485 
1486 static __rte_always_inline bool
1487 is_of_vlan_pcp_present(const struct rte_flow_action *actions)
1488 {
1489 	/*
1490 	 * Order of RTE VLAN push actions is
1491 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
1492 	 */
1493 	return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
1494 		RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
1495 }
1496 
1497 static __rte_always_inline bool
1498 is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
1499 {
1500 	/*
1501 	 * In masked push VLAN template all RTE push actions are masked.
1502 	 */
1503 	return mask && mask->ethertype != 0;
1504 }
1505 
1506 static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
1507 {
1508 /*
1509  * OpenFlow Switch Specification defines 801.1q VID as 12+1 bits.
1510  */
1511 	rte_be32_t type, vid, pcp;
1512 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1513 	rte_be32_t vid_lo, vid_hi;
1514 #endif
1515 
1516 	type = ((const struct rte_flow_action_of_push_vlan *)
1517 		actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
1518 	vid = ((const struct rte_flow_action_of_set_vlan_vid *)
1519 		actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
1520 	pcp = is_of_vlan_pcp_present(actions) ?
1521 	      ((const struct rte_flow_action_of_set_vlan_pcp *)
1522 		      actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
1523 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1524 	vid_hi = vid & 0xff;
1525 	vid_lo = vid >> 8;
1526 	return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type;
1527 #else
1528 	return (type << 16) | (pcp << 13) | vid;
1529 #endif
1530 }
1531 
1532 static __rte_always_inline struct mlx5_aso_mtr *
1533 flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
1534 			 const struct rte_flow_action *action,
1535 			 void *user_data, bool push)
1536 {
1537 	struct mlx5_priv *priv = dev->data->dev_private;
1538 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1539 	const struct rte_flow_action_meter_mark *meter_mark = action->conf;
1540 	struct mlx5_aso_mtr *aso_mtr;
1541 	struct mlx5_flow_meter_info *fm;
1542 	uint32_t mtr_id;
1543 
1544 	if (meter_mark->profile == NULL)
1545 		return NULL;
1546 	aso_mtr = mlx5_ipool_malloc(priv->hws_mpool->idx_pool, &mtr_id);
1547 	if (!aso_mtr)
1548 		return NULL;
1549 	/* Fill the flow meter parameters. */
1550 	aso_mtr->type = ASO_METER_INDIRECT;
1551 	fm = &aso_mtr->fm;
1552 	fm->meter_id = mtr_id;
1553 	fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
1554 	fm->is_enable = meter_mark->state;
1555 	fm->color_aware = meter_mark->color_mode;
1556 	aso_mtr->pool = pool;
1557 	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
1558 			  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
1559 	aso_mtr->offset = mtr_id - 1;
1560 	aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;
1561 	/* Update ASO flow meter by wqe. */
1562 	if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,
1563 					 &priv->mtr_bulk, user_data, push)) {
1564 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1565 		return NULL;
1566 	}
1567 	/* Wait for ASO object completion. */
1568 	if (queue == MLX5_HW_INV_QUEUE &&
1569 	    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
1570 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1571 		return NULL;
1572 	}
1573 	return aso_mtr;
1574 }
1575 
1576 static __rte_always_inline int
1577 flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
1578 			   uint16_t aso_mtr_pos,
1579 			   const struct rte_flow_action *action,
1580 			   struct mlx5dr_rule_action *acts,
1581 			   uint32_t *index,
1582 			   uint32_t queue)
1583 {
1584 	struct mlx5_priv *priv = dev->data->dev_private;
1585 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1586 	struct mlx5_aso_mtr *aso_mtr;
1587 
1588 	aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true);
1589 	if (!aso_mtr)
1590 		return -1;
1591 
1592 	/* Compile METER_MARK action */
1593 	acts[aso_mtr_pos].action = pool->action;
1594 	acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1595 	*index = aso_mtr->fm.meter_id;
1596 	return 0;
1597 }
1598 
1599 static int
1600 flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
1601 				  __rte_unused const struct mlx5_action_construct_data *act_data,
1602 				  const struct rte_flow_action *action,
1603 				  struct mlx5dr_rule_action *dr_rule)
1604 {
1605 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
1606 	const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
1607 
1608 	dr_rule->action = mirror->mirror_action;
1609 	return 0;
1610 }
1611 
1612 /**
1613  * HWS mirror implemented as FW island.
1614  * The action does not support indirect list flow configuration.
1615  * If template handle was masked, use handle mirror action in flow rules.
1616  * Otherwise let flow rule specify mirror handle.
1617  */
1618 static int
1619 hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
1620 					 const struct rte_flow_action *action,
1621 					 const struct rte_flow_action *mask,
1622 					 struct mlx5_hw_actions *acts,
1623 					 uint16_t action_src, uint16_t action_dst)
1624 {
1625 	int ret = 0;
1626 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1627 
1628 	if (mask_conf && mask_conf->handle) {
1629 		/**
1630 		 * If mirror handle was masked, assign fixed DR5 mirror action.
1631 		 */
1632 		flow_hw_translate_indirect_mirror(dev, NULL, action,
1633 						  &acts->rule_acts[action_dst]);
1634 	} else {
1635 		struct mlx5_priv *priv = dev->data->dev_private;
1636 		ret = flow_hw_act_data_indirect_list_append
1637 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1638 			 action_src, action_dst,
1639 			 flow_hw_translate_indirect_mirror);
1640 	}
1641 	return ret;
1642 }
1643 
1644 static int
1645 flow_hw_reformat_action(__rte_unused struct rte_eth_dev *dev,
1646 			__rte_unused const struct mlx5_action_construct_data *data,
1647 			const struct rte_flow_action *action,
1648 			struct mlx5dr_rule_action *dr_rule)
1649 {
1650 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
1651 
1652 	dr_rule->action = ((struct mlx5_hw_encap_decap_action *)
1653 			   (indlst_conf->handle))->action;
1654 	if (!dr_rule->action)
1655 		return -EINVAL;
1656 	return 0;
1657 }
1658 
1659 /**
1660  * Template conf must not be masked. If handle is masked, use the one in template,
1661  * otherwise update per flow rule.
1662  */
1663 static int
1664 hws_table_tmpl_translate_indirect_reformat(struct rte_eth_dev *dev,
1665 					   const struct rte_flow_action *action,
1666 					   const struct rte_flow_action *mask,
1667 					   struct mlx5_hw_actions *acts,
1668 					   uint16_t action_src, uint16_t action_dst)
1669 {
1670 	int ret = -1;
1671 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1672 	struct mlx5_priv *priv = dev->data->dev_private;
1673 
1674 	if (mask_conf && mask_conf->handle && !mask_conf->conf)
1675 		/**
1676 		 * If handle was masked, assign fixed DR action.
1677 		 */
1678 		ret = flow_hw_reformat_action(dev, NULL, action,
1679 					      &acts->rule_acts[action_dst]);
1680 	else if (mask_conf && !mask_conf->handle && !mask_conf->conf)
1681 		ret = flow_hw_act_data_indirect_list_append
1682 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1683 			 action_src, action_dst, flow_hw_reformat_action);
1684 	return ret;
1685 }
1686 
1687 static int
1688 flow_dr_set_meter(struct mlx5_priv *priv,
1689 		  struct mlx5dr_rule_action *dr_rule,
1690 		  const struct rte_flow_action_indirect_list *action_conf)
1691 {
1692 	const struct mlx5_indlst_legacy *legacy_obj =
1693 		(typeof(legacy_obj))action_conf->handle;
1694 	struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
1695 	uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
1696 	uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1697 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
1698 
1699 	if (!aso_mtr)
1700 		return -EINVAL;
1701 	dr_rule->action = mtr_pool->action;
1702 	dr_rule->aso_meter.offset = aso_mtr->offset;
1703 	return 0;
1704 }
1705 
1706 __rte_always_inline static void
1707 flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
1708 {
1709 	dr_rule->aso_meter.init_color =
1710 		(enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
1711 }
1712 
1713 static int
1714 flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
1715 				 const struct mlx5_action_construct_data *act_data,
1716 				 const struct rte_flow_action *action,
1717 				 struct mlx5dr_rule_action *dr_rule)
1718 {
1719 	int ret;
1720 	struct mlx5_priv *priv = dev->data->dev_private;
1721 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
1722 	const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
1723 		(typeof(flow_conf))action_conf->conf;
1724 
1725 	/*
1726 	 * Masked indirect handle set dr5 action during template table
1727 	 * translation.
1728 	 */
1729 	if (!dr_rule->action) {
1730 		ret = flow_dr_set_meter(priv, dr_rule, action_conf);
1731 		if (ret)
1732 			return ret;
1733 	}
1734 	if (!act_data->shared_meter.conf_masked) {
1735 		if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
1736 			flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
1737 	}
1738 	return 0;
1739 }
1740 
1741 static int
1742 hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
1743 					const struct rte_flow_action *action,
1744 					const struct rte_flow_action *mask,
1745 					struct mlx5_hw_actions *acts,
1746 					uint16_t action_src, uint16_t action_dst)
1747 {
1748 	int ret;
1749 	struct mlx5_priv *priv = dev->data->dev_private;
1750 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
1751 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1752 	bool is_handle_masked = mask_conf && mask_conf->handle;
1753 	bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
1754 	struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
1755 
1756 	if (is_handle_masked) {
1757 		ret = flow_dr_set_meter(priv, dr_rule, action->conf);
1758 		if (ret)
1759 			return ret;
1760 	}
1761 	if (is_conf_masked) {
1762 		const struct
1763 			rte_flow_indirect_update_flow_meter_mark **flow_conf =
1764 			(typeof(flow_conf))action_conf->conf;
1765 		flow_dr_mtr_flow_color(dr_rule,
1766 				       flow_conf[0]->init_color);
1767 	}
1768 	if (!is_handle_masked || !is_conf_masked) {
1769 		struct mlx5_action_construct_data *act_data;
1770 
1771 		ret = flow_hw_act_data_indirect_list_append
1772 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1773 			 action_src, action_dst, flow_hw_translate_indirect_meter);
1774 		if (ret)
1775 			return ret;
1776 		act_data = LIST_FIRST(&acts->act_list);
1777 		act_data->shared_meter.conf_masked = is_conf_masked;
1778 	}
1779 	return 0;
1780 }
1781 
1782 static int
1783 hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
1784 					 const struct rte_flow_action *action,
1785 					 const struct rte_flow_action *mask,
1786 					 struct mlx5_hw_actions *acts,
1787 					 uint16_t action_src, uint16_t action_dst)
1788 {
1789 	int ret;
1790 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
1791 	struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
1792 	uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
1793 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1794 
1795 	switch (type) {
1796 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1797 		ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
1798 							      acts, action_src,
1799 							      action_dst);
1800 		break;
1801 	default:
1802 		ret = -EINVAL;
1803 		break;
1804 	}
1805 	return ret;
1806 }
1807 
1808 /*
1809  * template .. indirect_list handle Ht conf Ct ..
1810  * mask     .. indirect_list handle Hm conf Cm ..
1811  *
1812  * PMD requires Ht != 0 to resolve handle type.
1813  * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
1814  * not change. Otherwise, DR5 action will be resolved during flow rule build.
1815  * If Ct was masked (Cm != 0), table template processing updates base
1816  * indirect action configuration with Ct parameters.
1817  */
1818 static int
1819 table_template_translate_indirect_list(struct rte_eth_dev *dev,
1820 				       const struct rte_flow_action *action,
1821 				       const struct rte_flow_action *mask,
1822 				       struct mlx5_hw_actions *acts,
1823 				       uint16_t action_src, uint16_t action_dst)
1824 {
1825 	int ret = 0;
1826 	enum mlx5_indirect_list_type type;
1827 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
1828 
1829 	if (!list_conf || !list_conf->handle)
1830 		return -EINVAL;
1831 	type = mlx5_get_indirect_list_type(list_conf->handle);
1832 	switch (type) {
1833 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
1834 		ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
1835 							       acts, action_src,
1836 							       action_dst);
1837 		break;
1838 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
1839 		ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
1840 							       acts, action_src,
1841 							       action_dst);
1842 		break;
1843 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
1844 		if (list_conf->conf)
1845 			return -EINVAL;
1846 		ret = hws_table_tmpl_translate_indirect_reformat(dev, action, mask,
1847 								 acts, action_src,
1848 								 action_dst);
1849 		break;
1850 	default:
1851 		return -EINVAL;
1852 	}
1853 	return ret;
1854 }
1855 
1856 static int
1857 mlx5_tbl_translate_reformat(struct mlx5_priv *priv,
1858 			    const struct rte_flow_template_table_attr *table_attr,
1859 			    struct mlx5_hw_actions *acts,
1860 			    struct rte_flow_actions_template *at,
1861 			    const struct rte_flow_item *enc_item,
1862 			    const struct rte_flow_item *enc_item_m,
1863 			    uint8_t *encap_data, uint8_t *encap_data_m,
1864 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
1865 			    size_t data_size, uint16_t reformat_src,
1866 			    enum mlx5dr_action_type refmt_type,
1867 			    struct rte_flow_error *error)
1868 {
1869 	int mp_reformat_ix = mlx5_multi_pattern_reformat_to_index(refmt_type);
1870 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
1871 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
1872 	struct mlx5dr_action_reformat_header hdr;
1873 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
1874 	bool shared_rfmt = false;
1875 	int ret;
1876 
1877 	MLX5_ASSERT(at->reformat_off != UINT16_MAX);
1878 	if (enc_item) {
1879 		MLX5_ASSERT(!encap_data);
1880 		ret = flow_dv_convert_encap_data(enc_item, buf, &data_size, error);
1881 		if (ret)
1882 			return ret;
1883 		encap_data = buf;
1884 		if (enc_item_m)
1885 			shared_rfmt = true;
1886 	} else if (encap_data && encap_data_m) {
1887 		shared_rfmt = true;
1888 	}
1889 	acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
1890 					sizeof(*acts->encap_decap) + data_size,
1891 					0, SOCKET_ID_ANY);
1892 	if (!acts->encap_decap)
1893 		return rte_flow_error_set(error, ENOMEM,
1894 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1895 					  NULL, "no memory for reformat context");
1896 	hdr.sz = data_size;
1897 	hdr.data = encap_data;
1898 	if (shared_rfmt || mp_reformat_ix < 0) {
1899 		uint16_t reformat_ix = at->reformat_off;
1900 		uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
1901 				 MLX5DR_ACTION_FLAG_SHARED;
1902 
1903 		acts->encap_decap->action =
1904 			mlx5dr_action_create_reformat(priv->dr_ctx, refmt_type,
1905 						      1, &hdr, 0, flags);
1906 		if (!acts->encap_decap->action)
1907 			return -rte_errno;
1908 		acts->rule_acts[reformat_ix].action = acts->encap_decap->action;
1909 		acts->rule_acts[reformat_ix].reformat.data = acts->encap_decap->data;
1910 		acts->rule_acts[reformat_ix].reformat.offset = 0;
1911 		acts->encap_decap->shared = true;
1912 	} else {
1913 		uint32_t ix;
1914 		typeof(mp_ctx->reformat[0]) *reformat_ctx = mp_ctx->reformat +
1915 							    mp_reformat_ix;
1916 
1917 		ix = reformat_ctx->elements_num++;
1918 		reformat_ctx->ctx[ix].reformat_hdr = hdr;
1919 		reformat_ctx->ctx[ix].rule_action = &acts->rule_acts[at->reformat_off];
1920 		reformat_ctx->ctx[ix].encap = acts->encap_decap;
1921 		acts->rule_acts[at->reformat_off].reformat.hdr_idx = ix;
1922 		acts->encap_decap_pos = at->reformat_off;
1923 		acts->encap_decap->data_size = data_size;
1924 		ret = __flow_hw_act_data_encap_append
1925 			(priv, acts, (at->actions + reformat_src)->type,
1926 			 reformat_src, at->reformat_off, data_size);
1927 		if (ret)
1928 			return -rte_errno;
1929 	}
1930 	return 0;
1931 }
1932 
1933 static int
1934 mlx5_tbl_translate_modify_header(struct rte_eth_dev *dev,
1935 				 const struct mlx5_flow_template_table_cfg *cfg,
1936 				 struct mlx5_hw_actions *acts,
1937 				 struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
1938 				 struct mlx5_hw_modify_header_action *mhdr,
1939 				 struct rte_flow_error *error)
1940 {
1941 	struct mlx5_priv *priv = dev->data->dev_private;
1942 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
1943 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
1944 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
1945 	uint16_t mhdr_ix = mhdr->pos;
1946 	struct mlx5dr_action_mh_pattern pattern = {
1947 		.sz = sizeof(struct mlx5_modification_cmd) * mhdr->mhdr_cmds_num
1948 	};
1949 
1950 	if (flow_hw_validate_compiled_modify_field(dev, cfg, mhdr, error)) {
1951 		__flow_hw_action_template_destroy(dev, acts);
1952 		return -rte_errno;
1953 	}
1954 	acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
1955 				 0, SOCKET_ID_ANY);
1956 	if (!acts->mhdr)
1957 		return rte_flow_error_set(error, ENOMEM,
1958 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1959 					  NULL, "translate modify_header: no memory for modify header context");
1960 	rte_memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
1961 	pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
1962 	if (mhdr->shared) {
1963 		uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
1964 				 MLX5DR_ACTION_FLAG_SHARED;
1965 
1966 		acts->mhdr->action = mlx5dr_action_create_modify_header
1967 						(priv->dr_ctx, 1, &pattern, 0,
1968 						 flags);
1969 		if (!acts->mhdr->action)
1970 			return rte_flow_error_set(error, rte_errno,
1971 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1972 						  NULL, "translate modify_header: failed to create DR action");
1973 		acts->rule_acts[mhdr_ix].action = acts->mhdr->action;
1974 	} else {
1975 		typeof(mp_ctx->mh) *mh = &mp_ctx->mh;
1976 		uint32_t idx = mh->elements_num;
1977 		struct mlx5_multi_pattern_ctx *mh_ctx = mh->ctx + mh->elements_num++;
1978 
1979 		mh_ctx->mh_pattern = pattern;
1980 		mh_ctx->mhdr = acts->mhdr;
1981 		mh_ctx->rule_action = &acts->rule_acts[mhdr_ix];
1982 		acts->rule_acts[mhdr_ix].modify_header.pattern_idx = idx;
1983 	}
1984 	return 0;
1985 }
1986 
1987 
1988 static int
1989 mlx5_create_ipv6_ext_reformat(struct rte_eth_dev *dev,
1990 			      const struct mlx5_flow_template_table_cfg *cfg,
1991 			      struct mlx5_hw_actions *acts,
1992 			      struct rte_flow_actions_template *at,
1993 			      uint8_t *push_data, uint8_t *push_data_m,
1994 			      size_t push_size, uint16_t recom_src,
1995 			      enum mlx5dr_action_type recom_type)
1996 {
1997 	struct mlx5_priv *priv = dev->data->dev_private;
1998 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
1999 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2000 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
2001 	struct mlx5_action_construct_data *act_data;
2002 	struct mlx5dr_action_reformat_header hdr = {0};
2003 	uint32_t flag, bulk = 0;
2004 
2005 	flag = mlx5_hw_act_flag[!!attr->group][type];
2006 	acts->push_remove = mlx5_malloc(MLX5_MEM_ZERO,
2007 					sizeof(*acts->push_remove) + push_size,
2008 					0, SOCKET_ID_ANY);
2009 	if (!acts->push_remove)
2010 		return -ENOMEM;
2011 
2012 	switch (recom_type) {
2013 	case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
2014 		if (!push_data || !push_size)
2015 			goto err1;
2016 		if (!push_data_m) {
2017 			bulk = rte_log2_u32(table_attr->nb_flows);
2018 		} else {
2019 			flag |= MLX5DR_ACTION_FLAG_SHARED;
2020 			acts->push_remove->shared = 1;
2021 		}
2022 		acts->push_remove->data_size = push_size;
2023 		memcpy(acts->push_remove->data, push_data, push_size);
2024 		hdr.data = push_data;
2025 		hdr.sz = push_size;
2026 		break;
2027 	case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
2028 		flag |= MLX5DR_ACTION_FLAG_SHARED;
2029 		acts->push_remove->shared = 1;
2030 		break;
2031 	default:
2032 		break;
2033 	}
2034 
2035 	acts->push_remove->action =
2036 		mlx5dr_action_create_reformat_ipv6_ext(priv->dr_ctx,
2037 				recom_type, &hdr, bulk, flag);
2038 	if (!acts->push_remove->action)
2039 		goto err1;
2040 	acts->rule_acts[at->recom_off].action = acts->push_remove->action;
2041 	acts->rule_acts[at->recom_off].ipv6_ext.header = acts->push_remove->data;
2042 	acts->rule_acts[at->recom_off].ipv6_ext.offset = 0;
2043 	acts->push_remove_pos = at->recom_off;
2044 	if (!acts->push_remove->shared) {
2045 		act_data = __flow_hw_act_data_push_append(dev, acts,
2046 				RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH,
2047 				recom_src, at->recom_off, push_size);
2048 		if (!act_data)
2049 			goto err;
2050 	}
2051 	return 0;
2052 err:
2053 	if (acts->push_remove->action)
2054 		mlx5dr_action_destroy(acts->push_remove->action);
2055 err1:
2056 	if (acts->push_remove) {
2057 		mlx5_free(acts->push_remove);
2058 		acts->push_remove = NULL;
2059 	}
2060 	return -EINVAL;
2061 }
2062 
2063 /**
2064  * Translate rte_flow actions to DR action.
2065  *
2066  * As the action template has already indicated the actions. Translate
2067  * the rte_flow actions to DR action if possbile. So in flow create
2068  * stage we will save cycles from handing the actions' organizing.
2069  * For the actions with limited information, need to add these to a
2070  * list.
2071  *
2072  * @param[in] dev
2073  *   Pointer to the rte_eth_dev structure.
2074  * @param[in] cfg
2075  *   Pointer to the table configuration.
2076  * @param[in/out] acts
2077  *   Pointer to the template HW steering DR actions.
2078  * @param[in] at
2079  *   Action template.
2080  * @param[out] error
2081  *   Pointer to error structure.
2082  *
2083  * @return
2084  *   0 on success, a negative errno otherwise and rte_errno is set.
2085  */
2086 static int
2087 __flow_hw_actions_translate(struct rte_eth_dev *dev,
2088 			    const struct mlx5_flow_template_table_cfg *cfg,
2089 			    struct mlx5_hw_actions *acts,
2090 			    struct rte_flow_actions_template *at,
2091 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2092 			    struct rte_flow_error *error)
2093 {
2094 	struct mlx5_priv *priv = dev->data->dev_private;
2095 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2096 	struct mlx5_hca_flex_attr *hca_attr = &priv->sh->cdev->config.hca_attr.flex;
2097 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2098 	struct rte_flow_action *actions = at->actions;
2099 	struct rte_flow_action *masks = at->masks;
2100 	enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
2101 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
2102 	const struct rte_flow_action_raw_encap *raw_encap_data;
2103 	const struct rte_flow_action_ipv6_ext_push *ipv6_ext_data;
2104 	const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
2105 	uint16_t reformat_src = 0, recom_src = 0;
2106 	uint8_t *encap_data = NULL, *encap_data_m = NULL;
2107 	uint8_t *push_data = NULL, *push_data_m = NULL;
2108 	size_t data_size = 0, push_size = 0;
2109 	struct mlx5_hw_modify_header_action mhdr = { 0 };
2110 	bool actions_end = false;
2111 	uint32_t type;
2112 	bool reformat_used = false;
2113 	bool recom_used = false;
2114 	unsigned int of_vlan_offset;
2115 	uint16_t jump_pos;
2116 	uint32_t ct_idx;
2117 	int ret, err;
2118 	uint32_t target_grp = 0;
2119 	int table_type;
2120 
2121 	flow_hw_modify_field_init(&mhdr, at);
2122 	if (attr->transfer)
2123 		type = MLX5DR_TABLE_TYPE_FDB;
2124 	else if (attr->egress)
2125 		type = MLX5DR_TABLE_TYPE_NIC_TX;
2126 	else
2127 		type = MLX5DR_TABLE_TYPE_NIC_RX;
2128 	for (; !actions_end; actions++, masks++) {
2129 		uint64_t pos = actions - at->actions;
2130 		uint16_t src_pos = pos - at->src_off[pos];
2131 		uint16_t dr_pos = at->dr_off[pos];
2132 
2133 		switch ((int)actions->type) {
2134 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2135 			if (!attr->group) {
2136 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2137 				goto err;
2138 			}
2139 			ret = table_template_translate_indirect_list
2140 				(dev, actions, masks, acts, src_pos, dr_pos);
2141 			if (ret)
2142 				goto err;
2143 			break;
2144 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
2145 			if (!attr->group) {
2146 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2147 				goto err;
2148 			}
2149 			if (actions->conf && masks->conf) {
2150 				if (flow_hw_shared_action_translate
2151 				(dev, actions, acts, src_pos, dr_pos))
2152 					goto err;
2153 			} else if (__flow_hw_act_data_general_append
2154 					(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
2155 					 src_pos, dr_pos)){
2156 				goto err;
2157 			}
2158 			break;
2159 		case RTE_FLOW_ACTION_TYPE_VOID:
2160 			break;
2161 		case RTE_FLOW_ACTION_TYPE_DROP:
2162 			acts->rule_acts[dr_pos].action =
2163 				priv->hw_drop[!!attr->group];
2164 			break;
2165 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
2166 			if (!attr->group) {
2167 				DRV_LOG(ERR, "Port representor is not supported in root table.");
2168 				goto err;
2169 			}
2170 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2171 			break;
2172 		case RTE_FLOW_ACTION_TYPE_MARK:
2173 			acts->mark = true;
2174 			if (masks->conf &&
2175 			    ((const struct rte_flow_action_mark *)
2176 			     masks->conf)->id)
2177 				acts->rule_acts[dr_pos].tag.value =
2178 					mlx5_flow_mark_set
2179 					(((const struct rte_flow_action_mark *)
2180 					(actions->conf))->id);
2181 			else if (__flow_hw_act_data_general_append(priv, acts,
2182 								   actions->type,
2183 								   src_pos, dr_pos))
2184 				goto err;
2185 			acts->rule_acts[dr_pos].action =
2186 				priv->hw_tag[!!attr->group];
2187 			__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
2188 			flow_hw_rxq_flag_set(dev, true);
2189 			break;
2190 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2191 			acts->rule_acts[dr_pos].action =
2192 				priv->hw_push_vlan[type];
2193 			if (is_template_masked_push_vlan(masks->conf))
2194 				acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
2195 					vlan_hdr_to_be32(actions);
2196 			else if (__flow_hw_act_data_general_append
2197 					(priv, acts, actions->type,
2198 					 src_pos, dr_pos))
2199 				goto err;
2200 			of_vlan_offset = is_of_vlan_pcp_present(actions) ?
2201 					MLX5_HW_VLAN_PUSH_PCP_IDX :
2202 					MLX5_HW_VLAN_PUSH_VID_IDX;
2203 			actions += of_vlan_offset;
2204 			masks += of_vlan_offset;
2205 			break;
2206 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2207 			acts->rule_acts[dr_pos].action =
2208 				priv->hw_pop_vlan[type];
2209 			break;
2210 		case RTE_FLOW_ACTION_TYPE_JUMP:
2211 			if (masks->conf &&
2212 			    ((const struct rte_flow_action_jump *)
2213 			     masks->conf)->group) {
2214 				uint32_t jump_group =
2215 					((const struct rte_flow_action_jump *)
2216 					actions->conf)->group;
2217 				acts->jump = flow_hw_jump_action_register
2218 						(dev, cfg, jump_group, error);
2219 				if (!acts->jump)
2220 					goto err;
2221 				acts->rule_acts[dr_pos].action = (!!attr->group) ?
2222 								 acts->jump->hws_action :
2223 								 acts->jump->root_action;
2224 			} else if (__flow_hw_act_data_general_append
2225 					(priv, acts, actions->type,
2226 					 src_pos, dr_pos)){
2227 				goto err;
2228 			}
2229 			break;
2230 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2231 			if (masks->conf &&
2232 			    ((const struct rte_flow_action_queue *)
2233 			     masks->conf)->index) {
2234 				acts->tir = flow_hw_tir_action_register
2235 				(dev,
2236 				 mlx5_hw_act_flag[!!attr->group][type],
2237 				 actions);
2238 				if (!acts->tir)
2239 					goto err;
2240 				acts->rule_acts[dr_pos].action =
2241 					acts->tir->action;
2242 			} else if (__flow_hw_act_data_general_append
2243 					(priv, acts, actions->type,
2244 					 src_pos, dr_pos)) {
2245 				goto err;
2246 			}
2247 			break;
2248 		case RTE_FLOW_ACTION_TYPE_RSS:
2249 			if (actions->conf && masks->conf) {
2250 				acts->tir = flow_hw_tir_action_register
2251 				(dev,
2252 				 mlx5_hw_act_flag[!!attr->group][type],
2253 				 actions);
2254 				if (!acts->tir)
2255 					goto err;
2256 				acts->rule_acts[dr_pos].action =
2257 					acts->tir->action;
2258 			} else if (__flow_hw_act_data_general_append
2259 					(priv, acts, actions->type,
2260 					 src_pos, dr_pos)) {
2261 				goto err;
2262 			}
2263 			break;
2264 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2265 			MLX5_ASSERT(!reformat_used);
2266 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2267 							 actions->conf);
2268 			if (masks->conf)
2269 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2270 								   masks->conf);
2271 			reformat_used = true;
2272 			reformat_src = src_pos;
2273 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2274 			break;
2275 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2276 			MLX5_ASSERT(!reformat_used);
2277 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2278 							 actions->conf);
2279 			if (masks->conf)
2280 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2281 								   masks->conf);
2282 			reformat_used = true;
2283 			reformat_src = src_pos;
2284 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2285 			break;
2286 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2287 			raw_encap_data =
2288 				(const struct rte_flow_action_raw_encap *)
2289 				 masks->conf;
2290 			if (raw_encap_data)
2291 				encap_data_m = raw_encap_data->data;
2292 			raw_encap_data =
2293 				(const struct rte_flow_action_raw_encap *)
2294 				 actions->conf;
2295 			encap_data = raw_encap_data->data;
2296 			data_size = raw_encap_data->size;
2297 			if (reformat_used) {
2298 				refmt_type = data_size <
2299 				MLX5_ENCAPSULATION_DECISION_SIZE ?
2300 				MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
2301 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
2302 			} else {
2303 				reformat_used = true;
2304 				refmt_type =
2305 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2306 			}
2307 			reformat_src = src_pos;
2308 			break;
2309 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2310 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2311 			MLX5_ASSERT(!reformat_used);
2312 			reformat_used = true;
2313 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2314 			break;
2315 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2316 			reformat_used = true;
2317 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2318 			break;
2319 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
2320 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2321 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2322 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2323 				goto err;
2324 			}
2325 			MLX5_ASSERT(!recom_used && !recom_type);
2326 			recom_used = true;
2327 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
2328 			ipv6_ext_data =
2329 				(const struct rte_flow_action_ipv6_ext_push *)masks->conf;
2330 			if (ipv6_ext_data)
2331 				push_data_m = ipv6_ext_data->data;
2332 			ipv6_ext_data =
2333 				(const struct rte_flow_action_ipv6_ext_push *)actions->conf;
2334 			if (ipv6_ext_data) {
2335 				push_data = ipv6_ext_data->data;
2336 				push_size = ipv6_ext_data->size;
2337 			}
2338 			recom_src = src_pos;
2339 			break;
2340 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
2341 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2342 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2343 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2344 				goto err;
2345 			}
2346 			recom_used = true;
2347 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
2348 			break;
2349 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
2350 			flow_hw_translate_group(dev, cfg, attr->group,
2351 						&target_grp, error);
2352 			if (target_grp == 0) {
2353 				__flow_hw_action_template_destroy(dev, acts);
2354 				return rte_flow_error_set(error, ENOTSUP,
2355 						RTE_FLOW_ERROR_TYPE_ACTION,
2356 						NULL,
2357 						"Send to kernel action on root table is not supported in HW steering mode");
2358 			}
2359 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
2360 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
2361 				      MLX5DR_TABLE_TYPE_FDB);
2362 			acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
2363 			break;
2364 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
2365 			err = flow_hw_modify_field_compile(dev, attr, actions,
2366 							   masks, acts, &mhdr,
2367 							   src_pos, error);
2368 			if (err)
2369 				goto err;
2370 			break;
2371 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2372 			if (flow_hw_represented_port_compile
2373 					(dev, attr, actions,
2374 					 masks, acts, src_pos, dr_pos, error))
2375 				goto err;
2376 			break;
2377 		case RTE_FLOW_ACTION_TYPE_METER:
2378 			/*
2379 			 * METER action is compiled to 2 DR actions - ASO_METER and FT.
2380 			 * Calculated DR offset is stored only for ASO_METER and FT
2381 			 * is assumed to be the next action.
2382 			 */
2383 			jump_pos = dr_pos + 1;
2384 			if (actions->conf && masks->conf &&
2385 			    ((const struct rte_flow_action_meter *)
2386 			     masks->conf)->mtr_id) {
2387 				err = flow_hw_meter_compile(dev, cfg,
2388 							    dr_pos, jump_pos, actions, acts, error);
2389 				if (err)
2390 					goto err;
2391 			} else if (__flow_hw_act_data_general_append(priv, acts,
2392 								     actions->type,
2393 								     src_pos,
2394 								     dr_pos))
2395 				goto err;
2396 			break;
2397 		case RTE_FLOW_ACTION_TYPE_AGE:
2398 			flow_hw_translate_group(dev, cfg, attr->group,
2399 						&target_grp, error);
2400 			if (target_grp == 0) {
2401 				__flow_hw_action_template_destroy(dev, acts);
2402 				return rte_flow_error_set(error, ENOTSUP,
2403 						RTE_FLOW_ERROR_TYPE_ACTION,
2404 						NULL,
2405 						"Age action on root table is not supported in HW steering mode");
2406 			}
2407 			if (__flow_hw_act_data_general_append(priv, acts,
2408 							      actions->type,
2409 							      src_pos,
2410 							      dr_pos))
2411 				goto err;
2412 			break;
2413 		case RTE_FLOW_ACTION_TYPE_COUNT:
2414 			flow_hw_translate_group(dev, cfg, attr->group,
2415 						&target_grp, error);
2416 			if (target_grp == 0) {
2417 				__flow_hw_action_template_destroy(dev, acts);
2418 				return rte_flow_error_set(error, ENOTSUP,
2419 						RTE_FLOW_ERROR_TYPE_ACTION,
2420 						NULL,
2421 						"Counter action on root table is not supported in HW steering mode");
2422 			}
2423 			if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
2424 			    (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
2425 				/*
2426 				 * When both COUNT and AGE are requested, it is
2427 				 * saved as AGE action which creates also the
2428 				 * counter.
2429 				 */
2430 				break;
2431 			if (masks->conf &&
2432 			    ((const struct rte_flow_action_count *)
2433 			     masks->conf)->id) {
2434 				err = flow_hw_cnt_compile(dev, dr_pos, acts);
2435 				if (err)
2436 					goto err;
2437 			} else if (__flow_hw_act_data_general_append
2438 					(priv, acts, actions->type,
2439 					 src_pos, dr_pos)) {
2440 				goto err;
2441 			}
2442 			break;
2443 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2444 			if (masks->conf) {
2445 				ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
2446 					 ((uint32_t)(uintptr_t)actions->conf);
2447 				if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
2448 						       &acts->rule_acts[dr_pos]))
2449 					goto err;
2450 			} else if (__flow_hw_act_data_general_append
2451 					(priv, acts, actions->type,
2452 					 src_pos, dr_pos)) {
2453 				goto err;
2454 			}
2455 			break;
2456 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
2457 			if (actions->conf && masks->conf &&
2458 			    ((const struct rte_flow_action_meter_mark *)
2459 			     masks->conf)->profile) {
2460 				err = flow_hw_meter_mark_compile(dev,
2461 								 dr_pos, actions,
2462 								 acts->rule_acts,
2463 								 &acts->mtr_id,
2464 								 MLX5_HW_INV_QUEUE);
2465 				if (err)
2466 					goto err;
2467 			} else if (__flow_hw_act_data_general_append(priv, acts,
2468 								     actions->type,
2469 								     src_pos,
2470 								     dr_pos))
2471 				goto err;
2472 			break;
2473 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
2474 			/* Internal, can be skipped. */
2475 			if (!!attr->group) {
2476 				DRV_LOG(ERR, "DEFAULT MISS action is only"
2477 					" supported in root table.");
2478 				goto err;
2479 			}
2480 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2481 			break;
2482 		case RTE_FLOW_ACTION_TYPE_END:
2483 			actions_end = true;
2484 			break;
2485 		default:
2486 			break;
2487 		}
2488 	}
2489 	if (mhdr.pos != UINT16_MAX) {
2490 		ret = mlx5_tbl_translate_modify_header(dev, cfg, acts, mp_ctx,
2491 						       &mhdr, error);
2492 		if (ret)
2493 			goto err;
2494 	}
2495 	if (reformat_used) {
2496 		ret = mlx5_tbl_translate_reformat(priv, table_attr, acts, at,
2497 						  enc_item, enc_item_m,
2498 						  encap_data, encap_data_m,
2499 						  mp_ctx, data_size,
2500 						  reformat_src,
2501 						  refmt_type, error);
2502 		if (ret)
2503 			goto err;
2504 	}
2505 	if (recom_used) {
2506 		MLX5_ASSERT(at->recom_off != UINT16_MAX);
2507 		ret = mlx5_create_ipv6_ext_reformat(dev, cfg, acts, at, push_data,
2508 						    push_data_m, push_size, recom_src,
2509 						    recom_type);
2510 		if (ret)
2511 			goto err;
2512 	}
2513 	return 0;
2514 err:
2515 	err = rte_errno;
2516 	__flow_hw_action_template_destroy(dev, acts);
2517 	return rte_flow_error_set(error, err,
2518 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2519 				  "fail to create rte table");
2520 }
2521 
2522 /**
2523  * Translate rte_flow actions to DR action.
2524  *
2525  * @param[in] dev
2526  *   Pointer to the rte_eth_dev structure.
2527  * @param[in] tbl
2528  *   Pointer to the flow template table.
2529  * @param[out] error
2530  *   Pointer to error structure.
2531  *
2532  * @return
2533  *    0 on success, negative value otherwise and rte_errno is set.
2534  */
2535 static int
2536 flow_hw_actions_translate(struct rte_eth_dev *dev,
2537 			  struct rte_flow_template_table *tbl,
2538 			  struct rte_flow_error *error)
2539 {
2540 	int ret;
2541 	uint32_t i;
2542 	struct mlx5_tbl_multi_pattern_ctx mpat = MLX5_EMPTY_MULTI_PATTERN_CTX;
2543 
2544 	for (i = 0; i < tbl->nb_action_templates; i++) {
2545 		if (__flow_hw_actions_translate(dev, &tbl->cfg,
2546 						&tbl->ats[i].acts,
2547 						tbl->ats[i].action_template,
2548 						&mpat, error))
2549 			goto err;
2550 	}
2551 	ret = mlx5_tbl_multi_pattern_process(dev, tbl, &mpat, error);
2552 	if (ret)
2553 		goto err;
2554 	return 0;
2555 err:
2556 	while (i--)
2557 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
2558 	return -1;
2559 }
2560 
2561 /**
2562  * Get shared indirect action.
2563  *
2564  * @param[in] dev
2565  *   Pointer to the rte_eth_dev data structure.
2566  * @param[in] act_data
2567  *   Pointer to the recorded action construct data.
2568  * @param[in] item_flags
2569  *   The matcher itme_flags used for RSS lookup.
2570  * @param[in] rule_act
2571  *   Pointer to the shared action's destination rule DR action.
2572  *
2573  * @return
2574  *    0 on success, negative value otherwise and rte_errno is set.
2575  */
2576 static __rte_always_inline int
2577 flow_hw_shared_action_get(struct rte_eth_dev *dev,
2578 			  struct mlx5_action_construct_data *act_data,
2579 			  const uint64_t item_flags,
2580 			  struct mlx5dr_rule_action *rule_act)
2581 {
2582 	struct mlx5_priv *priv = dev->data->dev_private;
2583 	struct mlx5_flow_rss_desc rss_desc = { 0 };
2584 	uint64_t hash_fields = 0;
2585 	uint32_t hrxq_idx = 0;
2586 	struct mlx5_hrxq *hrxq = NULL;
2587 	int act_type = act_data->type;
2588 
2589 	switch (act_type) {
2590 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
2591 		rss_desc.level = act_data->shared_rss.level;
2592 		rss_desc.types = act_data->shared_rss.types;
2593 		rss_desc.symmetric_hash_function = act_data->shared_rss.symmetric_hash_function;
2594 		flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
2595 		hrxq_idx = flow_dv_action_rss_hrxq_lookup
2596 			(dev, act_data->shared_rss.idx, hash_fields);
2597 		if (hrxq_idx)
2598 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
2599 					      hrxq_idx);
2600 		if (hrxq) {
2601 			rule_act->action = hrxq->action;
2602 			return 0;
2603 		}
2604 		break;
2605 	default:
2606 		DRV_LOG(WARNING, "Unsupported shared action type:%d",
2607 			act_data->type);
2608 		break;
2609 	}
2610 	return -1;
2611 }
2612 
2613 static void
2614 flow_hw_construct_quota(struct mlx5_priv *priv,
2615 			struct mlx5dr_rule_action *rule_act, uint32_t qid)
2616 {
2617 	rule_act->action = priv->quota_ctx.dr_action;
2618 	rule_act->aso_meter.offset = qid - 1;
2619 	rule_act->aso_meter.init_color =
2620 		MLX5DR_ACTION_ASO_METER_COLOR_GREEN;
2621 }
2622 
2623 /**
2624  * Construct shared indirect action.
2625  *
2626  * @param[in] dev
2627  *   Pointer to the rte_eth_dev data structure.
2628  * @param[in] queue
2629  *   The flow creation queue index.
2630  * @param[in] action
2631  *   Pointer to the shared indirect rte_flow action.
2632  * @param[in] table
2633  *   Pointer to the flow table.
2634  * @param[in] it_idx
2635  *   Item template index the action template refer to.
2636  * @param[in] action_flags
2637  *   Actions bit-map detected in this template.
2638  * @param[in, out] flow
2639  *   Pointer to the flow containing the counter.
2640  * @param[in] rule_act
2641  *   Pointer to the shared action's destination rule DR action.
2642  *
2643  * @return
2644  *    0 on success, negative value otherwise and rte_errno is set.
2645  */
2646 static __rte_always_inline int
2647 flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
2648 				const struct rte_flow_action *action,
2649 				struct rte_flow_template_table *table,
2650 				const uint8_t it_idx, uint64_t action_flags,
2651 				struct rte_flow_hw *flow,
2652 				struct mlx5dr_rule_action *rule_act)
2653 {
2654 	struct mlx5_priv *priv = dev->data->dev_private;
2655 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
2656 	struct mlx5_action_construct_data act_data;
2657 	struct mlx5_shared_action_rss *shared_rss;
2658 	struct mlx5_aso_mtr *aso_mtr;
2659 	struct mlx5_age_info *age_info;
2660 	struct mlx5_hws_age_param *param;
2661 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
2662 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
2663 	uint32_t idx = act_idx &
2664 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
2665 	uint64_t item_flags;
2666 	cnt_id_t age_cnt;
2667 
2668 	memset(&act_data, 0, sizeof(act_data));
2669 	switch (type) {
2670 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
2671 		act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
2672 		shared_rss = mlx5_ipool_get
2673 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
2674 		if (!shared_rss)
2675 			return -1;
2676 		act_data.shared_rss.idx = idx;
2677 		act_data.shared_rss.level = shared_rss->origin.level;
2678 		act_data.shared_rss.types = !shared_rss->origin.types ?
2679 					    RTE_ETH_RSS_IP :
2680 					    shared_rss->origin.types;
2681 		act_data.shared_rss.symmetric_hash_function =
2682 			MLX5_RSS_IS_SYMM(shared_rss->origin.func);
2683 
2684 		item_flags = table->its[it_idx]->item_flags;
2685 		if (flow_hw_shared_action_get
2686 				(dev, &act_data, item_flags, rule_act))
2687 			return -1;
2688 		break;
2689 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
2690 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
2691 				act_idx,
2692 				&rule_act->action,
2693 				&rule_act->counter.offset))
2694 			return -1;
2695 		flow->cnt_id = act_idx;
2696 		break;
2697 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
2698 		/*
2699 		 * Save the index with the indirect type, to recognize
2700 		 * it in flow destroy.
2701 		 */
2702 		flow->age_idx = act_idx;
2703 		if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
2704 			/*
2705 			 * The mutual update for idirect AGE & COUNT will be
2706 			 * performed later after we have ID for both of them.
2707 			 */
2708 			break;
2709 		age_info = GET_PORT_AGE_INFO(priv);
2710 		param = mlx5_ipool_get(age_info->ages_ipool, idx);
2711 		if (param == NULL)
2712 			return -1;
2713 		if (action_flags & MLX5_FLOW_ACTION_COUNT) {
2714 			if (mlx5_hws_cnt_pool_get(priv->hws_cpool,
2715 						  &param->queue_id, &age_cnt,
2716 						  idx) < 0)
2717 				return -1;
2718 			flow->cnt_id = age_cnt;
2719 			param->nb_cnts++;
2720 		} else {
2721 			/*
2722 			 * Get the counter of this indirect AGE or create one
2723 			 * if doesn't exist.
2724 			 */
2725 			age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
2726 			if (age_cnt == 0)
2727 				return -1;
2728 		}
2729 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
2730 						     age_cnt, &rule_act->action,
2731 						     &rule_act->counter.offset))
2732 			return -1;
2733 		break;
2734 	case MLX5_INDIRECT_ACTION_TYPE_CT:
2735 		if (flow_hw_ct_compile(dev, queue, idx, rule_act))
2736 			return -1;
2737 		break;
2738 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
2739 		/* Find ASO object. */
2740 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
2741 		if (!aso_mtr)
2742 			return -1;
2743 		rule_act->action = pool->action;
2744 		rule_act->aso_meter.offset = aso_mtr->offset;
2745 		break;
2746 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
2747 		flow_hw_construct_quota(priv, rule_act, idx);
2748 		break;
2749 	default:
2750 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
2751 		break;
2752 	}
2753 	return 0;
2754 }
2755 
2756 static __rte_always_inline int
2757 flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
2758 {
2759 	struct mlx5_modification_cmd cmd_he = {
2760 		.data0 = rte_be_to_cpu_32(cmd->data0),
2761 		.data1 = 0,
2762 	};
2763 
2764 	return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
2765 }
2766 
2767 /**
2768  * Construct flow action array.
2769  *
2770  * For action template contains dynamic actions, these actions need to
2771  * be updated according to the rte_flow action during flow creation.
2772  *
2773  * @param[in] dev
2774  *   Pointer to the rte_eth_dev structure.
2775  * @param[in] job
2776  *   Pointer to job descriptor.
2777  * @param[in] hw_acts
2778  *   Pointer to translated actions from template.
2779  * @param[in] it_idx
2780  *   Item template index the action template refer to.
2781  * @param[in] actions
2782  *   Array of rte_flow action need to be checked.
2783  * @param[in] rule_acts
2784  *   Array of DR rule actions to be used during flow creation..
2785  * @param[in] acts_num
2786  *   Pointer to the real acts_num flow has.
2787  *
2788  * @return
2789  *    0 on success, negative value otherwise and rte_errno is set.
2790  */
2791 static __rte_always_inline int
2792 flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
2793 			       struct mlx5_action_construct_data *act_data,
2794 			       const struct mlx5_hw_actions *hw_acts,
2795 			       const struct rte_flow_action *action)
2796 {
2797 	const struct rte_flow_action_modify_field *mhdr_action = action->conf;
2798 	uint8_t values[16] = { 0 };
2799 	unaligned_uint32_t *value_p;
2800 	uint32_t i;
2801 	struct field_modify_info *field;
2802 
2803 	if (!hw_acts->mhdr)
2804 		return -1;
2805 	if (hw_acts->mhdr->shared || act_data->modify_header.shared)
2806 		return 0;
2807 	MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
2808 		    mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
2809 	if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
2810 	    mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
2811 		return 0;
2812 	if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
2813 		rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
2814 	else
2815 		rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
2816 	if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
2817 	    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
2818 	    mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
2819 	    mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
2820 		uint8_t tag_index = flow_tag_index_get(&mhdr_action->dst);
2821 
2822 		value_p = (unaligned_uint32_t *)values;
2823 		if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
2824 		    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
2825 			*value_p = rte_cpu_to_be_32(*value_p << 16);
2826 		else
2827 			*value_p = rte_cpu_to_be_32(*value_p);
2828 	} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {
2829 		uint32_t tmp;
2830 
2831 		/*
2832 		 * QFI is passed as an uint8_t integer, but it is accessed through
2833 		 * a 2nd least significant byte of a 32-bit field in modify header command.
2834 		 */
2835 		tmp = values[0];
2836 		value_p = (unaligned_uint32_t *)values;
2837 		*value_p = rte_cpu_to_be_32(tmp << 8);
2838 	}
2839 	i = act_data->modify_header.mhdr_cmds_off;
2840 	field = act_data->modify_header.field;
2841 	do {
2842 		uint32_t off_b;
2843 		uint32_t mask;
2844 		uint32_t data;
2845 		const uint8_t *mask_src;
2846 
2847 		if (i >= act_data->modify_header.mhdr_cmds_end)
2848 			return -1;
2849 		if (flow_hw_mhdr_cmd_is_nop(&job->mhdr_cmd[i])) {
2850 			++i;
2851 			continue;
2852 		}
2853 		mask_src = (const uint8_t *)act_data->modify_header.mask;
2854 		mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
2855 		if (!mask) {
2856 			++field;
2857 			continue;
2858 		}
2859 		off_b = rte_bsf32(mask);
2860 		data = flow_dv_fetch_field(values + field->offset, field->size);
2861 		data = (data & mask) >> off_b;
2862 		job->mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
2863 		++field;
2864 	} while (field->size);
2865 	return 0;
2866 }
2867 
2868 /**
2869  * Construct flow action array.
2870  *
2871  * For action template contains dynamic actions, these actions need to
2872  * be updated according to the rte_flow action during flow creation.
2873  *
2874  * @param[in] dev
2875  *   Pointer to the rte_eth_dev structure.
2876  * @param[in] job
2877  *   Pointer to job descriptor.
2878  * @param[in] hw_acts
2879  *   Pointer to translated actions from template.
2880  * @param[in] it_idx
2881  *   Item template index the action template refer to.
2882  * @param[in] actions
2883  *   Array of rte_flow action need to be checked.
2884  * @param[in] rule_acts
2885  *   Array of DR rule actions to be used during flow creation..
2886  * @param[in] acts_num
2887  *   Pointer to the real acts_num flow has.
2888  *
2889  * @return
2890  *    0 on success, negative value otherwise and rte_errno is set.
2891  */
2892 static __rte_always_inline int
2893 flow_hw_actions_construct(struct rte_eth_dev *dev,
2894 			  struct mlx5_hw_q_job *job,
2895 			  const struct mlx5_hw_action_template *hw_at,
2896 			  const uint8_t it_idx,
2897 			  const struct rte_flow_action actions[],
2898 			  struct mlx5dr_rule_action *rule_acts,
2899 			  uint32_t queue,
2900 			  struct rte_flow_error *error)
2901 {
2902 	struct mlx5_priv *priv = dev->data->dev_private;
2903 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
2904 	struct rte_flow_template_table *table = job->flow->table;
2905 	struct mlx5_action_construct_data *act_data;
2906 	const struct rte_flow_actions_template *at = hw_at->action_template;
2907 	const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
2908 	const struct rte_flow_action *action;
2909 	const struct rte_flow_action_raw_encap *raw_encap_data;
2910 	const struct rte_flow_action_ipv6_ext_push *ipv6_push;
2911 	const struct rte_flow_item *enc_item = NULL;
2912 	const struct rte_flow_action_ethdev *port_action = NULL;
2913 	const struct rte_flow_action_meter *meter = NULL;
2914 	const struct rte_flow_action_age *age = NULL;
2915 	uint8_t *buf = job->encap_data;
2916 	uint8_t *push_buf = job->push_data;
2917 	struct rte_flow_attr attr = {
2918 			.ingress = 1,
2919 	};
2920 	uint32_t ft_flag;
2921 	size_t encap_len = 0;
2922 	int ret;
2923 	uint32_t age_idx = 0;
2924 	struct mlx5_aso_mtr *aso_mtr;
2925 
2926 	rte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);
2927 	attr.group = table->grp->group_id;
2928 	ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
2929 	if (table->type == MLX5DR_TABLE_TYPE_FDB) {
2930 		attr.transfer = 1;
2931 		attr.ingress = 1;
2932 	} else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
2933 		attr.egress = 1;
2934 		attr.ingress = 0;
2935 	} else {
2936 		attr.ingress = 1;
2937 	}
2938 	if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0) {
2939 		uint16_t pos = hw_acts->mhdr->pos;
2940 
2941 		if (!hw_acts->mhdr->shared) {
2942 			rule_acts[pos].modify_header.offset =
2943 						job->flow->res_idx - 1;
2944 			rule_acts[pos].modify_header.data =
2945 						(uint8_t *)job->mhdr_cmd;
2946 			rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
2947 				   sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
2948 		}
2949 	}
2950 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
2951 		uint32_t jump_group;
2952 		uint32_t tag;
2953 		uint64_t item_flags;
2954 		struct mlx5_hw_jump_action *jump;
2955 		struct mlx5_hrxq *hrxq;
2956 		uint32_t ct_idx;
2957 		cnt_id_t cnt_id;
2958 		uint32_t *cnt_queue;
2959 		uint32_t mtr_id;
2960 
2961 		action = &actions[act_data->action_src];
2962 		/*
2963 		 * action template construction replaces
2964 		 * OF_SET_VLAN_VID with MODIFY_FIELD
2965 		 */
2966 		if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
2967 			MLX5_ASSERT(act_data->type ==
2968 				    RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
2969 		else
2970 			MLX5_ASSERT(action->type ==
2971 				    RTE_FLOW_ACTION_TYPE_INDIRECT ||
2972 				    (int)action->type == act_data->type);
2973 		switch ((int)act_data->type) {
2974 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2975 			act_data->indirect_list_cb(dev, act_data, actions,
2976 						   &rule_acts[act_data->action_dst]);
2977 			break;
2978 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
2979 			if (flow_hw_shared_action_construct
2980 					(dev, queue, action, table, it_idx,
2981 					 at->action_flags, job->flow,
2982 					 &rule_acts[act_data->action_dst]))
2983 				return -1;
2984 			break;
2985 		case RTE_FLOW_ACTION_TYPE_VOID:
2986 			break;
2987 		case RTE_FLOW_ACTION_TYPE_MARK:
2988 			tag = mlx5_flow_mark_set
2989 			      (((const struct rte_flow_action_mark *)
2990 			      (action->conf))->id);
2991 			rule_acts[act_data->action_dst].tag.value = tag;
2992 			break;
2993 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2994 			rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
2995 				vlan_hdr_to_be32(action);
2996 			break;
2997 		case RTE_FLOW_ACTION_TYPE_JUMP:
2998 			jump_group = ((const struct rte_flow_action_jump *)
2999 						action->conf)->group;
3000 			jump = flow_hw_jump_action_register
3001 				(dev, &table->cfg, jump_group, NULL);
3002 			if (!jump)
3003 				return -1;
3004 			rule_acts[act_data->action_dst].action =
3005 			(!!attr.group) ? jump->hws_action : jump->root_action;
3006 			job->flow->jump = jump;
3007 			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
3008 			break;
3009 		case RTE_FLOW_ACTION_TYPE_RSS:
3010 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3011 			hrxq = flow_hw_tir_action_register(dev,
3012 					ft_flag,
3013 					action);
3014 			if (!hrxq)
3015 				return -1;
3016 			rule_acts[act_data->action_dst].action = hrxq->action;
3017 			job->flow->hrxq = hrxq;
3018 			job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
3019 			break;
3020 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3021 			item_flags = table->its[it_idx]->item_flags;
3022 			if (flow_hw_shared_action_get
3023 				(dev, act_data, item_flags,
3024 				 &rule_acts[act_data->action_dst]))
3025 				return -1;
3026 			break;
3027 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3028 			enc_item = ((const struct rte_flow_action_vxlan_encap *)
3029 				   action->conf)->definition;
3030 			if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL))
3031 				return -1;
3032 			break;
3033 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3034 			enc_item = ((const struct rte_flow_action_nvgre_encap *)
3035 				   action->conf)->definition;
3036 			if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL))
3037 				return -1;
3038 			break;
3039 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3040 			raw_encap_data =
3041 				(const struct rte_flow_action_raw_encap *)
3042 				 action->conf;
3043 			rte_memcpy((void *)buf, raw_encap_data->data, act_data->encap.len);
3044 			MLX5_ASSERT(raw_encap_data->size ==
3045 				    act_data->encap.len);
3046 			break;
3047 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
3048 			ipv6_push =
3049 				(const struct rte_flow_action_ipv6_ext_push *)action->conf;
3050 			rte_memcpy((void *)push_buf, ipv6_push->data,
3051 				   act_data->ipv6_ext.len);
3052 			MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
3053 			break;
3054 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3055 			if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3056 				ret = flow_hw_set_vlan_vid_construct(dev, job,
3057 								     act_data,
3058 								     hw_acts,
3059 								     action);
3060 			else
3061 				ret = flow_hw_modify_field_construct(job,
3062 								     act_data,
3063 								     hw_acts,
3064 								     action);
3065 			if (ret)
3066 				return -1;
3067 			break;
3068 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3069 			port_action = action->conf;
3070 			if (!priv->hw_vport[port_action->port_id])
3071 				return -1;
3072 			rule_acts[act_data->action_dst].action =
3073 					priv->hw_vport[port_action->port_id];
3074 			break;
3075 		case RTE_FLOW_ACTION_TYPE_QUOTA:
3076 			flow_hw_construct_quota(priv,
3077 						rule_acts + act_data->action_dst,
3078 						act_data->shared_meter.id);
3079 			break;
3080 		case RTE_FLOW_ACTION_TYPE_METER:
3081 			meter = action->conf;
3082 			mtr_id = meter->mtr_id;
3083 			aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
3084 			rule_acts[act_data->action_dst].action =
3085 				priv->mtr_bulk.action;
3086 			rule_acts[act_data->action_dst].aso_meter.offset =
3087 								aso_mtr->offset;
3088 			jump = flow_hw_jump_action_register
3089 				(dev, &table->cfg, aso_mtr->fm.group, NULL);
3090 			if (!jump)
3091 				return -1;
3092 			MLX5_ASSERT
3093 				(!rule_acts[act_data->action_dst + 1].action);
3094 			rule_acts[act_data->action_dst + 1].action =
3095 					(!!attr.group) ? jump->hws_action :
3096 							 jump->root_action;
3097 			job->flow->jump = jump;
3098 			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
3099 			if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
3100 				return -1;
3101 			break;
3102 		case RTE_FLOW_ACTION_TYPE_AGE:
3103 			age = action->conf;
3104 			/*
3105 			 * First, create the AGE parameter, then create its
3106 			 * counter later:
3107 			 * Regular counter - in next case.
3108 			 * Indirect counter - update it after the loop.
3109 			 */
3110 			age_idx = mlx5_hws_age_action_create(priv, queue, 0,
3111 							     age,
3112 							     job->flow->res_idx,
3113 							     error);
3114 			if (age_idx == 0)
3115 				return -rte_errno;
3116 			job->flow->age_idx = age_idx;
3117 			if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
3118 				/*
3119 				 * When AGE uses indirect counter, no need to
3120 				 * create counter but need to update it with the
3121 				 * AGE parameter, will be done after the loop.
3122 				 */
3123 				break;
3124 			/* Fall-through. */
3125 		case RTE_FLOW_ACTION_TYPE_COUNT:
3126 			/* If the port is engaged in resource sharing, do not use queue cache. */
3127 			cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue;
3128 			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);
3129 			if (ret != 0)
3130 				return ret;
3131 			ret = mlx5_hws_cnt_pool_get_action_offset
3132 				(priv->hws_cpool,
3133 				 cnt_id,
3134 				 &rule_acts[act_data->action_dst].action,
3135 				 &rule_acts[act_data->action_dst].counter.offset
3136 				 );
3137 			if (ret != 0)
3138 				return ret;
3139 			job->flow->cnt_id = cnt_id;
3140 			break;
3141 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
3142 			ret = mlx5_hws_cnt_pool_get_action_offset
3143 				(priv->hws_cpool,
3144 				 act_data->shared_counter.id,
3145 				 &rule_acts[act_data->action_dst].action,
3146 				 &rule_acts[act_data->action_dst].counter.offset
3147 				 );
3148 			if (ret != 0)
3149 				return ret;
3150 			job->flow->cnt_id = act_data->shared_counter.id;
3151 			break;
3152 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3153 			ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
3154 				 ((uint32_t)(uintptr_t)action->conf);
3155 			if (flow_hw_ct_compile(dev, queue, ct_idx,
3156 					       &rule_acts[act_data->action_dst]))
3157 				return -1;
3158 			break;
3159 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
3160 			mtr_id = act_data->shared_meter.id &
3161 				((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3162 			/* Find ASO object. */
3163 			aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
3164 			if (!aso_mtr)
3165 				return -1;
3166 			rule_acts[act_data->action_dst].action =
3167 							pool->action;
3168 			rule_acts[act_data->action_dst].aso_meter.offset =
3169 							aso_mtr->offset;
3170 			break;
3171 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
3172 			/*
3173 			 * Allocate meter directly will slow down flow
3174 			 * insertion rate.
3175 			 */
3176 			ret = flow_hw_meter_mark_compile(dev,
3177 				act_data->action_dst, action,
3178 				rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE);
3179 			if (ret != 0)
3180 				return ret;
3181 			break;
3182 		default:
3183 			break;
3184 		}
3185 	}
3186 	if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
3187 		if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
3188 			age_idx = job->flow->age_idx & MLX5_HWS_AGE_IDX_MASK;
3189 			if (mlx5_hws_cnt_age_get(priv->hws_cpool,
3190 						 job->flow->cnt_id) != age_idx)
3191 				/*
3192 				 * This is first use of this indirect counter
3193 				 * for this indirect AGE, need to increase the
3194 				 * number of counters.
3195 				 */
3196 				mlx5_hws_age_nb_cnt_increase(priv, age_idx);
3197 		}
3198 		/*
3199 		 * Update this indirect counter the indirect/direct AGE in which
3200 		 * using it.
3201 		 */
3202 		mlx5_hws_cnt_age_set(priv->hws_cpool, job->flow->cnt_id,
3203 				     age_idx);
3204 	}
3205 	if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
3206 		rule_acts[hw_acts->encap_decap_pos].reformat.offset =
3207 				job->flow->res_idx - 1;
3208 		rule_acts[hw_acts->encap_decap_pos].reformat.data = buf;
3209 	}
3210 	if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
3211 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.offset =
3212 				job->flow->res_idx - 1;
3213 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.header = push_buf;
3214 	}
3215 	if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id))
3216 		job->flow->cnt_id = hw_acts->cnt_id;
3217 	return 0;
3218 }
3219 
3220 static const struct rte_flow_item *
3221 flow_hw_get_rule_items(struct rte_eth_dev *dev,
3222 		       const struct rte_flow_template_table *table,
3223 		       const struct rte_flow_item items[],
3224 		       uint8_t pattern_template_index,
3225 		       struct mlx5_hw_q_job *job)
3226 {
3227 	struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
3228 
3229 	/* Only one implicit item can be added to flow rule pattern. */
3230 	MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
3231 	/* At least one item was allocated in job descriptor for items. */
3232 	MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
3233 	if (pt->implicit_port) {
3234 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3235 			rte_errno = ENOMEM;
3236 			return NULL;
3237 		}
3238 		/* Set up represented port item in job descriptor. */
3239 		job->port_spec = (struct rte_flow_item_ethdev){
3240 			.port_id = dev->data->port_id,
3241 		};
3242 		job->items[0] = (struct rte_flow_item){
3243 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
3244 			.spec = &job->port_spec,
3245 		};
3246 		rte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);
3247 		return job->items;
3248 	} else if (pt->implicit_tag) {
3249 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3250 			rte_errno = ENOMEM;
3251 			return NULL;
3252 		}
3253 		/* Set up tag item in job descriptor. */
3254 		job->tag_spec = (struct rte_flow_item_tag){
3255 			.data = flow_hw_tx_tag_regc_value(dev),
3256 		};
3257 		job->items[0] = (struct rte_flow_item){
3258 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3259 			.spec = &job->tag_spec,
3260 		};
3261 		rte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);
3262 		return job->items;
3263 	} else {
3264 		return items;
3265 	}
3266 }
3267 
3268 /**
3269  * Enqueue HW steering flow creation.
3270  *
3271  * The flow will be applied to the HW only if the postpone bit is not set or
3272  * the extra push function is called.
3273  * The flow creation status should be checked from dequeue result.
3274  *
3275  * @param[in] dev
3276  *   Pointer to the rte_eth_dev structure.
3277  * @param[in] queue
3278  *   The queue to create the flow.
3279  * @param[in] attr
3280  *   Pointer to the flow operation attributes.
3281  * @param[in] items
3282  *   Items with flow spec value.
3283  * @param[in] pattern_template_index
3284  *   The item pattern flow follows from the table.
3285  * @param[in] actions
3286  *   Action with flow spec value.
3287  * @param[in] action_template_index
3288  *   The action pattern flow follows from the table.
3289  * @param[in] user_data
3290  *   Pointer to the user_data.
3291  * @param[out] error
3292  *   Pointer to error structure.
3293  *
3294  * @return
3295  *    Flow pointer on success, NULL otherwise and rte_errno is set.
3296  */
3297 static struct rte_flow *
3298 flow_hw_async_flow_create(struct rte_eth_dev *dev,
3299 			  uint32_t queue,
3300 			  const struct rte_flow_op_attr *attr,
3301 			  struct rte_flow_template_table *table,
3302 			  const struct rte_flow_item items[],
3303 			  uint8_t pattern_template_index,
3304 			  const struct rte_flow_action actions[],
3305 			  uint8_t action_template_index,
3306 			  void *user_data,
3307 			  struct rte_flow_error *error)
3308 {
3309 	struct mlx5_priv *priv = dev->data->dev_private;
3310 	struct mlx5dr_rule_attr rule_attr = {
3311 		.queue_id = queue,
3312 		.user_data = user_data,
3313 		.burst = attr->postpone,
3314 	};
3315 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
3316 	struct rte_flow_hw *flow = NULL;
3317 	struct mlx5_hw_q_job *job = NULL;
3318 	const struct rte_flow_item *rule_items;
3319 	uint32_t flow_idx = 0;
3320 	uint32_t res_idx = 0;
3321 	int ret;
3322 
3323 	if (unlikely((!dev->data->dev_started))) {
3324 		rte_errno = EINVAL;
3325 		goto error;
3326 	}
3327 	job = flow_hw_job_get(priv, queue);
3328 	if (!job) {
3329 		rte_errno = ENOMEM;
3330 		goto error;
3331 	}
3332 	flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
3333 	if (!flow)
3334 		goto error;
3335 	mlx5_ipool_malloc(table->resource, &res_idx);
3336 	if (!res_idx)
3337 		goto error;
3338 	/*
3339 	 * Set the table here in order to know the destination table
3340 	 * when free the flow afterward.
3341 	 */
3342 	flow->table = table;
3343 	flow->mt_idx = pattern_template_index;
3344 	flow->idx = flow_idx;
3345 	flow->res_idx = res_idx;
3346 	/*
3347 	 * Set the job type here in order to know if the flow memory
3348 	 * should be freed or not when get the result from dequeue.
3349 	 */
3350 	job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
3351 	job->flow = flow;
3352 	job->user_data = user_data;
3353 	rule_attr.user_data = job;
3354 	/*
3355 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices for rule
3356 	 * insertion hints.
3357 	 */
3358 	MLX5_ASSERT(res_idx > 0);
3359 	flow->rule_idx = res_idx - 1;
3360 	rule_attr.rule_idx = flow->rule_idx;
3361 	/*
3362 	 * Construct the flow actions based on the input actions.
3363 	 * The implicitly appended action is always fixed, like metadata
3364 	 * copy action from FDB to NIC Rx.
3365 	 * No need to copy and contrust a new "actions" list based on the
3366 	 * user's input, in order to save the cost.
3367 	 */
3368 	if (flow_hw_actions_construct(dev, job,
3369 				      &table->ats[action_template_index],
3370 				      pattern_template_index, actions,
3371 				      rule_acts, queue, error)) {
3372 		rte_errno = EINVAL;
3373 		goto error;
3374 	}
3375 	rule_items = flow_hw_get_rule_items(dev, table, items,
3376 					    pattern_template_index, job);
3377 	if (!rule_items)
3378 		goto error;
3379 	ret = mlx5dr_rule_create(table->matcher,
3380 				 pattern_template_index, rule_items,
3381 				 action_template_index, rule_acts,
3382 				 &rule_attr, (struct mlx5dr_rule *)flow->rule);
3383 	if (likely(!ret))
3384 		return (struct rte_flow *)flow;
3385 error:
3386 	if (job)
3387 		flow_hw_job_put(priv, job, queue);
3388 	if (flow_idx)
3389 		mlx5_ipool_free(table->flow, flow_idx);
3390 	if (res_idx)
3391 		mlx5_ipool_free(table->resource, res_idx);
3392 	rte_flow_error_set(error, rte_errno,
3393 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3394 			   "fail to create rte flow");
3395 	return NULL;
3396 }
3397 
3398 /**
3399  * Enqueue HW steering flow creation by index.
3400  *
3401  * The flow will be applied to the HW only if the postpone bit is not set or
3402  * the extra push function is called.
3403  * The flow creation status should be checked from dequeue result.
3404  *
3405  * @param[in] dev
3406  *   Pointer to the rte_eth_dev structure.
3407  * @param[in] queue
3408  *   The queue to create the flow.
3409  * @param[in] attr
3410  *   Pointer to the flow operation attributes.
3411  * @param[in] rule_index
3412  *   The item pattern flow follows from the table.
3413  * @param[in] actions
3414  *   Action with flow spec value.
3415  * @param[in] action_template_index
3416  *   The action pattern flow follows from the table.
3417  * @param[in] user_data
3418  *   Pointer to the user_data.
3419  * @param[out] error
3420  *   Pointer to error structure.
3421  *
3422  * @return
3423  *    Flow pointer on success, NULL otherwise and rte_errno is set.
3424  */
3425 static struct rte_flow *
3426 flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
3427 			  uint32_t queue,
3428 			  const struct rte_flow_op_attr *attr,
3429 			  struct rte_flow_template_table *table,
3430 			  uint32_t rule_index,
3431 			  const struct rte_flow_action actions[],
3432 			  uint8_t action_template_index,
3433 			  void *user_data,
3434 			  struct rte_flow_error *error)
3435 {
3436 	struct rte_flow_item items[] = {{.type = RTE_FLOW_ITEM_TYPE_END,}};
3437 	struct mlx5_priv *priv = dev->data->dev_private;
3438 	struct mlx5dr_rule_attr rule_attr = {
3439 		.queue_id = queue,
3440 		.user_data = user_data,
3441 		.burst = attr->postpone,
3442 	};
3443 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
3444 	struct rte_flow_hw *flow = NULL;
3445 	struct mlx5_hw_q_job *job = NULL;
3446 	uint32_t flow_idx = 0;
3447 	uint32_t res_idx = 0;
3448 	int ret;
3449 
3450 	if (unlikely(rule_index >= table->cfg.attr.nb_flows)) {
3451 		rte_errno = EINVAL;
3452 		goto error;
3453 	}
3454 	job = flow_hw_job_get(priv, queue);
3455 	if (!job) {
3456 		rte_errno = ENOMEM;
3457 		goto error;
3458 	}
3459 	flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
3460 	if (!flow)
3461 		goto error;
3462 	mlx5_ipool_malloc(table->resource, &res_idx);
3463 	if (!res_idx)
3464 		goto error;
3465 	/*
3466 	 * Set the table here in order to know the destination table
3467 	 * when free the flow afterwards.
3468 	 */
3469 	flow->table = table;
3470 	flow->mt_idx = 0;
3471 	flow->idx = flow_idx;
3472 	flow->res_idx = res_idx;
3473 	/*
3474 	 * Set the job type here in order to know if the flow memory
3475 	 * should be freed or not when get the result from dequeue.
3476 	 */
3477 	job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
3478 	job->flow = flow;
3479 	job->user_data = user_data;
3480 	rule_attr.user_data = job;
3481 	/*
3482 	 * Set the rule index.
3483 	 */
3484 	flow->rule_idx = rule_index;
3485 	rule_attr.rule_idx = flow->rule_idx;
3486 	/*
3487 	 * Construct the flow actions based on the input actions.
3488 	 * The implicitly appended action is always fixed, like metadata
3489 	 * copy action from FDB to NIC Rx.
3490 	 * No need to copy and contrust a new "actions" list based on the
3491 	 * user's input, in order to save the cost.
3492 	 */
3493 	if (flow_hw_actions_construct(dev, job,
3494 				      &table->ats[action_template_index],
3495 				      0, actions, rule_acts, queue, error)) {
3496 		rte_errno = EINVAL;
3497 		goto error;
3498 	}
3499 	ret = mlx5dr_rule_create(table->matcher,
3500 				 0, items, action_template_index, rule_acts,
3501 				 &rule_attr, (struct mlx5dr_rule *)flow->rule);
3502 	if (likely(!ret))
3503 		return (struct rte_flow *)flow;
3504 error:
3505 	if (job)
3506 		flow_hw_job_put(priv, job, queue);
3507 	if (res_idx)
3508 		mlx5_ipool_free(table->resource, res_idx);
3509 	if (flow_idx)
3510 		mlx5_ipool_free(table->flow, flow_idx);
3511 	rte_flow_error_set(error, rte_errno,
3512 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3513 			   "fail to create rte flow");
3514 	return NULL;
3515 }
3516 
3517 /**
3518  * Enqueue HW steering flow update.
3519  *
3520  * The flow will be applied to the HW only if the postpone bit is not set or
3521  * the extra push function is called.
3522  * The flow destruction status should be checked from dequeue result.
3523  *
3524  * @param[in] dev
3525  *   Pointer to the rte_eth_dev structure.
3526  * @param[in] queue
3527  *   The queue to destroy the flow.
3528  * @param[in] attr
3529  *   Pointer to the flow operation attributes.
3530  * @param[in] flow
3531  *   Pointer to the flow to be destroyed.
3532  * @param[in] actions
3533  *   Action with flow spec value.
3534  * @param[in] action_template_index
3535  *   The action pattern flow follows from the table.
3536  * @param[in] user_data
3537  *   Pointer to the user_data.
3538  * @param[out] error
3539  *   Pointer to error structure.
3540  *
3541  * @return
3542  *    0 on success, negative value otherwise and rte_errno is set.
3543  */
3544 static int
3545 flow_hw_async_flow_update(struct rte_eth_dev *dev,
3546 			   uint32_t queue,
3547 			   const struct rte_flow_op_attr *attr,
3548 			   struct rte_flow *flow,
3549 			   const struct rte_flow_action actions[],
3550 			   uint8_t action_template_index,
3551 			   void *user_data,
3552 			   struct rte_flow_error *error)
3553 {
3554 	struct mlx5_priv *priv = dev->data->dev_private;
3555 	struct mlx5dr_rule_attr rule_attr = {
3556 		.queue_id = queue,
3557 		.user_data = user_data,
3558 		.burst = attr->postpone,
3559 	};
3560 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
3561 	struct rte_flow_hw *of = (struct rte_flow_hw *)flow;
3562 	struct rte_flow_hw *nf;
3563 	struct rte_flow_template_table *table = of->table;
3564 	struct mlx5_hw_q_job *job = NULL;
3565 	uint32_t res_idx = 0;
3566 	int ret;
3567 
3568 	job = flow_hw_job_get(priv, queue);
3569 	if (!job) {
3570 		rte_errno = ENOMEM;
3571 		goto error;
3572 	}
3573 	mlx5_ipool_malloc(table->resource, &res_idx);
3574 	if (!res_idx)
3575 		goto error;
3576 	nf = job->upd_flow;
3577 	memset(nf, 0, sizeof(struct rte_flow_hw));
3578 	/*
3579 	 * Set the table here in order to know the destination table
3580 	 * when free the flow afterwards.
3581 	 */
3582 	nf->table = table;
3583 	nf->mt_idx = of->mt_idx;
3584 	nf->idx = of->idx;
3585 	nf->res_idx = res_idx;
3586 	/*
3587 	 * Set the job type here in order to know if the flow memory
3588 	 * should be freed or not when get the result from dequeue.
3589 	 */
3590 	job->type = MLX5_HW_Q_JOB_TYPE_UPDATE;
3591 	job->flow = nf;
3592 	job->user_data = user_data;
3593 	rule_attr.user_data = job;
3594 	/*
3595 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices for rule
3596 	 * insertion hints.
3597 	 */
3598 	MLX5_ASSERT(res_idx > 0);
3599 	nf->rule_idx = res_idx - 1;
3600 	rule_attr.rule_idx = nf->rule_idx;
3601 	/*
3602 	 * Construct the flow actions based on the input actions.
3603 	 * The implicitly appended action is always fixed, like metadata
3604 	 * copy action from FDB to NIC Rx.
3605 	 * No need to copy and contrust a new "actions" list based on the
3606 	 * user's input, in order to save the cost.
3607 	 */
3608 	if (flow_hw_actions_construct(dev, job,
3609 				      &table->ats[action_template_index],
3610 				      nf->mt_idx, actions,
3611 				      rule_acts, queue, error)) {
3612 		rte_errno = EINVAL;
3613 		goto error;
3614 	}
3615 	/*
3616 	 * Switch the old flow and the new flow.
3617 	 */
3618 	job->flow = of;
3619 	job->upd_flow = nf;
3620 	ret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,
3621 					action_template_index, rule_acts, &rule_attr);
3622 	if (likely(!ret))
3623 		return 0;
3624 error:
3625 	/* Flow created fail, return the descriptor and flow memory. */
3626 	if (job)
3627 		flow_hw_job_put(priv, job, queue);
3628 	if (res_idx)
3629 		mlx5_ipool_free(table->resource, res_idx);
3630 	return rte_flow_error_set(error, rte_errno,
3631 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3632 			"fail to update rte flow");
3633 }
3634 
3635 /**
3636  * Enqueue HW steering flow destruction.
3637  *
3638  * The flow will be applied to the HW only if the postpone bit is not set or
3639  * the extra push function is called.
3640  * The flow destruction status should be checked from dequeue result.
3641  *
3642  * @param[in] dev
3643  *   Pointer to the rte_eth_dev structure.
3644  * @param[in] queue
3645  *   The queue to destroy the flow.
3646  * @param[in] attr
3647  *   Pointer to the flow operation attributes.
3648  * @param[in] flow
3649  *   Pointer to the flow to be destroyed.
3650  * @param[in] user_data
3651  *   Pointer to the user_data.
3652  * @param[out] error
3653  *   Pointer to error structure.
3654  *
3655  * @return
3656  *    0 on success, negative value otherwise and rte_errno is set.
3657  */
3658 static int
3659 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
3660 			   uint32_t queue,
3661 			   const struct rte_flow_op_attr *attr,
3662 			   struct rte_flow *flow,
3663 			   void *user_data,
3664 			   struct rte_flow_error *error)
3665 {
3666 	struct mlx5_priv *priv = dev->data->dev_private;
3667 	struct mlx5dr_rule_attr rule_attr = {
3668 		.queue_id = queue,
3669 		.user_data = user_data,
3670 		.burst = attr->postpone,
3671 	};
3672 	struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
3673 	struct mlx5_hw_q_job *job;
3674 	int ret;
3675 
3676 	job = flow_hw_job_get(priv, queue);
3677 	if (!job)
3678 		return rte_flow_error_set(error, ENOMEM,
3679 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3680 					  "fail to destroy rte flow: flow queue full");
3681 	job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
3682 	job->user_data = user_data;
3683 	job->flow = fh;
3684 	rule_attr.user_data = job;
3685 	rule_attr.rule_idx = fh->rule_idx;
3686 	ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
3687 	if (ret) {
3688 		flow_hw_job_put(priv, job, queue);
3689 		return rte_flow_error_set(error, rte_errno,
3690 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3691 					  "fail to destroy rte flow");
3692 	}
3693 	return 0;
3694 }
3695 
3696 /**
3697  * Release the AGE and counter for given flow.
3698  *
3699  * @param[in] priv
3700  *   Pointer to the port private data structure.
3701  * @param[in] queue
3702  *   The queue to release the counter.
3703  * @param[in, out] flow
3704  *   Pointer to the flow containing the counter.
3705  * @param[out] error
3706  *   Pointer to error structure.
3707  */
3708 static void
3709 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
3710 			  struct rte_flow_hw *flow,
3711 			  struct rte_flow_error *error)
3712 {
3713 	uint32_t *cnt_queue;
3714 
3715 	if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
3716 		if (flow->age_idx && !mlx5_hws_age_is_indirect(flow->age_idx)) {
3717 			/* Remove this AGE parameter from indirect counter. */
3718 			mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
3719 			/* Release the AGE parameter. */
3720 			mlx5_hws_age_action_destroy(priv, flow->age_idx, error);
3721 			flow->age_idx = 0;
3722 		}
3723 		return;
3724 	}
3725 	/* If the port is engaged in resource sharing, do not use queue cache. */
3726 	cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue;
3727 	/* Put the counter first to reduce the race risk in BG thread. */
3728 	mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);
3729 	flow->cnt_id = 0;
3730 	if (flow->age_idx) {
3731 		if (mlx5_hws_age_is_indirect(flow->age_idx)) {
3732 			uint32_t idx = flow->age_idx & MLX5_HWS_AGE_IDX_MASK;
3733 
3734 			mlx5_hws_age_nb_cnt_decrease(priv, idx);
3735 		} else {
3736 			/* Release the AGE parameter. */
3737 			mlx5_hws_age_action_destroy(priv, flow->age_idx, error);
3738 		}
3739 		flow->age_idx = 0;
3740 	}
3741 }
3742 
3743 static __rte_always_inline void
3744 flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job *job,
3745 				  uint32_t queue)
3746 {
3747 	struct mlx5_priv *priv = dev->data->dev_private;
3748 	struct mlx5_aso_ct_action *aso_ct;
3749 	struct mlx5_aso_mtr *aso_mtr;
3750 	uint32_t type, idx;
3751 
3752 	if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
3753 	    MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
3754 		mlx5_quota_async_completion(dev, queue, job);
3755 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
3756 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
3757 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
3758 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
3759 			mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
3760 		}
3761 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
3762 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
3763 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
3764 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
3765 			aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
3766 			aso_mtr->state = ASO_METER_READY;
3767 		} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
3768 			idx = MLX5_ACTION_CTX_CT_GET_IDX
3769 			((uint32_t)(uintptr_t)job->action);
3770 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
3771 			aso_ct->state = ASO_CONNTRACK_READY;
3772 		}
3773 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
3774 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
3775 		if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
3776 			idx = MLX5_ACTION_CTX_CT_GET_IDX
3777 			((uint32_t)(uintptr_t)job->action);
3778 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
3779 			mlx5_aso_ct_obj_analyze(job->query.user,
3780 						job->query.hw);
3781 			aso_ct->state = ASO_CONNTRACK_READY;
3782 		}
3783 	} else {
3784 		/*
3785 		 * rte_flow_op_result::user data can point to
3786 		 * struct mlx5_aso_mtr object as well
3787 		 */
3788 		if (queue != CTRL_QUEUE_ID(priv))
3789 			MLX5_ASSERT(false);
3790 	}
3791 }
3792 
3793 static inline int
3794 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
3795 				 uint32_t queue,
3796 				 struct rte_flow_op_result res[],
3797 				 uint16_t n_res)
3798 
3799 {
3800 	struct mlx5_priv *priv = dev->data->dev_private;
3801 	struct rte_ring *r = priv->hw_q[queue].indir_cq;
3802 	void *user_data = NULL;
3803 	int ret_comp, i;
3804 
3805 	ret_comp = (int)rte_ring_count(r);
3806 	if (ret_comp > n_res)
3807 		ret_comp = n_res;
3808 	for (i = 0; i < ret_comp; i++) {
3809 		rte_ring_dequeue(r, &user_data);
3810 		res[i].user_data = user_data;
3811 		res[i].status = RTE_FLOW_OP_SUCCESS;
3812 	}
3813 	if (ret_comp < n_res && priv->hws_mpool)
3814 		ret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],
3815 				&res[ret_comp], n_res - ret_comp);
3816 	if (ret_comp < n_res && priv->hws_ctpool)
3817 		ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
3818 				&res[ret_comp], n_res - ret_comp);
3819 	if (ret_comp < n_res && priv->quota_ctx.sq)
3820 		ret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],
3821 						     &res[ret_comp],
3822 						     n_res - ret_comp);
3823 	for (i = 0; i <  ret_comp; i++) {
3824 		struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)res[i].user_data;
3825 
3826 		/* Restore user data. */
3827 		res[i].user_data = job->user_data;
3828 		if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
3829 			flow_hw_pull_legacy_indirect_comp(dev, job, queue);
3830 		/*
3831 		 * Current PMD supports 2 indirect action list types - MIRROR and REFORMAT.
3832 		 * These indirect list types do not post WQE to create action.
3833 		 * Future indirect list types that do post WQE will add
3834 		 * completion handlers here.
3835 		 */
3836 		flow_hw_job_put(priv, job, queue);
3837 	}
3838 	return ret_comp;
3839 }
3840 
3841 /**
3842  * Pull the enqueued flows.
3843  *
3844  * For flows enqueued from creation/destruction, the status should be
3845  * checked from the dequeue result.
3846  *
3847  * @param[in] dev
3848  *   Pointer to the rte_eth_dev structure.
3849  * @param[in] queue
3850  *   The queue to pull the result.
3851  * @param[in/out] res
3852  *   Array to save the results.
3853  * @param[in] n_res
3854  *   Available result with the array.
3855  * @param[out] error
3856  *   Pointer to error structure.
3857  *
3858  * @return
3859  *    Result number on success, negative value otherwise and rte_errno is set.
3860  */
3861 static int
3862 flow_hw_pull(struct rte_eth_dev *dev,
3863 	     uint32_t queue,
3864 	     struct rte_flow_op_result res[],
3865 	     uint16_t n_res,
3866 	     struct rte_flow_error *error)
3867 {
3868 	struct mlx5_priv *priv = dev->data->dev_private;
3869 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3870 	struct mlx5_hw_q_job *job;
3871 	uint32_t res_idx;
3872 	int ret, i;
3873 
3874 	/* 1. Pull the flow completion. */
3875 	ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
3876 	if (ret < 0)
3877 		return rte_flow_error_set(error, rte_errno,
3878 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3879 				"fail to query flow queue");
3880 	for (i = 0; i <  ret; i++) {
3881 		job = (struct mlx5_hw_q_job *)res[i].user_data;
3882 		/* Release the original resource index in case of update. */
3883 		res_idx = job->flow->res_idx;
3884 		/* Restore user data. */
3885 		res[i].user_data = job->user_data;
3886 		if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY ||
3887 		    job->type == MLX5_HW_Q_JOB_TYPE_UPDATE) {
3888 			if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
3889 				flow_hw_jump_release(dev, job->flow->jump);
3890 			else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
3891 				mlx5_hrxq_obj_release(dev, job->flow->hrxq);
3892 			if (mlx5_hws_cnt_id_valid(job->flow->cnt_id))
3893 				flow_hw_age_count_release(priv, queue,
3894 							  job->flow, error);
3895 			if (job->flow->mtr_id) {
3896 				mlx5_ipool_free(pool->idx_pool,	job->flow->mtr_id);
3897 				job->flow->mtr_id = 0;
3898 			}
3899 			if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
3900 				mlx5_ipool_free(job->flow->table->resource, res_idx);
3901 				mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
3902 			} else {
3903 				rte_memcpy(job->flow, job->upd_flow,
3904 					offsetof(struct rte_flow_hw, rule));
3905 				mlx5_ipool_free(job->flow->table->resource, res_idx);
3906 			}
3907 		}
3908 		flow_hw_job_put(priv, job, queue);
3909 	}
3910 	/* 2. Pull indirect action comp. */
3911 	if (ret < n_res)
3912 		ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
3913 							n_res - ret);
3914 	return ret;
3915 }
3916 
3917 static inline uint32_t
3918 __flow_hw_push_action(struct rte_eth_dev *dev,
3919 		    uint32_t queue)
3920 {
3921 	struct mlx5_priv *priv = dev->data->dev_private;
3922 	struct rte_ring *iq = priv->hw_q[queue].indir_iq;
3923 	struct rte_ring *cq = priv->hw_q[queue].indir_cq;
3924 	void *job = NULL;
3925 	uint32_t ret, i;
3926 
3927 	ret = rte_ring_count(iq);
3928 	for (i = 0; i < ret; i++) {
3929 		rte_ring_dequeue(iq, &job);
3930 		rte_ring_enqueue(cq, job);
3931 	}
3932 	if (!priv->shared_host) {
3933 		if (priv->hws_ctpool)
3934 			mlx5_aso_push_wqe(priv->sh,
3935 					  &priv->ct_mng->aso_sqs[queue]);
3936 		if (priv->hws_mpool)
3937 			mlx5_aso_push_wqe(priv->sh,
3938 					  &priv->hws_mpool->sq[queue]);
3939 	}
3940 	return priv->hw_q[queue].size - priv->hw_q[queue].job_idx;
3941 }
3942 
3943 static int
3944 __flow_hw_push(struct rte_eth_dev *dev,
3945 	       uint32_t queue,
3946 	       struct rte_flow_error *error)
3947 {
3948 	struct mlx5_priv *priv = dev->data->dev_private;
3949 	int ret, num;
3950 
3951 	num = __flow_hw_push_action(dev, queue);
3952 	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
3953 				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
3954 	if (ret) {
3955 		rte_flow_error_set(error, rte_errno,
3956 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3957 				   "fail to push flows");
3958 		return ret;
3959 	}
3960 	return num;
3961 }
3962 
3963 /**
3964  * Push the enqueued flows to HW.
3965  *
3966  * Force apply all the enqueued flows to the HW.
3967  *
3968  * @param[in] dev
3969  *   Pointer to the rte_eth_dev structure.
3970  * @param[in] queue
3971  *   The queue to push the flow.
3972  * @param[out] error
3973  *   Pointer to error structure.
3974  *
3975  * @return
3976  *    0 on success, negative value otherwise and rte_errno is set.
3977  */
3978 static int
3979 flow_hw_push(struct rte_eth_dev *dev,
3980 	     uint32_t queue, struct rte_flow_error *error)
3981 {
3982 	int ret = __flow_hw_push(dev, queue, error);
3983 
3984 	return ret >= 0 ? 0 : ret;
3985 }
3986 
3987 /**
3988  * Drain the enqueued flows' completion.
3989  *
3990  * @param[in] dev
3991  *   Pointer to the rte_eth_dev structure.
3992  * @param[in] queue
3993  *   The queue to pull the flow.
3994  * @param[out] error
3995  *   Pointer to error structure.
3996  *
3997  * @return
3998  *    0 on success, negative value otherwise and rte_errno is set.
3999  */
4000 static int
4001 __flow_hw_pull_comp(struct rte_eth_dev *dev,
4002 		    uint32_t queue, struct rte_flow_error *error)
4003 {
4004 	struct rte_flow_op_result comp[BURST_THR];
4005 	int ret, i, empty_loop = 0;
4006 	uint32_t pending_rules;
4007 
4008 	ret = __flow_hw_push(dev, queue, error);
4009 	if (ret < 0)
4010 		return ret;
4011 	pending_rules = ret;
4012 	while (pending_rules) {
4013 		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
4014 		if (ret < 0)
4015 			return -1;
4016 		if (!ret) {
4017 			rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
4018 			if (++empty_loop > 5) {
4019 				DRV_LOG(WARNING, "No available dequeue %u, quit.", pending_rules);
4020 				break;
4021 			}
4022 			continue;
4023 		}
4024 		for (i = 0; i < ret; i++) {
4025 			if (comp[i].status == RTE_FLOW_OP_ERROR)
4026 				DRV_LOG(WARNING, "Flow flush get error CQE.");
4027 		}
4028 		/*
4029 		 * Indirect **SYNC** METER_MARK and CT actions do not
4030 		 * remove completion after WQE post.
4031 		 * That implementation avoids HW timeout.
4032 		 * The completion is removed before the following WQE post.
4033 		 * However, HWS queue updates do not reflect that behaviour.
4034 		 * Therefore, during port destruction sync queue may have
4035 		 * pending completions.
4036 		 */
4037 		pending_rules -= RTE_MIN(pending_rules, (uint32_t)ret);
4038 		empty_loop = 0;
4039 	}
4040 	return 0;
4041 }
4042 
4043 /**
4044  * Flush created flows.
4045  *
4046  * @param[in] dev
4047  *   Pointer to the rte_eth_dev structure.
4048  * @param[out] error
4049  *   Pointer to error structure.
4050  *
4051  * @return
4052  *    0 on success, negative value otherwise and rte_errno is set.
4053  */
4054 int
4055 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
4056 		     struct rte_flow_error *error)
4057 {
4058 	struct mlx5_priv *priv = dev->data->dev_private;
4059 	struct mlx5_hw_q *hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
4060 	struct rte_flow_template_table *tbl;
4061 	struct rte_flow_hw *flow;
4062 	struct rte_flow_op_attr attr = {
4063 		.postpone = 0,
4064 	};
4065 	uint32_t pending_rules = 0;
4066 	uint32_t queue;
4067 	uint32_t fidx;
4068 
4069 	/*
4070 	 * Ensure to push and dequeue all the enqueued flow
4071 	 * creation/destruction jobs in case user forgot to
4072 	 * dequeue. Or the enqueued created flows will be
4073 	 * leaked. The forgotten dequeues would also cause
4074 	 * flow flush get extra CQEs as expected and pending_rules
4075 	 * be minus value.
4076 	 */
4077 	for (queue = 0; queue < priv->nb_queue; queue++) {
4078 		if (__flow_hw_pull_comp(dev, queue, error))
4079 			return -1;
4080 	}
4081 	/* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
4082 	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
4083 		if (!tbl->cfg.external)
4084 			continue;
4085 		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
4086 			if (flow_hw_async_flow_destroy(dev,
4087 						MLX5_DEFAULT_FLUSH_QUEUE,
4088 						&attr,
4089 						(struct rte_flow *)flow,
4090 						NULL,
4091 						error))
4092 				return -1;
4093 			pending_rules++;
4094 			/* Drain completion with queue size. */
4095 			if (pending_rules >= hw_q->size) {
4096 				if (__flow_hw_pull_comp(dev,
4097 							MLX5_DEFAULT_FLUSH_QUEUE,
4098 							error))
4099 					return -1;
4100 				pending_rules = 0;
4101 			}
4102 		}
4103 	}
4104 	/* Drain left completion. */
4105 	if (pending_rules &&
4106 	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, error))
4107 		return -1;
4108 	return 0;
4109 }
4110 
4111 static int
4112 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
4113 			       struct rte_flow_template_table *tbl,
4114 			       struct mlx5_tbl_multi_pattern_ctx *mpat,
4115 			       struct rte_flow_error *error)
4116 {
4117 	uint32_t i;
4118 	struct mlx5_priv *priv = dev->data->dev_private;
4119 	const struct rte_flow_template_table_attr *table_attr = &tbl->cfg.attr;
4120 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
4121 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
4122 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][type];
4123 	struct mlx5dr_action *dr_action;
4124 	uint32_t bulk_size = rte_log2_u32(table_attr->nb_flows);
4125 
4126 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
4127 		uint32_t j;
4128 		uint32_t *reformat_refcnt;
4129 		typeof(mpat->reformat[0]) *reformat = mpat->reformat + i;
4130 		struct mlx5dr_action_reformat_header hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
4131 		enum mlx5dr_action_type reformat_type =
4132 			mlx5_multi_pattern_reformat_index_to_type(i);
4133 
4134 		if (!reformat->elements_num)
4135 			continue;
4136 		for (j = 0; j < reformat->elements_num; j++)
4137 			hdr[j] = reformat->ctx[j].reformat_hdr;
4138 		reformat_refcnt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(uint32_t), 0,
4139 					      rte_socket_id());
4140 		if (!reformat_refcnt)
4141 			return rte_flow_error_set(error, ENOMEM,
4142 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4143 						  NULL, "failed to allocate multi-pattern encap counter");
4144 		*reformat_refcnt = reformat->elements_num;
4145 		dr_action = mlx5dr_action_create_reformat
4146 			(priv->dr_ctx, reformat_type, reformat->elements_num, hdr,
4147 			 bulk_size, flags);
4148 		if (!dr_action) {
4149 			mlx5_free(reformat_refcnt);
4150 			return rte_flow_error_set(error, rte_errno,
4151 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4152 						  NULL,
4153 						  "failed to create multi-pattern encap action");
4154 		}
4155 		for (j = 0; j < reformat->elements_num; j++) {
4156 			reformat->ctx[j].rule_action->action = dr_action;
4157 			reformat->ctx[j].encap->action = dr_action;
4158 			reformat->ctx[j].encap->multi_pattern = 1;
4159 			reformat->ctx[j].encap->multi_pattern_refcnt = reformat_refcnt;
4160 		}
4161 	}
4162 	if (mpat->mh.elements_num) {
4163 		typeof(mpat->mh) *mh = &mpat->mh;
4164 		struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
4165 		uint32_t *mh_refcnt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(uint32_t),
4166 						 0, rte_socket_id());
4167 
4168 		if (!mh_refcnt)
4169 			return rte_flow_error_set(error, ENOMEM,
4170 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4171 						  NULL, "failed to allocate modify header counter");
4172 		*mh_refcnt = mpat->mh.elements_num;
4173 		for (i = 0; i < mpat->mh.elements_num; i++)
4174 			pattern[i] = mh->ctx[i].mh_pattern;
4175 		dr_action = mlx5dr_action_create_modify_header
4176 			(priv->dr_ctx, mpat->mh.elements_num, pattern,
4177 			 bulk_size, flags);
4178 		if (!dr_action) {
4179 			mlx5_free(mh_refcnt);
4180 			return rte_flow_error_set(error, rte_errno,
4181 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4182 						  NULL,
4183 						  "failed to create multi-pattern header modify action");
4184 		}
4185 		for (i = 0; i < mpat->mh.elements_num; i++) {
4186 			mh->ctx[i].rule_action->action = dr_action;
4187 			mh->ctx[i].mhdr->action = dr_action;
4188 			mh->ctx[i].mhdr->multi_pattern = 1;
4189 			mh->ctx[i].mhdr->multi_pattern_refcnt = mh_refcnt;
4190 		}
4191 	}
4192 
4193 	return 0;
4194 }
4195 
4196 static int
4197 mlx5_hw_build_template_table(struct rte_eth_dev *dev,
4198 			     uint8_t nb_action_templates,
4199 			     struct rte_flow_actions_template *action_templates[],
4200 			     struct mlx5dr_action_template *at[],
4201 			     struct rte_flow_template_table *tbl,
4202 			     struct rte_flow_error *error)
4203 {
4204 	int ret;
4205 	uint8_t i;
4206 	struct mlx5_tbl_multi_pattern_ctx mpat = MLX5_EMPTY_MULTI_PATTERN_CTX;
4207 
4208 	for (i = 0; i < nb_action_templates; i++) {
4209 		uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
4210 						     __ATOMIC_RELAXED);
4211 
4212 		if (refcnt <= 1) {
4213 			rte_flow_error_set(error, EINVAL,
4214 					   RTE_FLOW_ERROR_TYPE_ACTION,
4215 					   &action_templates[i], "invalid AT refcount");
4216 			goto at_error;
4217 		}
4218 		at[i] = action_templates[i]->tmpl;
4219 		tbl->ats[i].action_template = action_templates[i];
4220 		LIST_INIT(&tbl->ats[i].acts.act_list);
4221 		/* do NOT translate table action if `dev` was not started */
4222 		if (!dev->data->dev_started)
4223 			continue;
4224 		ret = __flow_hw_actions_translate(dev, &tbl->cfg,
4225 						  &tbl->ats[i].acts,
4226 						  action_templates[i],
4227 						  &mpat, error);
4228 		if (ret) {
4229 			i++;
4230 			goto at_error;
4231 		}
4232 	}
4233 	tbl->nb_action_templates = nb_action_templates;
4234 	ret = mlx5_tbl_multi_pattern_process(dev, tbl, &mpat, error);
4235 	if (ret)
4236 		goto at_error;
4237 	return 0;
4238 
4239 at_error:
4240 	while (i--) {
4241 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4242 		__atomic_sub_fetch(&action_templates[i]->refcnt,
4243 				   1, __ATOMIC_RELAXED);
4244 	}
4245 	return rte_errno;
4246 }
4247 
4248 /**
4249  * Create flow table.
4250  *
4251  * The input item and action templates will be binded to the table.
4252  * Flow memory will also be allocated. Matcher will be created based
4253  * on the item template. Action will be translated to the dedicated
4254  * DR action if possible.
4255  *
4256  * @param[in] dev
4257  *   Pointer to the rte_eth_dev structure.
4258  * @param[in] table_cfg
4259  *   Pointer to the table configuration.
4260  * @param[in] item_templates
4261  *   Item template array to be binded to the table.
4262  * @param[in] nb_item_templates
4263  *   Number of item template.
4264  * @param[in] action_templates
4265  *   Action template array to be binded to the table.
4266  * @param[in] nb_action_templates
4267  *   Number of action template.
4268  * @param[out] error
4269  *   Pointer to error structure.
4270  *
4271  * @return
4272  *    Table on success, NULL otherwise and rte_errno is set.
4273  */
4274 static struct rte_flow_template_table *
4275 flow_hw_table_create(struct rte_eth_dev *dev,
4276 		     const struct mlx5_flow_template_table_cfg *table_cfg,
4277 		     struct rte_flow_pattern_template *item_templates[],
4278 		     uint8_t nb_item_templates,
4279 		     struct rte_flow_actions_template *action_templates[],
4280 		     uint8_t nb_action_templates,
4281 		     struct rte_flow_error *error)
4282 {
4283 	struct rte_flow_error sub_error = {
4284 		.type = RTE_FLOW_ERROR_TYPE_NONE,
4285 		.cause = NULL,
4286 		.message = NULL,
4287 	};
4288 	struct mlx5_priv *priv = dev->data->dev_private;
4289 	struct mlx5dr_matcher_attr matcher_attr = {0};
4290 	struct rte_flow_template_table *tbl = NULL;
4291 	struct mlx5_flow_group *grp;
4292 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
4293 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
4294 	const struct rte_flow_template_table_attr *attr = &table_cfg->attr;
4295 	struct rte_flow_attr flow_attr = attr->flow_attr;
4296 	struct mlx5_flow_cb_ctx ctx = {
4297 		.dev = dev,
4298 		.error = &sub_error,
4299 		.data = &flow_attr,
4300 	};
4301 	struct mlx5_indexed_pool_config cfg = {
4302 		.size = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size(),
4303 		.trunk_size = 1 << 12,
4304 		.per_core_cache = 1 << 13,
4305 		.need_lock = 1,
4306 		.release_mem_en = !!priv->sh->config.reclaim_mode,
4307 		.malloc = mlx5_malloc,
4308 		.free = mlx5_free,
4309 		.type = "mlx5_hw_table_flow",
4310 	};
4311 	struct mlx5_list_entry *ge;
4312 	uint32_t i = 0, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
4313 	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
4314 	bool port_started = !!dev->data->dev_started;
4315 	int err;
4316 
4317 	/* HWS layer accepts only 1 item template with root table. */
4318 	if (!attr->flow_attr.group)
4319 		max_tpl = 1;
4320 	cfg.max_idx = nb_flows;
4321 	/* For table has very limited flows, disable cache. */
4322 	if (nb_flows < cfg.trunk_size) {
4323 		cfg.per_core_cache = 0;
4324 		cfg.trunk_size = nb_flows;
4325 	} else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
4326 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
4327 	}
4328 	/* Check if we requires too many templates. */
4329 	if (nb_item_templates > max_tpl ||
4330 	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
4331 		rte_errno = EINVAL;
4332 		goto error;
4333 	}
4334 	/* Allocate the table memory. */
4335 	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
4336 	if (!tbl)
4337 		goto error;
4338 	tbl->cfg = *table_cfg;
4339 	/* Allocate flow indexed pool. */
4340 	tbl->flow = mlx5_ipool_create(&cfg);
4341 	if (!tbl->flow)
4342 		goto error;
4343 	/* Allocate rule indexed pool. */
4344 	cfg.size = 0;
4345 	cfg.type = "mlx5_hw_table_rule";
4346 	cfg.max_idx += priv->hw_q[0].size;
4347 	tbl->resource = mlx5_ipool_create(&cfg);
4348 	if (!tbl->resource)
4349 		goto error;
4350 	/* Register the flow group. */
4351 	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
4352 	if (!ge)
4353 		goto error;
4354 	grp = container_of(ge, struct mlx5_flow_group, entry);
4355 	tbl->grp = grp;
4356 	/* Prepare matcher information. */
4357 	matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
4358 	matcher_attr.priority = attr->flow_attr.priority;
4359 	matcher_attr.optimize_using_rule_idx = true;
4360 	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
4361 	matcher_attr.insert_mode = flow_hw_matcher_insert_mode_get(attr->insertion_type);
4362 	if (attr->hash_func == RTE_FLOW_TABLE_HASH_FUNC_CRC16) {
4363 		DRV_LOG(ERR, "16-bit checksum hash type is not supported");
4364 		rte_errno = ENOTSUP;
4365 		goto it_error;
4366 	}
4367 	matcher_attr.distribute_mode = flow_hw_matcher_distribute_mode_get(attr->hash_func);
4368 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
4369 	/* Parse hints information. */
4370 	if (attr->specialize) {
4371 		if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
4372 			matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_WIRE;
4373 		else if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
4374 			matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_VPORT;
4375 		else
4376 			DRV_LOG(INFO, "Unsupported hint value %x", attr->specialize);
4377 	}
4378 	/* Build the item template. */
4379 	for (i = 0; i < nb_item_templates; i++) {
4380 		uint32_t ret;
4381 
4382 		if ((flow_attr.ingress && !item_templates[i]->attr.ingress) ||
4383 		    (flow_attr.egress && !item_templates[i]->attr.egress) ||
4384 		    (flow_attr.transfer && !item_templates[i]->attr.transfer)) {
4385 			DRV_LOG(ERR, "pattern template and template table attribute mismatch");
4386 			rte_errno = EINVAL;
4387 			goto it_error;
4388 		}
4389 		ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
4390 					 __ATOMIC_RELAXED) + 1;
4391 		if (ret <= 1) {
4392 			rte_errno = EINVAL;
4393 			goto it_error;
4394 		}
4395 		mt[i] = item_templates[i]->mt;
4396 		tbl->its[i] = item_templates[i];
4397 	}
4398 	tbl->nb_item_templates = nb_item_templates;
4399 	/* Build the action template. */
4400 	err = mlx5_hw_build_template_table(dev, nb_action_templates,
4401 					   action_templates, at, tbl, &sub_error);
4402 	if (err) {
4403 		i = nb_item_templates;
4404 		goto it_error;
4405 	}
4406 	tbl->matcher = mlx5dr_matcher_create
4407 		(tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
4408 	if (!tbl->matcher)
4409 		goto at_error;
4410 	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
4411 		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
4412 		    MLX5DR_TABLE_TYPE_NIC_RX);
4413 	if (port_started)
4414 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
4415 	else
4416 		LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
4417 	return tbl;
4418 at_error:
4419 	for (i = 0; i < nb_action_templates; i++) {
4420 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4421 		__atomic_fetch_sub(&action_templates[i]->refcnt,
4422 				   1, __ATOMIC_RELAXED);
4423 	}
4424 	i = nb_item_templates;
4425 it_error:
4426 	while (i--)
4427 		__atomic_fetch_sub(&item_templates[i]->refcnt,
4428 				   1, __ATOMIC_RELAXED);
4429 error:
4430 	err = rte_errno;
4431 	if (tbl) {
4432 		if (tbl->grp)
4433 			mlx5_hlist_unregister(priv->sh->groups,
4434 					      &tbl->grp->entry);
4435 		if (tbl->resource)
4436 			mlx5_ipool_destroy(tbl->resource);
4437 		if (tbl->flow)
4438 			mlx5_ipool_destroy(tbl->flow);
4439 		mlx5_free(tbl);
4440 	}
4441 	if (error != NULL) {
4442 		if (sub_error.type == RTE_FLOW_ERROR_TYPE_NONE)
4443 			rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4444 					   "Failed to create template table");
4445 		else
4446 			rte_memcpy(error, &sub_error, sizeof(sub_error));
4447 	}
4448 	return NULL;
4449 }
4450 
4451 /**
4452  * Update flow template table.
4453  *
4454  * @param[in] dev
4455  *   Pointer to the rte_eth_dev structure.
4456  * @param[out] error
4457  *   Pointer to error structure.
4458  *
4459  * @return
4460  *    0 on success, negative value otherwise and rte_errno is set.
4461  */
4462 int
4463 flow_hw_table_update(struct rte_eth_dev *dev,
4464 		     struct rte_flow_error *error)
4465 {
4466 	struct mlx5_priv *priv = dev->data->dev_private;
4467 	struct rte_flow_template_table *tbl;
4468 
4469 	while ((tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo)) != NULL) {
4470 		if (flow_hw_actions_translate(dev, tbl, error))
4471 			return -1;
4472 		LIST_REMOVE(tbl, next);
4473 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
4474 	}
4475 	return 0;
4476 }
4477 
4478 /**
4479  * Translates group index specified by the user in @p attr to internal
4480  * group index.
4481  *
4482  * Translation is done by incrementing group index, so group n becomes n + 1.
4483  *
4484  * @param[in] dev
4485  *   Pointer to Ethernet device.
4486  * @param[in] cfg
4487  *   Pointer to the template table configuration.
4488  * @param[in] group
4489  *   Currently used group index (table group or jump destination).
4490  * @param[out] table_group
4491  *   Pointer to output group index.
4492  * @param[out] error
4493  *   Pointer to error structure.
4494  *
4495  * @return
4496  *   0 on success. Otherwise, returns negative error code, rte_errno is set
4497  *   and error structure is filled.
4498  */
4499 static int
4500 flow_hw_translate_group(struct rte_eth_dev *dev,
4501 			const struct mlx5_flow_template_table_cfg *cfg,
4502 			uint32_t group,
4503 			uint32_t *table_group,
4504 			struct rte_flow_error *error)
4505 {
4506 	struct mlx5_priv *priv = dev->data->dev_private;
4507 	struct mlx5_sh_config *config = &priv->sh->config;
4508 	const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
4509 
4510 	if (config->dv_esw_en &&
4511 	    priv->fdb_def_rule &&
4512 	    cfg->external &&
4513 	    flow_attr->transfer) {
4514 		if (group > MLX5_HW_MAX_TRANSFER_GROUP)
4515 			return rte_flow_error_set(error, EINVAL,
4516 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4517 						  NULL,
4518 						  "group index not supported");
4519 		*table_group = group + 1;
4520 	} else if (config->dv_esw_en &&
4521 		   (config->repr_matching || config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) &&
4522 		   cfg->external &&
4523 		   flow_attr->egress) {
4524 		/*
4525 		 * On E-Switch setups, default egress flow rules are inserted to allow
4526 		 * representor matching and/or preserving metadata across steering domains.
4527 		 * These flow rules are inserted in group 0 and this group is reserved by PMD
4528 		 * for these purposes.
4529 		 *
4530 		 * As a result, if representor matching or extended metadata mode is enabled,
4531 		 * group provided by the user must be incremented to avoid inserting flow rules
4532 		 * in group 0.
4533 		 */
4534 		if (group > MLX5_HW_MAX_EGRESS_GROUP)
4535 			return rte_flow_error_set(error, EINVAL,
4536 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4537 						  NULL,
4538 						  "group index not supported");
4539 		*table_group = group + 1;
4540 	} else {
4541 		*table_group = group;
4542 	}
4543 	return 0;
4544 }
4545 
4546 /**
4547  * Create flow table.
4548  *
4549  * This function is a wrapper over @ref flow_hw_table_create(), which translates parameters
4550  * provided by user to proper internal values.
4551  *
4552  * @param[in] dev
4553  *   Pointer to Ethernet device.
4554  * @param[in] attr
4555  *   Pointer to the table attributes.
4556  * @param[in] item_templates
4557  *   Item template array to be binded to the table.
4558  * @param[in] nb_item_templates
4559  *   Number of item templates.
4560  * @param[in] action_templates
4561  *   Action template array to be binded to the table.
4562  * @param[in] nb_action_templates
4563  *   Number of action templates.
4564  * @param[out] error
4565  *   Pointer to error structure.
4566  *
4567  * @return
4568  *   Table on success, Otherwise, returns negative error code, rte_errno is set
4569  *   and error structure is filled.
4570  */
4571 static struct rte_flow_template_table *
4572 flow_hw_template_table_create(struct rte_eth_dev *dev,
4573 			      const struct rte_flow_template_table_attr *attr,
4574 			      struct rte_flow_pattern_template *item_templates[],
4575 			      uint8_t nb_item_templates,
4576 			      struct rte_flow_actions_template *action_templates[],
4577 			      uint8_t nb_action_templates,
4578 			      struct rte_flow_error *error)
4579 {
4580 	struct mlx5_flow_template_table_cfg cfg = {
4581 		.attr = *attr,
4582 		.external = true,
4583 	};
4584 	uint32_t group = attr->flow_attr.group;
4585 
4586 	if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
4587 		return NULL;
4588 	return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
4589 				    action_templates, nb_action_templates, error);
4590 }
4591 
4592 /**
4593  * Destroy flow table.
4594  *
4595  * @param[in] dev
4596  *   Pointer to the rte_eth_dev structure.
4597  * @param[in] table
4598  *   Pointer to the table to be destroyed.
4599  * @param[out] error
4600  *   Pointer to error structure.
4601  *
4602  * @return
4603  *   0 on success, a negative errno value otherwise and rte_errno is set.
4604  */
4605 static int
4606 flow_hw_table_destroy(struct rte_eth_dev *dev,
4607 		      struct rte_flow_template_table *table,
4608 		      struct rte_flow_error *error)
4609 {
4610 	struct mlx5_priv *priv = dev->data->dev_private;
4611 	int i;
4612 	uint32_t fidx = 1;
4613 	uint32_t ridx = 1;
4614 
4615 	/* Build ipool allocated object bitmap. */
4616 	mlx5_ipool_flush_cache(table->resource);
4617 	mlx5_ipool_flush_cache(table->flow);
4618 	/* Check if ipool has allocated objects. */
4619 	if (table->refcnt ||
4620 	    mlx5_ipool_get_next(table->flow, &fidx) ||
4621 	    mlx5_ipool_get_next(table->resource, &ridx)) {
4622 		DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
4623 		return rte_flow_error_set(error, EBUSY,
4624 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4625 				   NULL,
4626 				   "table in use");
4627 	}
4628 	LIST_REMOVE(table, next);
4629 	for (i = 0; i < table->nb_item_templates; i++)
4630 		__atomic_fetch_sub(&table->its[i]->refcnt,
4631 				   1, __ATOMIC_RELAXED);
4632 	for (i = 0; i < table->nb_action_templates; i++) {
4633 		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
4634 		__atomic_fetch_sub(&table->ats[i].action_template->refcnt,
4635 				   1, __ATOMIC_RELAXED);
4636 	}
4637 	mlx5dr_matcher_destroy(table->matcher);
4638 	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
4639 	mlx5_ipool_destroy(table->resource);
4640 	mlx5_ipool_destroy(table->flow);
4641 	mlx5_free(table);
4642 	return 0;
4643 }
4644 
4645 /**
4646  * Parse group's miss actions.
4647  *
4648  * @param[in] dev
4649  *   Pointer to the rte_eth_dev structure.
4650  * @param[in] cfg
4651  *   Pointer to the table_cfg structure.
4652  * @param[in] actions
4653  *   Array of actions to perform on group miss. Supported types:
4654  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
4655  * @param[out] dst_group_id
4656  *   Pointer to destination group id output. will be set to 0 if actions is END,
4657  *   otherwise will be set to destination group id.
4658  * @param[out] error
4659  *   Pointer to error structure.
4660  *
4661  * @return
4662  *   0 on success, a negative errno value otherwise and rte_errno is set.
4663  */
4664 
4665 static int
4666 flow_hw_group_parse_miss_actions(struct rte_eth_dev *dev,
4667 				 struct mlx5_flow_template_table_cfg *cfg,
4668 				 const struct rte_flow_action actions[],
4669 				 uint32_t *dst_group_id,
4670 				 struct rte_flow_error *error)
4671 {
4672 	const struct rte_flow_action_jump *jump_conf;
4673 	uint32_t temp = 0;
4674 	uint32_t i;
4675 
4676 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
4677 		switch (actions[i].type) {
4678 		case RTE_FLOW_ACTION_TYPE_VOID:
4679 			continue;
4680 		case RTE_FLOW_ACTION_TYPE_JUMP:
4681 			if (temp)
4682 				return rte_flow_error_set(error, ENOTSUP,
4683 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, actions,
4684 							  "Miss actions can contain only a single JUMP");
4685 
4686 			jump_conf = (const struct rte_flow_action_jump *)actions[i].conf;
4687 			if (!jump_conf)
4688 				return rte_flow_error_set(error, EINVAL,
4689 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4690 							  jump_conf, "Jump conf must not be NULL");
4691 
4692 			if (flow_hw_translate_group(dev, cfg, jump_conf->group, &temp, error))
4693 				return -rte_errno;
4694 
4695 			if (!temp)
4696 				return rte_flow_error_set(error, EINVAL,
4697 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4698 							  "Failed to set group miss actions - Invalid target group");
4699 			break;
4700 		default:
4701 			return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
4702 						  &actions[i], "Unsupported default miss action type");
4703 		}
4704 	}
4705 
4706 	*dst_group_id = temp;
4707 	return 0;
4708 }
4709 
4710 /**
4711  * Set group's miss group.
4712  *
4713  * @param[in] dev
4714  *   Pointer to the rte_eth_dev structure.
4715  * @param[in] cfg
4716  *   Pointer to the table_cfg structure.
4717  * @param[in] src_grp
4718  *   Pointer to source group structure.
4719  *   if NULL, a new group will be created based on group id from cfg->attr.flow_attr.group.
4720  * @param[in] dst_grp
4721  *   Pointer to destination group structure.
4722  * @param[out] error
4723  *   Pointer to error structure.
4724  *
4725  * @return
4726  *   0 on success, a negative errno value otherwise and rte_errno is set.
4727  */
4728 
4729 static int
4730 flow_hw_group_set_miss_group(struct rte_eth_dev *dev,
4731 			     struct mlx5_flow_template_table_cfg *cfg,
4732 			     struct mlx5_flow_group *src_grp,
4733 			     struct mlx5_flow_group *dst_grp,
4734 			     struct rte_flow_error *error)
4735 {
4736 	struct rte_flow_error sub_error = {
4737 		.type = RTE_FLOW_ERROR_TYPE_NONE,
4738 		.cause = NULL,
4739 		.message = NULL,
4740 	};
4741 	struct mlx5_flow_cb_ctx ctx = {
4742 		.dev = dev,
4743 		.error = &sub_error,
4744 		.data = &cfg->attr.flow_attr,
4745 	};
4746 	struct mlx5_priv *priv = dev->data->dev_private;
4747 	struct mlx5_list_entry *ge;
4748 	bool ref = false;
4749 	int ret;
4750 
4751 	if (!dst_grp)
4752 		return -EINVAL;
4753 
4754 	/* If group doesn't exist - needs to be created. */
4755 	if (!src_grp) {
4756 		ge = mlx5_hlist_register(priv->sh->groups, cfg->attr.flow_attr.group, &ctx);
4757 		if (!ge)
4758 			return -rte_errno;
4759 
4760 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
4761 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
4762 		ref = true;
4763 	} else if (!src_grp->miss_group) {
4764 		/* If group exists, but has no miss actions - need to increase ref_cnt. */
4765 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
4766 		src_grp->entry.ref_cnt++;
4767 		ref = true;
4768 	}
4769 
4770 	ret = mlx5dr_table_set_default_miss(src_grp->tbl, dst_grp->tbl);
4771 	if (ret)
4772 		goto mlx5dr_error;
4773 
4774 	/* If group existed and had old miss actions - ref_cnt is already correct.
4775 	 * However, need to reduce ref counter for old miss group.
4776 	 */
4777 	if (src_grp->miss_group)
4778 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->miss_group->entry);
4779 
4780 	src_grp->miss_group = dst_grp;
4781 	return 0;
4782 
4783 mlx5dr_error:
4784 	/* Reduce src_grp ref_cnt back & remove from grp list in case of mlx5dr error */
4785 	if (ref) {
4786 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->entry);
4787 		LIST_REMOVE(src_grp, next);
4788 	}
4789 
4790 	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4791 				  "Failed to set group miss actions");
4792 }
4793 
4794 /**
4795  * Unset group's miss group.
4796  *
4797  * @param[in] dev
4798  *   Pointer to the rte_eth_dev structure.
4799  * @param[in] grp
4800  *   Pointer to group structure.
4801  * @param[out] error
4802  *   Pointer to error structure.
4803  *
4804  * @return
4805  *   0 on success, a negative errno value otherwise and rte_errno is set.
4806  */
4807 
4808 static int
4809 flow_hw_group_unset_miss_group(struct rte_eth_dev *dev,
4810 			       struct mlx5_flow_group *grp,
4811 			       struct rte_flow_error *error)
4812 {
4813 	struct mlx5_priv *priv = dev->data->dev_private;
4814 	int ret;
4815 
4816 	/* If group doesn't exist - no need to change anything. */
4817 	if (!grp)
4818 		return 0;
4819 
4820 	/* If group exists, but miss actions is already default behavior -
4821 	 * no need to change anything.
4822 	 */
4823 	if (!grp->miss_group)
4824 		return 0;
4825 
4826 	ret = mlx5dr_table_set_default_miss(grp->tbl, NULL);
4827 	if (ret)
4828 		return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4829 					  "Failed to unset group miss actions");
4830 
4831 	mlx5_hlist_unregister(priv->sh->groups, &grp->miss_group->entry);
4832 	grp->miss_group = NULL;
4833 
4834 	LIST_REMOVE(grp, next);
4835 	mlx5_hlist_unregister(priv->sh->groups, &grp->entry);
4836 
4837 	return 0;
4838 }
4839 
4840 /**
4841  * Set group miss actions.
4842  *
4843  * @param[in] dev
4844  *   Pointer to the rte_eth_dev structure.
4845  * @param[in] group_id
4846  *   Group id.
4847  * @param[in] attr
4848  *   Pointer to group attributes structure.
4849  * @param[in] actions
4850  *   Array of actions to perform on group miss. Supported types:
4851  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
4852  * @param[out] error
4853  *   Pointer to error structure.
4854  *
4855  * @return
4856  *   0 on success, a negative errno value otherwise and rte_errno is set.
4857  */
4858 
4859 static int
4860 flow_hw_group_set_miss_actions(struct rte_eth_dev *dev,
4861 			       uint32_t group_id,
4862 			       const struct rte_flow_group_attr *attr,
4863 			       const struct rte_flow_action actions[],
4864 			       struct rte_flow_error *error)
4865 {
4866 	struct rte_flow_error sub_error = {
4867 		.type = RTE_FLOW_ERROR_TYPE_NONE,
4868 		.cause = NULL,
4869 		.message = NULL,
4870 	};
4871 	struct mlx5_flow_template_table_cfg cfg = {
4872 		.external = true,
4873 		.attr = {
4874 			.flow_attr = {
4875 				.group = group_id,
4876 				.ingress = attr->ingress,
4877 				.egress = attr->egress,
4878 				.transfer = attr->transfer,
4879 			},
4880 		},
4881 	};
4882 	struct mlx5_flow_cb_ctx ctx = {
4883 		.dev = dev,
4884 		.error = &sub_error,
4885 		.data = &cfg.attr.flow_attr,
4886 	};
4887 	struct mlx5_priv *priv = dev->data->dev_private;
4888 	struct mlx5_flow_group *src_grp = NULL;
4889 	struct mlx5_flow_group *dst_grp = NULL;
4890 	struct mlx5_list_entry *ge;
4891 	uint32_t dst_group_id = 0;
4892 	int ret;
4893 
4894 	if (flow_hw_translate_group(dev, &cfg, group_id, &group_id, error))
4895 		return -rte_errno;
4896 
4897 	if (!group_id)
4898 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4899 					  NULL, "Failed to set group miss actions - invalid group id");
4900 
4901 	ret = flow_hw_group_parse_miss_actions(dev, &cfg, actions, &dst_group_id, error);
4902 	if (ret)
4903 		return -rte_errno;
4904 
4905 	if (dst_group_id == group_id) {
4906 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4907 					  NULL, "Failed to set group miss actions - target group id must differ from group_id");
4908 	}
4909 
4910 	cfg.attr.flow_attr.group = group_id;
4911 	ge = mlx5_hlist_lookup(priv->sh->groups, group_id, &ctx);
4912 	if (ge)
4913 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
4914 
4915 	if (dst_group_id) {
4916 		/* Increase ref_cnt for new miss group. */
4917 		cfg.attr.flow_attr.group = dst_group_id;
4918 		ge = mlx5_hlist_register(priv->sh->groups, dst_group_id, &ctx);
4919 		if (!ge)
4920 			return -rte_errno;
4921 
4922 		dst_grp = container_of(ge, struct mlx5_flow_group, entry);
4923 
4924 		cfg.attr.flow_attr.group = group_id;
4925 		ret = flow_hw_group_set_miss_group(dev, &cfg, src_grp, dst_grp, error);
4926 		if (ret)
4927 			goto error;
4928 	} else {
4929 		return flow_hw_group_unset_miss_group(dev, src_grp, error);
4930 	}
4931 
4932 	return 0;
4933 
4934 error:
4935 	if (dst_grp)
4936 		mlx5_hlist_unregister(priv->sh->groups, &dst_grp->entry);
4937 	return -rte_errno;
4938 }
4939 
4940 static bool
4941 flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
4942 			     enum rte_flow_field_id field)
4943 {
4944 	return action->src.field == field || action->dst.field == field;
4945 }
4946 
4947 static int
4948 flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
4949 				     const struct rte_flow_action *action,
4950 				     const struct rte_flow_action *mask,
4951 				     struct rte_flow_error *error)
4952 {
4953 	const struct rte_flow_action_modify_field *action_conf = action->conf;
4954 	const struct rte_flow_action_modify_field *mask_conf = mask->conf;
4955 	int ret;
4956 
4957 	if (!mask_conf)
4958 		return rte_flow_error_set(error, EINVAL,
4959 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4960 					  "modify_field mask conf is missing");
4961 	if (action_conf->operation != mask_conf->operation)
4962 		return rte_flow_error_set(error, EINVAL,
4963 				RTE_FLOW_ERROR_TYPE_ACTION, action,
4964 				"modify_field operation mask and template are not equal");
4965 	if (action_conf->dst.field != mask_conf->dst.field)
4966 		return rte_flow_error_set(error, EINVAL,
4967 				RTE_FLOW_ERROR_TYPE_ACTION, action,
4968 				"destination field mask and template are not equal");
4969 	if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
4970 	    action_conf->dst.field == RTE_FLOW_FIELD_VALUE ||
4971 	    action_conf->dst.field == RTE_FLOW_FIELD_HASH_RESULT)
4972 		return rte_flow_error_set(error, EINVAL,
4973 				RTE_FLOW_ERROR_TYPE_ACTION, action,
4974 				"immediate value, pointer and hash result cannot be used as destination");
4975 	ret = flow_validate_modify_field_level(&action_conf->dst, error);
4976 	if (ret)
4977 		return ret;
4978 	if (action_conf->dst.tag_index &&
4979 	    !flow_modify_field_support_tag_array(action_conf->dst.field))
4980 		return rte_flow_error_set(error, EINVAL,
4981 				RTE_FLOW_ERROR_TYPE_ACTION, action,
4982 				"destination tag index is not supported");
4983 	if (action_conf->dst.class_id)
4984 		return rte_flow_error_set(error, EINVAL,
4985 				RTE_FLOW_ERROR_TYPE_ACTION, action,
4986 				"destination class id is not supported");
4987 	if (mask_conf->dst.level != UINT8_MAX)
4988 		return rte_flow_error_set(error, EINVAL,
4989 			RTE_FLOW_ERROR_TYPE_ACTION, action,
4990 			"destination encapsulation level must be fully masked");
4991 	if (mask_conf->dst.offset != UINT32_MAX)
4992 		return rte_flow_error_set(error, EINVAL,
4993 			RTE_FLOW_ERROR_TYPE_ACTION, action,
4994 			"destination offset level must be fully masked");
4995 	if (action_conf->src.field != mask_conf->src.field)
4996 		return rte_flow_error_set(error, EINVAL,
4997 				RTE_FLOW_ERROR_TYPE_ACTION, action,
4998 				"destination field mask and template are not equal");
4999 	if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
5000 	    action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
5001 		if (action_conf->src.tag_index &&
5002 		    !flow_modify_field_support_tag_array(action_conf->src.field))
5003 			return rte_flow_error_set(error, EINVAL,
5004 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5005 				"source tag index is not supported");
5006 		if (action_conf->src.class_id)
5007 			return rte_flow_error_set(error, EINVAL,
5008 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5009 				"source class id is not supported");
5010 		if (mask_conf->src.level != UINT8_MAX)
5011 			return rte_flow_error_set(error, EINVAL,
5012 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5013 				"source encapsulation level must be fully masked");
5014 		if (mask_conf->src.offset != UINT32_MAX)
5015 			return rte_flow_error_set(error, EINVAL,
5016 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5017 				"source offset level must be fully masked");
5018 		ret = flow_validate_modify_field_level(&action_conf->src, error);
5019 		if (ret)
5020 			return ret;
5021 	}
5022 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
5023 	     action_conf->dst.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
5024 	     action_conf->dst.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) ||
5025 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
5026 	     action_conf->src.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
5027 	     action_conf->src.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX))
5028 		return rte_flow_error_set(error, EINVAL,
5029 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5030 				 "tag index is out of range");
5031 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
5032 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->dst.tag_index) == REG_NON) ||
5033 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
5034 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->src.tag_index) == REG_NON))
5035 		return rte_flow_error_set(error, EINVAL,
5036 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5037 					  "tag index is out of range");
5038 	if (mask_conf->width != UINT32_MAX)
5039 		return rte_flow_error_set(error, EINVAL,
5040 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5041 				"modify_field width field must be fully masked");
5042 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
5043 		return rte_flow_error_set(error, EINVAL,
5044 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5045 				"modifying arbitrary place in a packet is not supported");
5046 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
5047 		return rte_flow_error_set(error, EINVAL,
5048 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5049 				"modifying vlan_type is not supported");
5050 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI))
5051 		return rte_flow_error_set(error, EINVAL,
5052 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5053 				"modifying Geneve VNI is not supported");
5054 	/* Due to HW bug, tunnel MPLS header is read only. */
5055 	if (action_conf->dst.field == RTE_FLOW_FIELD_MPLS)
5056 		return rte_flow_error_set(error, EINVAL,
5057 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5058 				"MPLS cannot be used as destination");
5059 	return 0;
5060 }
5061 static int
5062 flow_hw_validate_action_port_representor(struct rte_eth_dev *dev __rte_unused,
5063 					 const struct rte_flow_actions_template_attr *attr,
5064 					 const struct rte_flow_action *action,
5065 					 const struct rte_flow_action *mask,
5066 					 struct rte_flow_error *error)
5067 {
5068 	const struct rte_flow_action_ethdev *action_conf = NULL;
5069 	const struct rte_flow_action_ethdev *mask_conf = NULL;
5070 
5071 	/* If transfer is set, port has been validated as proxy port. */
5072 	if (!attr->transfer)
5073 		return rte_flow_error_set(error, EINVAL,
5074 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5075 					  "cannot use port_representor actions"
5076 					  " without an E-Switch");
5077 	if (!action || !mask)
5078 		return rte_flow_error_set(error, EINVAL,
5079 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5080 					  "actiona and mask configuration must be set");
5081 	action_conf = action->conf;
5082 	mask_conf = mask->conf;
5083 	if (!mask_conf || mask_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR ||
5084 	    !action_conf || action_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
5085 		return rte_flow_error_set(error, EINVAL,
5086 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5087 					  "only eswitch manager port 0xffff is"
5088 					  " supported");
5089 	return 0;
5090 }
5091 
5092 static int
5093 flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
5094 					 const struct rte_flow_action *action,
5095 					 const struct rte_flow_action *mask,
5096 					 struct rte_flow_error *error)
5097 {
5098 	const struct rte_flow_action_ethdev *action_conf = action->conf;
5099 	const struct rte_flow_action_ethdev *mask_conf = mask->conf;
5100 	struct mlx5_priv *priv = dev->data->dev_private;
5101 
5102 	if (!priv->sh->config.dv_esw_en)
5103 		return rte_flow_error_set(error, EINVAL,
5104 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5105 					  "cannot use represented_port actions"
5106 					  " without an E-Switch");
5107 	if (mask_conf && mask_conf->port_id) {
5108 		struct mlx5_priv *port_priv;
5109 		struct mlx5_priv *dev_priv;
5110 
5111 		if (!action_conf)
5112 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5113 						  action, "port index was not provided");
5114 		port_priv = mlx5_port_to_eswitch_info(action_conf->port_id, false);
5115 		if (!port_priv)
5116 			return rte_flow_error_set(error, rte_errno,
5117 						  RTE_FLOW_ERROR_TYPE_ACTION,
5118 						  action,
5119 						  "failed to obtain E-Switch"
5120 						  " info for port");
5121 		dev_priv = mlx5_dev_to_eswitch_info(dev);
5122 		if (!dev_priv)
5123 			return rte_flow_error_set(error, rte_errno,
5124 						  RTE_FLOW_ERROR_TYPE_ACTION,
5125 						  action,
5126 						  "failed to obtain E-Switch"
5127 						  " info for transfer proxy");
5128 		if (port_priv->domain_id != dev_priv->domain_id)
5129 			return rte_flow_error_set(error, rte_errno,
5130 						  RTE_FLOW_ERROR_TYPE_ACTION,
5131 						  action,
5132 						  "cannot forward to port from"
5133 						  " a different E-Switch");
5134 	}
5135 	return 0;
5136 }
5137 
5138 /**
5139  * Validate AGE action.
5140  *
5141  * @param[in] dev
5142  *   Pointer to rte_eth_dev structure.
5143  * @param[in] action
5144  *   Pointer to the indirect action.
5145  * @param[in] action_flags
5146  *   Holds the actions detected until now.
5147  * @param[in] fixed_cnt
5148  *   Indicator if this list has a fixed COUNT action.
5149  * @param[out] error
5150  *   Pointer to error structure.
5151  *
5152  * @return
5153  *   0 on success, a negative errno value otherwise and rte_errno is set.
5154  */
5155 static int
5156 flow_hw_validate_action_age(struct rte_eth_dev *dev,
5157 			    const struct rte_flow_action *action,
5158 			    uint64_t action_flags, bool fixed_cnt,
5159 			    struct rte_flow_error *error)
5160 {
5161 	struct mlx5_priv *priv = dev->data->dev_private;
5162 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
5163 
5164 	if (!priv->sh->cdev->config.devx)
5165 		return rte_flow_error_set(error, ENOTSUP,
5166 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5167 					  NULL, "AGE action not supported");
5168 	if (age_info->ages_ipool == NULL)
5169 		return rte_flow_error_set(error, EINVAL,
5170 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5171 					  "aging pool not initialized");
5172 	if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
5173 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
5174 		return rte_flow_error_set(error, EINVAL,
5175 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5176 					  "duplicate AGE actions set");
5177 	if (fixed_cnt)
5178 		return rte_flow_error_set(error, EINVAL,
5179 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5180 					  "AGE and fixed COUNT combination is not supported");
5181 	return 0;
5182 }
5183 
5184 /**
5185  * Validate count action.
5186  *
5187  * @param[in] dev
5188  *   Pointer to rte_eth_dev structure.
5189  * @param[in] action
5190  *   Pointer to the indirect action.
5191  * @param[in] mask
5192  *   Pointer to the indirect action mask.
5193  * @param[in] action_flags
5194  *   Holds the actions detected until now.
5195  * @param[out] error
5196  *   Pointer to error structure.
5197  *
5198  * @return
5199  *   0 on success, a negative errno value otherwise and rte_errno is set.
5200  */
5201 static int
5202 flow_hw_validate_action_count(struct rte_eth_dev *dev,
5203 			      const struct rte_flow_action *action,
5204 			      const struct rte_flow_action *mask,
5205 			      uint64_t action_flags,
5206 			      struct rte_flow_error *error)
5207 {
5208 	struct mlx5_priv *priv = dev->data->dev_private;
5209 	const struct rte_flow_action_count *count = mask->conf;
5210 
5211 	if (!priv->sh->cdev->config.devx)
5212 		return rte_flow_error_set(error, ENOTSUP,
5213 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5214 					  "count action not supported");
5215 	if (!priv->hws_cpool)
5216 		return rte_flow_error_set(error, EINVAL,
5217 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5218 					  "counters pool not initialized");
5219 	if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
5220 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
5221 		return rte_flow_error_set(error, EINVAL,
5222 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5223 					  "duplicate count actions set");
5224 	if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
5225 		return rte_flow_error_set(error, EINVAL,
5226 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
5227 					  "AGE and COUNT action shared by mask combination is not supported");
5228 	return 0;
5229 }
5230 
5231 /**
5232  * Validate meter_mark action.
5233  *
5234  * @param[in] dev
5235  *   Pointer to rte_eth_dev structure.
5236  * @param[in] action
5237  *   Pointer to the indirect action.
5238  * @param[out] error
5239  *   Pointer to error structure.
5240  *
5241  * @return
5242  *   0 on success, a negative errno value otherwise and rte_errno is set.
5243  */
5244 static int
5245 flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
5246 			      const struct rte_flow_action *action,
5247 			      struct rte_flow_error *error)
5248 {
5249 	struct mlx5_priv *priv = dev->data->dev_private;
5250 
5251 	RTE_SET_USED(action);
5252 
5253 	if (!priv->sh->cdev->config.devx)
5254 		return rte_flow_error_set(error, ENOTSUP,
5255 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5256 					  "meter_mark action not supported");
5257 	if (!priv->hws_mpool)
5258 		return rte_flow_error_set(error, EINVAL,
5259 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5260 					  "meter_mark pool not initialized");
5261 	return 0;
5262 }
5263 
5264 /**
5265  * Validate indirect action.
5266  *
5267  * @param[in] dev
5268  *   Pointer to rte_eth_dev structure.
5269  * @param[in] action
5270  *   Pointer to the indirect action.
5271  * @param[in] mask
5272  *   Pointer to the indirect action mask.
5273  * @param[in, out] action_flags
5274  *   Holds the actions detected until now.
5275  * @param[in, out] fixed_cnt
5276  *   Pointer to indicator if this list has a fixed COUNT action.
5277  * @param[out] error
5278  *   Pointer to error structure.
5279  *
5280  * @return
5281  *   0 on success, a negative errno value otherwise and rte_errno is set.
5282  */
5283 static int
5284 flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
5285 				 const struct rte_flow_action *action,
5286 				 const struct rte_flow_action *mask,
5287 				 uint64_t *action_flags, bool *fixed_cnt,
5288 				 struct rte_flow_error *error)
5289 {
5290 	uint32_t type;
5291 	int ret;
5292 
5293 	if (!mask)
5294 		return rte_flow_error_set(error, EINVAL,
5295 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5296 					  "Unable to determine indirect action type without a mask specified");
5297 	type = mask->type;
5298 	switch (type) {
5299 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
5300 		ret = flow_hw_validate_action_meter_mark(dev, mask, error);
5301 		if (ret < 0)
5302 			return ret;
5303 		*action_flags |= MLX5_FLOW_ACTION_METER;
5304 		break;
5305 	case RTE_FLOW_ACTION_TYPE_RSS:
5306 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
5307 		*action_flags |= MLX5_FLOW_ACTION_RSS;
5308 		break;
5309 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
5310 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
5311 		*action_flags |= MLX5_FLOW_ACTION_CT;
5312 		break;
5313 	case RTE_FLOW_ACTION_TYPE_COUNT:
5314 		if (action->conf && mask->conf) {
5315 			if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
5316 			    (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
5317 				/*
5318 				 * AGE cannot use indirect counter which is
5319 				 * shared with enother flow rules.
5320 				 */
5321 				return rte_flow_error_set(error, EINVAL,
5322 						  RTE_FLOW_ERROR_TYPE_ACTION,
5323 						  NULL,
5324 						  "AGE and fixed COUNT combination is not supported");
5325 			*fixed_cnt = true;
5326 		}
5327 		ret = flow_hw_validate_action_count(dev, action, mask,
5328 						    *action_flags, error);
5329 		if (ret < 0)
5330 			return ret;
5331 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
5332 		break;
5333 	case RTE_FLOW_ACTION_TYPE_AGE:
5334 		ret = flow_hw_validate_action_age(dev, action, *action_flags,
5335 						  *fixed_cnt, error);
5336 		if (ret < 0)
5337 			return ret;
5338 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
5339 		break;
5340 	case RTE_FLOW_ACTION_TYPE_QUOTA:
5341 		/* TODO: add proper quota verification */
5342 		*action_flags |= MLX5_FLOW_ACTION_QUOTA;
5343 		break;
5344 	default:
5345 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
5346 		return rte_flow_error_set(error, ENOTSUP,
5347 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
5348 					  "Unsupported indirect action type");
5349 	}
5350 	return 0;
5351 }
5352 
5353 /**
5354  * Validate ipv6_ext_push action.
5355  *
5356  * @param[in] dev
5357  *   Pointer to rte_eth_dev structure.
5358  * @param[in] action
5359  *   Pointer to the indirect action.
5360  * @param[out] error
5361  *   Pointer to error structure.
5362  *
5363  * @return
5364  *   0 on success, a negative errno value otherwise and rte_errno is set.
5365  */
5366 static int
5367 flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
5368 				      const struct rte_flow_action *action,
5369 				      struct rte_flow_error *error)
5370 {
5371 	const struct rte_flow_action_ipv6_ext_push *raw_push_data = action->conf;
5372 
5373 	if (!raw_push_data || !raw_push_data->size || !raw_push_data->data)
5374 		return rte_flow_error_set(error, EINVAL,
5375 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5376 					  "invalid ipv6_ext_push data");
5377 	if (raw_push_data->type != IPPROTO_ROUTING ||
5378 	    raw_push_data->size > MLX5_PUSH_MAX_LEN)
5379 		return rte_flow_error_set(error, EINVAL,
5380 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5381 					  "Unsupported ipv6_ext_push type or length");
5382 	return 0;
5383 }
5384 
5385 /**
5386  * Validate raw_encap action.
5387  *
5388  * @param[in] dev
5389  *   Pointer to rte_eth_dev structure.
5390  * @param[in] action
5391  *   Pointer to the indirect action.
5392  * @param[out] error
5393  *   Pointer to error structure.
5394  *
5395  * @return
5396  *   0 on success, a negative errno value otherwise and rte_errno is set.
5397  */
5398 static int
5399 flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
5400 				  const struct rte_flow_action *mask,
5401 				  struct rte_flow_error *error)
5402 {
5403 	const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
5404 	const struct rte_flow_action_raw_encap *action_conf = action->conf;
5405 
5406 	if (!mask_conf || !mask_conf->size)
5407 		return rte_flow_error_set(error, EINVAL,
5408 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
5409 					  "raw_encap: size must be masked");
5410 	if (!action_conf || !action_conf->size)
5411 		return rte_flow_error_set(error, EINVAL,
5412 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5413 					  "raw_encap: invalid action configuration");
5414 	if (mask_conf->data && !action_conf->data)
5415 		return rte_flow_error_set(error, EINVAL,
5416 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5417 					  "raw_encap: masked data is missing");
5418 	return 0;
5419 }
5420 
5421 /**
5422  * Process `... / raw_decap / raw_encap / ...` actions sequence.
5423  * The PMD handles the sequence as a single encap or decap reformat action,
5424  * depending on the raw_encap configuration.
5425  *
5426  * The function assumes that the raw_decap / raw_encap location
5427  * in actions template list complies with relative HWS actions order:
5428  * for the required reformat configuration:
5429  * ENCAP configuration must appear before [JUMP|DROP|PORT]
5430  * DECAP configuration must appear at the template head.
5431  */
5432 static uint64_t
5433 mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
5434 			       uint32_t encap_ind, uint64_t flags)
5435 {
5436 	const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
5437 
5438 	if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
5439 		return MLX5_FLOW_ACTION_ENCAP;
5440 	if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
5441 		return MLX5_FLOW_ACTION_ENCAP;
5442 	return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
5443 	       MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
5444 }
5445 
5446 static inline uint16_t
5447 flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
5448 				     struct rte_flow_action masks[],
5449 				     const struct rte_flow_action *mf_actions,
5450 				     const struct rte_flow_action *mf_masks,
5451 				     uint64_t flags, uint32_t act_num,
5452 				     uint32_t mf_num)
5453 {
5454 	uint32_t i, tail;
5455 
5456 	MLX5_ASSERT(actions && masks);
5457 	MLX5_ASSERT(mf_num > 0);
5458 	if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
5459 		/*
5460 		 * Application action template already has Modify Field.
5461 		 * It's location will be used in DR.
5462 		 * Expanded MF action can be added before the END.
5463 		 */
5464 		i = act_num - 1;
5465 		goto insert;
5466 	}
5467 	/**
5468 	 * Locate the first action positioned BEFORE the new MF.
5469 	 *
5470 	 * Search for a place to insert modify header
5471 	 * from the END action backwards:
5472 	 * 1. END is always present in actions array
5473 	 * 2. END location is always at action[act_num - 1]
5474 	 * 3. END always positioned AFTER modify field location
5475 	 *
5476 	 * Relative actions order is the same for RX, TX and FDB.
5477 	 *
5478 	 * Current actions order (draft-3)
5479 	 * @see action_order_arr[]
5480 	 */
5481 	for (i = act_num - 2; (int)i >= 0; i--) {
5482 		enum rte_flow_action_type type = actions[i].type;
5483 		uint64_t reformat_type;
5484 
5485 		if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
5486 			type = masks[i].type;
5487 		switch (type) {
5488 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5489 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5490 		case RTE_FLOW_ACTION_TYPE_DROP:
5491 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
5492 		case RTE_FLOW_ACTION_TYPE_JUMP:
5493 		case RTE_FLOW_ACTION_TYPE_QUEUE:
5494 		case RTE_FLOW_ACTION_TYPE_RSS:
5495 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5496 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
5497 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5498 		case RTE_FLOW_ACTION_TYPE_VOID:
5499 		case RTE_FLOW_ACTION_TYPE_END:
5500 			break;
5501 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5502 			reformat_type =
5503 				mlx5_decap_encap_reformat_type(actions, i,
5504 							       flags);
5505 			if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
5506 				i++;
5507 				goto insert;
5508 			}
5509 			if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
5510 				i--;
5511 			break;
5512 		default:
5513 			i++; /* new MF inserted AFTER actions[i] */
5514 			goto insert;
5515 		}
5516 	}
5517 	i = 0;
5518 insert:
5519 	tail = act_num - i; /* num action to move */
5520 	memmove(actions + i + mf_num, actions + i, sizeof(actions[0]) * tail);
5521 	memcpy(actions + i, mf_actions, sizeof(actions[0]) * mf_num);
5522 	memmove(masks + i + mf_num, masks + i, sizeof(masks[0]) * tail);
5523 	memcpy(masks + i, mf_masks, sizeof(masks[0]) * mf_num);
5524 	return i;
5525 }
5526 
5527 static int
5528 flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
5529 				  const
5530 				  struct rte_flow_actions_template_attr *attr,
5531 				  const struct rte_flow_action *action,
5532 				  const struct rte_flow_action *mask,
5533 				  struct rte_flow_error *error)
5534 {
5535 #define X_FIELD(ptr, t, f) (((ptr)->conf) && ((t *)((ptr)->conf))->f)
5536 
5537 	const bool masked_push =
5538 		X_FIELD(mask + MLX5_HW_VLAN_PUSH_TYPE_IDX,
5539 			const struct rte_flow_action_of_push_vlan, ethertype);
5540 	bool masked_param;
5541 
5542 	/*
5543 	 * Mandatory actions order:
5544 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
5545 	 */
5546 	RTE_SET_USED(dev);
5547 	RTE_SET_USED(attr);
5548 	/* Check that mark matches OF_PUSH_VLAN */
5549 	if (mask[MLX5_HW_VLAN_PUSH_TYPE_IDX].type !=
5550 	    RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
5551 		return rte_flow_error_set(error, EINVAL,
5552 					  RTE_FLOW_ERROR_TYPE_ACTION,
5553 					  action, "OF_PUSH_VLAN: mask does not match");
5554 	/* Check that the second template and mask items are SET_VLAN_VID */
5555 	if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
5556 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID ||
5557 	    mask[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
5558 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
5559 		return rte_flow_error_set(error, EINVAL,
5560 					  RTE_FLOW_ERROR_TYPE_ACTION,
5561 					  action, "OF_PUSH_VLAN: invalid actions order");
5562 	masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_VID_IDX,
5563 			       const struct rte_flow_action_of_set_vlan_vid,
5564 			       vlan_vid);
5565 	/*
5566 	 * PMD requires OF_SET_VLAN_VID mask to must match OF_PUSH_VLAN
5567 	 */
5568 	if (masked_push ^ masked_param)
5569 		return rte_flow_error_set(error, EINVAL,
5570 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5571 					  "OF_SET_VLAN_VID: mask does not match OF_PUSH_VLAN");
5572 	if (is_of_vlan_pcp_present(action)) {
5573 		if (mask[MLX5_HW_VLAN_PUSH_PCP_IDX].type !=
5574 		     RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
5575 			return rte_flow_error_set(error, EINVAL,
5576 						  RTE_FLOW_ERROR_TYPE_ACTION,
5577 						  action, "OF_SET_VLAN_PCP: missing mask configuration");
5578 		masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_PCP_IDX,
5579 				       const struct
5580 				       rte_flow_action_of_set_vlan_pcp,
5581 				       vlan_pcp);
5582 		/*
5583 		 * PMD requires OF_SET_VLAN_PCP mask to must match OF_PUSH_VLAN
5584 		 */
5585 		if (masked_push ^ masked_param)
5586 			return rte_flow_error_set(error, EINVAL,
5587 						  RTE_FLOW_ERROR_TYPE_ACTION, action,
5588 						  "OF_SET_VLAN_PCP: mask does not match OF_PUSH_VLAN");
5589 	}
5590 	return 0;
5591 #undef X_FIELD
5592 }
5593 
5594 static int
5595 flow_hw_validate_action_default_miss(struct rte_eth_dev *dev,
5596 				     const struct rte_flow_actions_template_attr *attr,
5597 				     uint64_t action_flags,
5598 				     struct rte_flow_error *error)
5599 {
5600 	/*
5601 	 * The private DEFAULT_MISS action is used internally for LACP in control
5602 	 * flows. So this validation can be ignored. It can be kept right now since
5603 	 * the validation will be done only once.
5604 	 */
5605 	struct mlx5_priv *priv = dev->data->dev_private;
5606 
5607 	if (!attr->ingress || attr->egress || attr->transfer)
5608 		return rte_flow_error_set(error, EINVAL,
5609 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5610 					  "DEFAULT MISS is only supported in ingress.");
5611 	if (!priv->hw_def_miss)
5612 		return rte_flow_error_set(error, EINVAL,
5613 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5614 					  "DEFAULT MISS action does not exist.");
5615 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
5616 		return rte_flow_error_set(error, EINVAL,
5617 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5618 					  "DEFAULT MISS should be the only termination.");
5619 	return 0;
5620 }
5621 
5622 static int
5623 mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
5624 			      const struct rte_flow_actions_template_attr *attr,
5625 			      const struct rte_flow_action actions[],
5626 			      const struct rte_flow_action masks[],
5627 			      uint64_t *act_flags,
5628 			      struct rte_flow_error *error)
5629 {
5630 	struct mlx5_priv *priv = dev->data->dev_private;
5631 	const struct rte_flow_action_count *count_mask = NULL;
5632 	bool fixed_cnt = false;
5633 	uint64_t action_flags = 0;
5634 	bool actions_end = false;
5635 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
5636 	int table_type;
5637 #endif
5638 	uint16_t i;
5639 	int ret;
5640 	const struct rte_flow_action_ipv6_ext_remove *remove_data;
5641 
5642 	/* FDB actions are only valid to proxy port. */
5643 	if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
5644 		return rte_flow_error_set(error, EINVAL,
5645 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5646 					  NULL,
5647 					  "transfer actions are only valid to proxy port");
5648 	for (i = 0; !actions_end; ++i) {
5649 		const struct rte_flow_action *action = &actions[i];
5650 		const struct rte_flow_action *mask = &masks[i];
5651 
5652 		MLX5_ASSERT(i < MLX5_HW_MAX_ACTS);
5653 		if (action->type != RTE_FLOW_ACTION_TYPE_INDIRECT &&
5654 		    action->type != mask->type)
5655 			return rte_flow_error_set(error, ENOTSUP,
5656 						  RTE_FLOW_ERROR_TYPE_ACTION,
5657 						  action,
5658 						  "mask type does not match action type");
5659 		switch ((int)action->type) {
5660 		case RTE_FLOW_ACTION_TYPE_VOID:
5661 			break;
5662 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
5663 			break;
5664 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
5665 			ret = flow_hw_validate_action_indirect(dev, action,
5666 							       mask,
5667 							       &action_flags,
5668 							       &fixed_cnt,
5669 							       error);
5670 			if (ret < 0)
5671 				return ret;
5672 			break;
5673 		case RTE_FLOW_ACTION_TYPE_MARK:
5674 			/* TODO: Validation logic */
5675 			action_flags |= MLX5_FLOW_ACTION_MARK;
5676 			break;
5677 		case RTE_FLOW_ACTION_TYPE_DROP:
5678 			/* TODO: Validation logic */
5679 			action_flags |= MLX5_FLOW_ACTION_DROP;
5680 			break;
5681 		case RTE_FLOW_ACTION_TYPE_JUMP:
5682 			/* TODO: Validation logic */
5683 			action_flags |= MLX5_FLOW_ACTION_JUMP;
5684 			break;
5685 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
5686 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
5687 			if (priv->shared_host)
5688 				return rte_flow_error_set(error, ENOTSUP,
5689 							  RTE_FLOW_ERROR_TYPE_ACTION,
5690 							  action,
5691 							  "action not supported in guest port");
5692 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
5693 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
5694 				     MLX5DR_TABLE_TYPE_FDB);
5695 			if (!priv->hw_send_to_kernel[table_type])
5696 				return rte_flow_error_set(error, ENOTSUP,
5697 							  RTE_FLOW_ERROR_TYPE_ACTION,
5698 							  action,
5699 							  "action is not available");
5700 			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
5701 			break;
5702 #endif
5703 		case RTE_FLOW_ACTION_TYPE_QUEUE:
5704 			/* TODO: Validation logic */
5705 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
5706 			break;
5707 		case RTE_FLOW_ACTION_TYPE_RSS:
5708 			/* TODO: Validation logic */
5709 			action_flags |= MLX5_FLOW_ACTION_RSS;
5710 			break;
5711 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5712 			/* TODO: Validation logic */
5713 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
5714 			break;
5715 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5716 			/* TODO: Validation logic */
5717 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
5718 			break;
5719 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5720 			/* TODO: Validation logic */
5721 			action_flags |= MLX5_FLOW_ACTION_DECAP;
5722 			break;
5723 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5724 			/* TODO: Validation logic */
5725 			action_flags |= MLX5_FLOW_ACTION_DECAP;
5726 			break;
5727 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5728 			ret = flow_hw_validate_action_raw_encap(action, mask, error);
5729 			if (ret < 0)
5730 				return ret;
5731 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
5732 			break;
5733 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5734 			/* TODO: Validation logic */
5735 			action_flags |= MLX5_FLOW_ACTION_DECAP;
5736 			break;
5737 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
5738 			ret = flow_hw_validate_action_ipv6_ext_push(dev, action, error);
5739 			if (ret < 0)
5740 				return ret;
5741 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
5742 			break;
5743 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
5744 			remove_data = action->conf;
5745 			/* Remove action must be shared. */
5746 			if (remove_data->type != IPPROTO_ROUTING || !mask) {
5747 				DRV_LOG(ERR, "Only supports shared IPv6 routing remove");
5748 				return -EINVAL;
5749 			}
5750 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE;
5751 			break;
5752 		case RTE_FLOW_ACTION_TYPE_METER:
5753 			/* TODO: Validation logic */
5754 			action_flags |= MLX5_FLOW_ACTION_METER;
5755 			break;
5756 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
5757 			ret = flow_hw_validate_action_meter_mark(dev, action,
5758 								 error);
5759 			if (ret < 0)
5760 				return ret;
5761 			action_flags |= MLX5_FLOW_ACTION_METER;
5762 			break;
5763 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
5764 			ret = flow_hw_validate_action_modify_field(dev, action, mask,
5765 								   error);
5766 			if (ret < 0)
5767 				return ret;
5768 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
5769 			break;
5770 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5771 			ret = flow_hw_validate_action_represented_port
5772 					(dev, action, mask, error);
5773 			if (ret < 0)
5774 				return ret;
5775 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5776 			break;
5777 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
5778 			ret = flow_hw_validate_action_port_representor
5779 					(dev, attr, action, mask, error);
5780 			if (ret < 0)
5781 				return ret;
5782 			action_flags |= MLX5_FLOW_ACTION_PORT_REPRESENTOR;
5783 			break;
5784 		case RTE_FLOW_ACTION_TYPE_AGE:
5785 			if (count_mask && count_mask->id)
5786 				fixed_cnt = true;
5787 			ret = flow_hw_validate_action_age(dev, action,
5788 							  action_flags,
5789 							  fixed_cnt, error);
5790 			if (ret < 0)
5791 				return ret;
5792 			action_flags |= MLX5_FLOW_ACTION_AGE;
5793 			break;
5794 		case RTE_FLOW_ACTION_TYPE_COUNT:
5795 			ret = flow_hw_validate_action_count(dev, action, mask,
5796 							    action_flags,
5797 							    error);
5798 			if (ret < 0)
5799 				return ret;
5800 			count_mask = mask->conf;
5801 			action_flags |= MLX5_FLOW_ACTION_COUNT;
5802 			break;
5803 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
5804 			/* TODO: Validation logic */
5805 			action_flags |= MLX5_FLOW_ACTION_CT;
5806 			break;
5807 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5808 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5809 			break;
5810 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5811 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5812 			break;
5813 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5814 			ret = flow_hw_validate_action_push_vlan
5815 					(dev, attr, action, mask, error);
5816 			if (ret != 0)
5817 				return ret;
5818 			i += is_of_vlan_pcp_present(action) ?
5819 				MLX5_HW_VLAN_PUSH_PCP_IDX :
5820 				MLX5_HW_VLAN_PUSH_VID_IDX;
5821 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5822 			break;
5823 		case RTE_FLOW_ACTION_TYPE_END:
5824 			actions_end = true;
5825 			break;
5826 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5827 			ret = flow_hw_validate_action_default_miss(dev, attr,
5828 								   action_flags, error);
5829 			if (ret < 0)
5830 				return ret;
5831 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5832 			break;
5833 		default:
5834 			return rte_flow_error_set(error, ENOTSUP,
5835 						  RTE_FLOW_ERROR_TYPE_ACTION,
5836 						  action,
5837 						  "action not supported in template API");
5838 		}
5839 	}
5840 	if (act_flags != NULL)
5841 		*act_flags = action_flags;
5842 	return 0;
5843 }
5844 
5845 static int
5846 flow_hw_actions_validate(struct rte_eth_dev *dev,
5847 			 const struct rte_flow_actions_template_attr *attr,
5848 			 const struct rte_flow_action actions[],
5849 			 const struct rte_flow_action masks[],
5850 			 struct rte_flow_error *error)
5851 {
5852 	return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, error);
5853 }
5854 
5855 
5856 static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
5857 	[RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
5858 	[RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
5859 	[RTE_FLOW_ACTION_TYPE_JUMP] = MLX5DR_ACTION_TYP_TBL,
5860 	[RTE_FLOW_ACTION_TYPE_QUEUE] = MLX5DR_ACTION_TYP_TIR,
5861 	[RTE_FLOW_ACTION_TYPE_RSS] = MLX5DR_ACTION_TYP_TIR,
5862 	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
5863 	[RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
5864 	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
5865 	[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
5866 	[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
5867 	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
5868 	[RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = MLX5DR_ACTION_TYP_MISS,
5869 	[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
5870 	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
5871 	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
5872 	[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
5873 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
5874 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
5875 };
5876 
5877 static inline void
5878 action_template_set_type(struct rte_flow_actions_template *at,
5879 			 enum mlx5dr_action_type *action_types,
5880 			 unsigned int action_src, uint16_t *curr_off,
5881 			 enum mlx5dr_action_type type)
5882 {
5883 	at->dr_off[action_src] = *curr_off;
5884 	action_types[*curr_off] = type;
5885 	*curr_off = *curr_off + 1;
5886 }
5887 
5888 static int
5889 flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
5890 					  enum mlx5dr_action_type *action_types,
5891 					  uint16_t *curr_off, uint16_t *cnt_off,
5892 					  struct rte_flow_actions_template *at)
5893 {
5894 	switch (type) {
5895 	case RTE_FLOW_ACTION_TYPE_RSS:
5896 		action_template_set_type(at, action_types, action_src, curr_off,
5897 					 MLX5DR_ACTION_TYP_TIR);
5898 		break;
5899 	case RTE_FLOW_ACTION_TYPE_AGE:
5900 	case RTE_FLOW_ACTION_TYPE_COUNT:
5901 		/*
5902 		 * Both AGE and COUNT action need counter, the first one fills
5903 		 * the action_types array, and the second only saves the offset.
5904 		 */
5905 		if (*cnt_off == UINT16_MAX) {
5906 			*cnt_off = *curr_off;
5907 			action_template_set_type(at, action_types,
5908 						 action_src, curr_off,
5909 						 MLX5DR_ACTION_TYP_CTR);
5910 		}
5911 		at->dr_off[action_src] = *cnt_off;
5912 		break;
5913 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
5914 		action_template_set_type(at, action_types, action_src, curr_off,
5915 					 MLX5DR_ACTION_TYP_ASO_CT);
5916 		break;
5917 	case RTE_FLOW_ACTION_TYPE_QUOTA:
5918 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
5919 		action_template_set_type(at, action_types, action_src, curr_off,
5920 					 MLX5DR_ACTION_TYP_ASO_METER);
5921 		break;
5922 	default:
5923 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
5924 		return -EINVAL;
5925 	}
5926 	return 0;
5927 }
5928 
5929 
5930 static int
5931 flow_hw_template_actions_list(struct rte_flow_actions_template *at,
5932 			      unsigned int action_src,
5933 			      enum mlx5dr_action_type *action_types,
5934 			      uint16_t *curr_off, uint16_t *cnt_off)
5935 {
5936 	int ret;
5937 	const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
5938 	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
5939 	const union {
5940 		struct mlx5_indlst_legacy *legacy;
5941 		struct rte_flow_action_list_handle *handle;
5942 	} indlst_obj = { .handle = indlst_conf->handle };
5943 	enum mlx5dr_action_type type;
5944 
5945 	switch (list_type) {
5946 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
5947 		ret = flow_hw_dr_actions_template_handle_shared
5948 			(indlst_obj.legacy->legacy_type, action_src,
5949 			 action_types, curr_off, cnt_off, at);
5950 		if (ret)
5951 			return ret;
5952 		break;
5953 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
5954 		action_template_set_type(at, action_types, action_src, curr_off,
5955 					 MLX5DR_ACTION_TYP_DEST_ARRAY);
5956 		break;
5957 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
5958 		type = ((struct mlx5_hw_encap_decap_action *)
5959 			(indlst_conf->handle))->action_type;
5960 		action_template_set_type(at, action_types, action_src, curr_off, type);
5961 		break;
5962 	default:
5963 		DRV_LOG(ERR, "Unsupported indirect list type");
5964 		return -EINVAL;
5965 	}
5966 	return 0;
5967 }
5968 
5969 /**
5970  * Create DR action template based on a provided sequence of flow actions.
5971  *
5972  * @param[in] dev
5973  *   Pointer to the rte_eth_dev structure.
5974  * @param[in] at
5975  *   Pointer to flow actions template to be updated.
5976  *
5977  * @return
5978  *   DR action template pointer on success and action offsets in @p at are updated.
5979  *   NULL otherwise.
5980  */
5981 static struct mlx5dr_action_template *
5982 flow_hw_dr_actions_template_create(struct rte_eth_dev *dev,
5983 				   struct rte_flow_actions_template *at)
5984 {
5985 	struct mlx5dr_action_template *dr_template;
5986 	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { MLX5DR_ACTION_TYP_LAST };
5987 	unsigned int i;
5988 	uint16_t curr_off;
5989 	enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
5990 	uint16_t reformat_off = UINT16_MAX;
5991 	uint16_t mhdr_off = UINT16_MAX;
5992 	uint16_t recom_off = UINT16_MAX;
5993 	uint16_t cnt_off = UINT16_MAX;
5994 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
5995 	int ret;
5996 
5997 	for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
5998 		const struct rte_flow_action_raw_encap *raw_encap_data;
5999 		size_t data_size;
6000 		enum mlx5dr_action_type type;
6001 
6002 		if (curr_off >= MLX5_HW_MAX_ACTS)
6003 			goto err_actions_num;
6004 		switch ((int)at->actions[i].type) {
6005 		case RTE_FLOW_ACTION_TYPE_VOID:
6006 			break;
6007 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
6008 			ret = flow_hw_template_actions_list(at, i, action_types,
6009 							    &curr_off, &cnt_off);
6010 			if (ret)
6011 				return NULL;
6012 			break;
6013 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
6014 			ret = flow_hw_dr_actions_template_handle_shared
6015 				(at->masks[i].type, i, action_types,
6016 				 &curr_off, &cnt_off, at);
6017 			if (ret)
6018 				return NULL;
6019 			break;
6020 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6021 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6022 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6023 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6024 			MLX5_ASSERT(reformat_off == UINT16_MAX);
6025 			reformat_off = curr_off++;
6026 			reformat_act_type = mlx5_hw_dr_action_types[at->actions[i].type];
6027 			break;
6028 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
6029 			MLX5_ASSERT(recom_off == UINT16_MAX);
6030 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
6031 			recom_off = curr_off++;
6032 			break;
6033 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
6034 			MLX5_ASSERT(recom_off == UINT16_MAX);
6035 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
6036 			recom_off = curr_off++;
6037 			break;
6038 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6039 			raw_encap_data = at->actions[i].conf;
6040 			data_size = raw_encap_data->size;
6041 			if (reformat_off != UINT16_MAX) {
6042 				reformat_act_type = data_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
6043 					MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
6044 					MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
6045 			} else {
6046 				reformat_off = curr_off++;
6047 				reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
6048 			}
6049 			break;
6050 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6051 			reformat_off = curr_off++;
6052 			reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
6053 			break;
6054 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6055 			if (mhdr_off == UINT16_MAX) {
6056 				mhdr_off = curr_off++;
6057 				type = mlx5_hw_dr_action_types[at->actions[i].type];
6058 				action_types[mhdr_off] = type;
6059 			}
6060 			break;
6061 		case RTE_FLOW_ACTION_TYPE_METER:
6062 			at->dr_off[i] = curr_off;
6063 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
6064 			if (curr_off >= MLX5_HW_MAX_ACTS)
6065 				goto err_actions_num;
6066 			action_types[curr_off++] = MLX5DR_ACTION_TYP_TBL;
6067 			break;
6068 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6069 			type = mlx5_hw_dr_action_types[at->actions[i].type];
6070 			at->dr_off[i] = curr_off;
6071 			action_types[curr_off++] = type;
6072 			i += is_of_vlan_pcp_present(at->actions + i) ?
6073 				MLX5_HW_VLAN_PUSH_PCP_IDX :
6074 				MLX5_HW_VLAN_PUSH_VID_IDX;
6075 			break;
6076 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
6077 			at->dr_off[i] = curr_off;
6078 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
6079 			if (curr_off >= MLX5_HW_MAX_ACTS)
6080 				goto err_actions_num;
6081 			break;
6082 		case RTE_FLOW_ACTION_TYPE_AGE:
6083 		case RTE_FLOW_ACTION_TYPE_COUNT:
6084 			/*
6085 			 * Both AGE and COUNT action need counter, the first
6086 			 * one fills the action_types array, and the second only
6087 			 * saves the offset.
6088 			 */
6089 			if (cnt_off == UINT16_MAX) {
6090 				cnt_off = curr_off++;
6091 				action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
6092 			}
6093 			at->dr_off[i] = cnt_off;
6094 			break;
6095 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6096 			at->dr_off[i] = curr_off;
6097 			action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS;
6098 			break;
6099 		default:
6100 			type = mlx5_hw_dr_action_types[at->actions[i].type];
6101 			at->dr_off[i] = curr_off;
6102 			action_types[curr_off++] = type;
6103 			break;
6104 		}
6105 	}
6106 	if (curr_off >= MLX5_HW_MAX_ACTS)
6107 		goto err_actions_num;
6108 	if (mhdr_off != UINT16_MAX)
6109 		at->mhdr_off = mhdr_off;
6110 	if (reformat_off != UINT16_MAX) {
6111 		at->reformat_off = reformat_off;
6112 		action_types[reformat_off] = reformat_act_type;
6113 	}
6114 	if (recom_off != UINT16_MAX) {
6115 		at->recom_off = recom_off;
6116 		action_types[recom_off] = recom_type;
6117 	}
6118 	dr_template = mlx5dr_action_template_create(action_types);
6119 	if (dr_template) {
6120 		at->dr_actions_num = curr_off;
6121 	} else {
6122 		DRV_LOG(ERR, "Failed to create DR action template: %d", rte_errno);
6123 		return NULL;
6124 	}
6125 	/* Create srh flex parser for remove anchor. */
6126 	if ((recom_type == MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT ||
6127 	     recom_type == MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) &&
6128 	    mlx5_alloc_srh_flex_parser(dev)) {
6129 		DRV_LOG(ERR, "Failed to create srv6 flex parser");
6130 		claim_zero(mlx5dr_action_template_destroy(dr_template));
6131 		return NULL;
6132 	}
6133 	return dr_template;
6134 err_actions_num:
6135 	DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed in template",
6136 		curr_off, MLX5_HW_MAX_ACTS);
6137 	return NULL;
6138 }
6139 
6140 static void
6141 flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
6142 		     struct rte_flow_action *ra,
6143 		     struct rte_flow_action *rm,
6144 		     struct rte_flow_action_modify_field *spec,
6145 		     struct rte_flow_action_modify_field *mask,
6146 		     int set_vlan_vid_ix)
6147 {
6148 	struct rte_flow_error error;
6149 	const bool masked = rm[set_vlan_vid_ix].conf &&
6150 		(((const struct rte_flow_action_of_set_vlan_vid *)
6151 			rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
6152 	const struct rte_flow_action_of_set_vlan_vid *conf =
6153 		ra[set_vlan_vid_ix].conf;
6154 	rte_be16_t vid = masked ? conf->vlan_vid : 0;
6155 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
6156 					       NULL, &error);
6157 	*spec = (typeof(*spec)) {
6158 		.operation = RTE_FLOW_MODIFY_SET,
6159 		.dst = {
6160 			.field = RTE_FLOW_FIELD_VLAN_ID,
6161 			.level = 0, .offset = 0,
6162 		},
6163 		.src = {
6164 			.field = RTE_FLOW_FIELD_VALUE,
6165 			.level = vid,
6166 			.offset = 0,
6167 		},
6168 		.width = width,
6169 	};
6170 	*mask = (typeof(*mask)) {
6171 		.operation = RTE_FLOW_MODIFY_SET,
6172 		.dst = {
6173 			.field = RTE_FLOW_FIELD_VLAN_ID,
6174 			.level = 0xff, .offset = 0xffffffff,
6175 		},
6176 		.src = {
6177 			.field = RTE_FLOW_FIELD_VALUE,
6178 			.level = masked ? (1U << width) - 1 : 0,
6179 			.offset = 0,
6180 		},
6181 		.width = 0xffffffff,
6182 	};
6183 	ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
6184 	ra[set_vlan_vid_ix].conf = spec;
6185 	rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
6186 	rm[set_vlan_vid_ix].conf = mask;
6187 }
6188 
6189 static __rte_always_inline int
6190 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
6191 			       struct mlx5_hw_q_job *job,
6192 			       struct mlx5_action_construct_data *act_data,
6193 			       const struct mlx5_hw_actions *hw_acts,
6194 			       const struct rte_flow_action *action)
6195 {
6196 	struct rte_flow_error error;
6197 	rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
6198 			   action->conf)->vlan_vid;
6199 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
6200 					       NULL, &error);
6201 	struct rte_flow_action_modify_field conf = {
6202 		.operation = RTE_FLOW_MODIFY_SET,
6203 		.dst = {
6204 			.field = RTE_FLOW_FIELD_VLAN_ID,
6205 			.level = 0, .offset = 0,
6206 		},
6207 		.src = {
6208 			.field = RTE_FLOW_FIELD_VALUE,
6209 			.level = vid,
6210 			.offset = 0,
6211 		},
6212 		.width = width,
6213 	};
6214 	struct rte_flow_action modify_action = {
6215 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6216 		.conf = &conf
6217 	};
6218 
6219 	return flow_hw_modify_field_construct(job, act_data, hw_acts,
6220 					      &modify_action);
6221 }
6222 
6223 static int
6224 flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
6225 			  struct rte_flow_item_flex_handle *handle,
6226 			  uint8_t *flex_item)
6227 {
6228 	int index = mlx5_flex_acquire_index(dev, handle, false);
6229 
6230 	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
6231 	if (index < 0)
6232 		return -1;
6233 	if (!(*flex_item & RTE_BIT32(index))) {
6234 		/* Don't count same flex item again. */
6235 		if (mlx5_flex_acquire_index(dev, handle, true) != index)
6236 			MLX5_ASSERT(false);
6237 		*flex_item |= (uint8_t)RTE_BIT32(index);
6238 	}
6239 	return 0;
6240 }
6241 
6242 static void
6243 flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
6244 {
6245 	while (*flex_item) {
6246 		int index = rte_bsf32(*flex_item);
6247 
6248 		mlx5_flex_release_index(dev, index);
6249 		*flex_item &= ~(uint8_t)RTE_BIT32(index);
6250 	}
6251 }
6252 static __rte_always_inline void
6253 flow_hw_actions_template_replace_container(const
6254 					   struct rte_flow_action *actions,
6255 					   const
6256 					   struct rte_flow_action *masks,
6257 					   struct rte_flow_action *new_actions,
6258 					   struct rte_flow_action *new_masks,
6259 					   struct rte_flow_action **ra,
6260 					   struct rte_flow_action **rm,
6261 					   uint32_t act_num)
6262 {
6263 	memcpy(new_actions, actions, sizeof(actions[0]) * act_num);
6264 	memcpy(new_masks, masks, sizeof(masks[0]) * act_num);
6265 	*ra = (void *)(uintptr_t)new_actions;
6266 	*rm = (void *)(uintptr_t)new_masks;
6267 }
6268 
6269 /* Action template copies these actions in rte_flow_conv() */
6270 
6271 static const struct rte_flow_action rx_meta_copy_action =  {
6272 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6273 	.conf = &(struct rte_flow_action_modify_field){
6274 		.operation = RTE_FLOW_MODIFY_SET,
6275 		.dst = {
6276 			.field = (enum rte_flow_field_id)
6277 				MLX5_RTE_FLOW_FIELD_META_REG,
6278 			.tag_index = REG_B,
6279 		},
6280 		.src = {
6281 			.field = (enum rte_flow_field_id)
6282 				MLX5_RTE_FLOW_FIELD_META_REG,
6283 			.tag_index = REG_C_1,
6284 		},
6285 		.width = 32,
6286 	}
6287 };
6288 
6289 static const struct rte_flow_action rx_meta_copy_mask = {
6290 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6291 	.conf = &(struct rte_flow_action_modify_field){
6292 		.operation = RTE_FLOW_MODIFY_SET,
6293 		.dst = {
6294 			.field = (enum rte_flow_field_id)
6295 				MLX5_RTE_FLOW_FIELD_META_REG,
6296 			.level = UINT8_MAX,
6297 			.tag_index = UINT8_MAX,
6298 			.offset = UINT32_MAX,
6299 		},
6300 		.src = {
6301 			.field = (enum rte_flow_field_id)
6302 				MLX5_RTE_FLOW_FIELD_META_REG,
6303 			.level = UINT8_MAX,
6304 			.tag_index = UINT8_MAX,
6305 			.offset = UINT32_MAX,
6306 		},
6307 		.width = UINT32_MAX,
6308 	}
6309 };
6310 
6311 static const struct rte_flow_action quota_color_inc_action = {
6312 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6313 	.conf = &(struct rte_flow_action_modify_field) {
6314 		.operation = RTE_FLOW_MODIFY_ADD,
6315 		.dst = {
6316 			.field = RTE_FLOW_FIELD_METER_COLOR,
6317 			.level = 0, .offset = 0
6318 		},
6319 		.src = {
6320 			.field = RTE_FLOW_FIELD_VALUE,
6321 			.level = 1,
6322 			.offset = 0,
6323 		},
6324 		.width = 2
6325 	}
6326 };
6327 
6328 static const struct rte_flow_action quota_color_inc_mask = {
6329 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6330 	.conf = &(struct rte_flow_action_modify_field) {
6331 		.operation = RTE_FLOW_MODIFY_ADD,
6332 		.dst = {
6333 			.field = RTE_FLOW_FIELD_METER_COLOR,
6334 			.level = UINT8_MAX,
6335 			.tag_index = UINT8_MAX,
6336 			.offset = UINT32_MAX,
6337 		},
6338 		.src = {
6339 			.field = RTE_FLOW_FIELD_VALUE,
6340 			.level = 3,
6341 			.offset = 0
6342 		},
6343 		.width = UINT32_MAX
6344 	}
6345 };
6346 
6347 /**
6348  * Create flow action template.
6349  *
6350  * @param[in] dev
6351  *   Pointer to the rte_eth_dev structure.
6352  * @param[in] attr
6353  *   Pointer to the action template attributes.
6354  * @param[in] actions
6355  *   Associated actions (list terminated by the END action).
6356  * @param[in] masks
6357  *   List of actions that marks which of the action's member is constant.
6358  * @param[out] error
6359  *   Pointer to error structure.
6360  *
6361  * @return
6362  *   Action template pointer on success, NULL otherwise and rte_errno is set.
6363  */
6364 static struct rte_flow_actions_template *
6365 flow_hw_actions_template_create(struct rte_eth_dev *dev,
6366 			const struct rte_flow_actions_template_attr *attr,
6367 			const struct rte_flow_action actions[],
6368 			const struct rte_flow_action masks[],
6369 			struct rte_flow_error *error)
6370 {
6371 	struct mlx5_priv *priv = dev->data->dev_private;
6372 	int len, act_len, mask_len;
6373 	unsigned int act_num;
6374 	unsigned int i;
6375 	struct rte_flow_actions_template *at = NULL;
6376 	uint16_t pos = UINT16_MAX;
6377 	uint64_t action_flags = 0;
6378 	struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
6379 	struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
6380 	struct rte_flow_action *ra = (void *)(uintptr_t)actions;
6381 	struct rte_flow_action *rm = (void *)(uintptr_t)masks;
6382 	int set_vlan_vid_ix = -1;
6383 	struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
6384 	struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
6385 	struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
6386 	struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
6387 	uint32_t expand_mf_num = 0;
6388 	uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
6389 
6390 	if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
6391 					  &action_flags, error))
6392 		return NULL;
6393 	for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
6394 		switch (ra[i].type) {
6395 		/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
6396 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6397 			i += is_of_vlan_pcp_present(ra + i) ?
6398 				MLX5_HW_VLAN_PUSH_PCP_IDX :
6399 				MLX5_HW_VLAN_PUSH_VID_IDX;
6400 			break;
6401 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6402 			set_vlan_vid_ix = i;
6403 			break;
6404 		default:
6405 			break;
6406 		}
6407 	}
6408 	/*
6409 	 * Count flow actions to allocate required space for storing DR offsets and to check
6410 	 * if temporary buffer would not be overrun.
6411 	 */
6412 	act_num = i + 1;
6413 	if (act_num >= MLX5_HW_MAX_ACTS) {
6414 		rte_flow_error_set(error, EINVAL,
6415 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
6416 		return NULL;
6417 	}
6418 	if (set_vlan_vid_ix != -1) {
6419 		/* If temporary action buffer was not used, copy template actions to it */
6420 		if (ra == actions)
6421 			flow_hw_actions_template_replace_container(actions,
6422 								   masks,
6423 								   tmp_action,
6424 								   tmp_mask,
6425 								   &ra, &rm,
6426 								   act_num);
6427 		flow_hw_set_vlan_vid(dev, ra, rm,
6428 				     &set_vlan_vid_spec, &set_vlan_vid_mask,
6429 				     set_vlan_vid_ix);
6430 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
6431 	}
6432 	if (action_flags & MLX5_FLOW_ACTION_QUOTA) {
6433 		mf_actions[expand_mf_num] = quota_color_inc_action;
6434 		mf_masks[expand_mf_num] = quota_color_inc_mask;
6435 		expand_mf_num++;
6436 	}
6437 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
6438 	    priv->sh->config.dv_esw_en &&
6439 	    (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
6440 		/* Insert META copy */
6441 		mf_actions[expand_mf_num] = rx_meta_copy_action;
6442 		mf_masks[expand_mf_num] = rx_meta_copy_mask;
6443 		expand_mf_num++;
6444 	}
6445 	if (expand_mf_num) {
6446 		if (act_num + expand_mf_num > MLX5_HW_MAX_ACTS) {
6447 			rte_flow_error_set(error, E2BIG,
6448 					   RTE_FLOW_ERROR_TYPE_ACTION,
6449 					   NULL, "cannot expand: too many actions");
6450 			return NULL;
6451 		}
6452 		if (ra == actions)
6453 			flow_hw_actions_template_replace_container(actions,
6454 								   masks,
6455 								   tmp_action,
6456 								   tmp_mask,
6457 								   &ra, &rm,
6458 								   act_num);
6459 		/* Application should make sure only one Q/RSS exist in one rule. */
6460 		pos = flow_hw_template_expand_modify_field(ra, rm,
6461 							   mf_actions,
6462 							   mf_masks,
6463 							   action_flags,
6464 							   act_num,
6465 							   expand_mf_num);
6466 		act_num += expand_mf_num;
6467 		for (i = pos + expand_mf_num; i < act_num; i++)
6468 			src_off[i] += expand_mf_num;
6469 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
6470 	}
6471 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
6472 	if (act_len <= 0)
6473 		return NULL;
6474 	len = RTE_ALIGN(act_len, 16);
6475 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);
6476 	if (mask_len <= 0)
6477 		return NULL;
6478 	len += RTE_ALIGN(mask_len, 16);
6479 	len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
6480 	len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
6481 	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
6482 			 RTE_CACHE_LINE_SIZE, rte_socket_id());
6483 	if (!at) {
6484 		rte_flow_error_set(error, ENOMEM,
6485 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6486 				   NULL,
6487 				   "cannot allocate action template");
6488 		return NULL;
6489 	}
6490 	/* Actions part is in the first part. */
6491 	at->attr = *attr;
6492 	at->actions = (struct rte_flow_action *)(at + 1);
6493 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,
6494 				len, ra, error);
6495 	if (act_len <= 0)
6496 		goto error;
6497 	/* Masks part is in the second part. */
6498 	at->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);
6499 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
6500 				 len - act_len, rm, error);
6501 	if (mask_len <= 0)
6502 		goto error;
6503 	/* DR actions offsets in the third part. */
6504 	at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
6505 	at->src_off = RTE_PTR_ADD(at->dr_off,
6506 				  RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
6507 	memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
6508 	at->actions_num = act_num;
6509 	for (i = 0; i < at->actions_num; ++i)
6510 		at->dr_off[i] = UINT16_MAX;
6511 	at->reformat_off = UINT16_MAX;
6512 	at->mhdr_off = UINT16_MAX;
6513 	at->recom_off = UINT16_MAX;
6514 	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
6515 	     actions++, masks++, i++) {
6516 		const struct rte_flow_action_modify_field *info;
6517 
6518 		switch (actions->type) {
6519 		/*
6520 		 * mlx5 PMD hacks indirect action index directly to the action conf.
6521 		 * The rte_flow_conv() function copies the content from conf pointer.
6522 		 * Need to restore the indirect action index from action conf here.
6523 		 */
6524 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
6525 			at->actions[i].conf = ra[i].conf;
6526 			at->masks[i].conf = rm[i].conf;
6527 			break;
6528 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6529 			info = actions->conf;
6530 			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
6531 			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
6532 						       &at->flex_item)) ||
6533 			    (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
6534 			     flow_hw_flex_item_acquire(dev, info->src.flex_handle,
6535 						       &at->flex_item)))
6536 				goto error;
6537 			break;
6538 		default:
6539 			break;
6540 		}
6541 	}
6542 	at->tmpl = flow_hw_dr_actions_template_create(dev, at);
6543 	if (!at->tmpl)
6544 		goto error;
6545 	at->action_flags = action_flags;
6546 	__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
6547 	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
6548 	return at;
6549 error:
6550 	if (at) {
6551 		if (at->tmpl)
6552 			mlx5dr_action_template_destroy(at->tmpl);
6553 		mlx5_free(at);
6554 	}
6555 	rte_flow_error_set(error, rte_errno,
6556 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6557 			   "Failed to create action template");
6558 	return NULL;
6559 }
6560 
6561 /**
6562  * Destroy flow action template.
6563  *
6564  * @param[in] dev
6565  *   Pointer to the rte_eth_dev structure.
6566  * @param[in] template
6567  *   Pointer to the action template to be destroyed.
6568  * @param[out] error
6569  *   Pointer to error structure.
6570  *
6571  * @return
6572  *   0 on success, a negative errno value otherwise and rte_errno is set.
6573  */
6574 static int
6575 flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
6576 				 struct rte_flow_actions_template *template,
6577 				 struct rte_flow_error *error __rte_unused)
6578 {
6579 	uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
6580 			MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
6581 
6582 	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
6583 		DRV_LOG(WARNING, "Action template %p is still in use.",
6584 			(void *)template);
6585 		return rte_flow_error_set(error, EBUSY,
6586 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6587 				   NULL,
6588 				   "action template in using");
6589 	}
6590 	if (template->action_flags & flag)
6591 		mlx5_free_srh_flex_parser(dev);
6592 	LIST_REMOVE(template, next);
6593 	flow_hw_flex_item_release(dev, &template->flex_item);
6594 	if (template->tmpl)
6595 		mlx5dr_action_template_destroy(template->tmpl);
6596 	mlx5_free(template);
6597 	return 0;
6598 }
6599 
6600 static uint32_t
6601 flow_hw_count_items(const struct rte_flow_item *items)
6602 {
6603 	const struct rte_flow_item *curr_item;
6604 	uint32_t nb_items;
6605 
6606 	nb_items = 0;
6607 	for (curr_item = items; curr_item->type != RTE_FLOW_ITEM_TYPE_END; ++curr_item)
6608 		++nb_items;
6609 	return ++nb_items;
6610 }
6611 
6612 static struct rte_flow_item *
6613 flow_hw_prepend_item(const struct rte_flow_item *items,
6614 		     const uint32_t nb_items,
6615 		     const struct rte_flow_item *new_item,
6616 		     struct rte_flow_error *error)
6617 {
6618 	struct rte_flow_item *copied_items;
6619 	size_t size;
6620 
6621 	/* Allocate new array of items. */
6622 	size = sizeof(*copied_items) * (nb_items + 1);
6623 	copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());
6624 	if (!copied_items) {
6625 		rte_flow_error_set(error, ENOMEM,
6626 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6627 				   NULL,
6628 				   "cannot allocate item template");
6629 		return NULL;
6630 	}
6631 	/* Put new item at the beginning and copy the rest. */
6632 	copied_items[0] = *new_item;
6633 	rte_memcpy(&copied_items[1], items, sizeof(*items) * nb_items);
6634 	return copied_items;
6635 }
6636 
6637 static int
6638 flow_hw_pattern_validate(struct rte_eth_dev *dev,
6639 			 const struct rte_flow_pattern_template_attr *attr,
6640 			 const struct rte_flow_item items[],
6641 			 struct rte_flow_error *error)
6642 {
6643 	struct mlx5_priv *priv = dev->data->dev_private;
6644 	int i, tag_idx;
6645 	bool items_end = false;
6646 	uint32_t tag_bitmap = 0;
6647 
6648 	if (!attr->ingress && !attr->egress && !attr->transfer)
6649 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6650 					  "at least one of the direction attributes"
6651 					  " must be specified");
6652 	if (priv->sh->config.dv_esw_en) {
6653 		MLX5_ASSERT(priv->master || priv->representor);
6654 		if (priv->master) {
6655 			if ((attr->ingress && attr->egress) ||
6656 			    (attr->ingress && attr->transfer) ||
6657 			    (attr->egress && attr->transfer))
6658 				return rte_flow_error_set(error, EINVAL,
6659 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6660 							  "only one direction attribute at once"
6661 							  " can be used on transfer proxy port");
6662 		} else {
6663 			if (attr->transfer)
6664 				return rte_flow_error_set(error, EINVAL,
6665 							  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
6666 							  "transfer attribute cannot be used with"
6667 							  " port representors");
6668 			if (attr->ingress && attr->egress)
6669 				return rte_flow_error_set(error, EINVAL,
6670 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6671 							  "ingress and egress direction attributes"
6672 							  " cannot be used at the same time on"
6673 							  " port representors");
6674 		}
6675 	} else {
6676 		if (attr->transfer)
6677 			return rte_flow_error_set(error, EINVAL,
6678 						  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
6679 						  "transfer attribute cannot be used when"
6680 						  " E-Switch is disabled");
6681 	}
6682 	for (i = 0; !items_end; i++) {
6683 		int type = items[i].type;
6684 
6685 		switch (type) {
6686 		case RTE_FLOW_ITEM_TYPE_TAG:
6687 		{
6688 			const struct rte_flow_item_tag *tag =
6689 				(const struct rte_flow_item_tag *)items[i].spec;
6690 
6691 			if (tag == NULL)
6692 				return rte_flow_error_set(error, EINVAL,
6693 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6694 							  NULL,
6695 							  "Tag spec is NULL");
6696 			if (tag->index >= MLX5_FLOW_HW_TAGS_MAX &&
6697 			    tag->index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
6698 				return rte_flow_error_set(error, EINVAL,
6699 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6700 							  NULL,
6701 							  "Invalid tag index");
6702 			tag_idx = flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, tag->index);
6703 			if (tag_idx == REG_NON)
6704 				return rte_flow_error_set(error, EINVAL,
6705 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6706 							  NULL,
6707 							  "Unsupported tag index");
6708 			if (tag_bitmap & (1 << tag_idx))
6709 				return rte_flow_error_set(error, EINVAL,
6710 							  RTE_FLOW_ERROR_TYPE_ITEM,
6711 							  NULL,
6712 							  "Duplicated tag index");
6713 			tag_bitmap |= 1 << tag_idx;
6714 			break;
6715 		}
6716 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6717 		{
6718 			const struct rte_flow_item_tag *tag =
6719 				(const struct rte_flow_item_tag *)items[i].spec;
6720 			uint16_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
6721 
6722 			if (!((1 << (tag->index - REG_C_0)) & regcs))
6723 				return rte_flow_error_set(error, EINVAL,
6724 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6725 							  NULL,
6726 							  "Unsupported internal tag index");
6727 			if (tag_bitmap & (1 << tag->index))
6728 				return rte_flow_error_set(error, EINVAL,
6729 							  RTE_FLOW_ERROR_TYPE_ITEM,
6730 							  NULL,
6731 							  "Duplicated tag index");
6732 			tag_bitmap |= 1 << tag->index;
6733 			break;
6734 		}
6735 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
6736 			if (attr->ingress && priv->sh->config.repr_matching)
6737 				return rte_flow_error_set(error, EINVAL,
6738 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6739 						  "represented port item cannot be used"
6740 						  " when ingress attribute is set");
6741 			if (attr->egress)
6742 				return rte_flow_error_set(error, EINVAL,
6743 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6744 						  "represented port item cannot be used"
6745 						  " when egress attribute is set");
6746 			break;
6747 		case RTE_FLOW_ITEM_TYPE_META:
6748 			if (!priv->sh->config.dv_esw_en ||
6749 			    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS) {
6750 				if (attr->ingress)
6751 					return rte_flow_error_set(error, EINVAL,
6752 								  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6753 								  "META item is not supported"
6754 								  " on current FW with ingress"
6755 								  " attribute");
6756 			}
6757 			break;
6758 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
6759 		{
6760 			int reg = flow_hw_get_reg_id(dev,
6761 						     RTE_FLOW_ITEM_TYPE_METER_COLOR,
6762 						     0);
6763 			if (reg == REG_NON)
6764 				return rte_flow_error_set(error, EINVAL,
6765 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6766 							  NULL,
6767 							  "Unsupported meter color register");
6768 			break;
6769 		}
6770 		case RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY:
6771 		{
6772 			if (!priv->sh->lag_rx_port_affinity_en)
6773 				return rte_flow_error_set(error, EINVAL,
6774 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6775 							  "Unsupported aggregated affinity with Older FW");
6776 			if ((attr->transfer && priv->fdb_def_rule) || attr->egress)
6777 				return rte_flow_error_set(error, EINVAL,
6778 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
6779 							  "Aggregated affinity item not supported"
6780 							  " with egress or transfer"
6781 							  " attribute");
6782 			break;
6783 		}
6784 		case RTE_FLOW_ITEM_TYPE_VOID:
6785 		case RTE_FLOW_ITEM_TYPE_ETH:
6786 		case RTE_FLOW_ITEM_TYPE_VLAN:
6787 		case RTE_FLOW_ITEM_TYPE_IPV4:
6788 		case RTE_FLOW_ITEM_TYPE_IPV6:
6789 		case RTE_FLOW_ITEM_TYPE_UDP:
6790 		case RTE_FLOW_ITEM_TYPE_TCP:
6791 		case RTE_FLOW_ITEM_TYPE_GTP:
6792 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6793 		case RTE_FLOW_ITEM_TYPE_VXLAN:
6794 		case RTE_FLOW_ITEM_TYPE_MPLS:
6795 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
6796 		case RTE_FLOW_ITEM_TYPE_GRE:
6797 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6798 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
6799 		case RTE_FLOW_ITEM_TYPE_ICMP:
6800 		case RTE_FLOW_ITEM_TYPE_ICMP6:
6801 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
6802 		case RTE_FLOW_ITEM_TYPE_QUOTA:
6803 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
6804 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
6805 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
6806 		case RTE_FLOW_ITEM_TYPE_ESP:
6807 		case RTE_FLOW_ITEM_TYPE_FLEX:
6808 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
6809 		case RTE_FLOW_ITEM_TYPE_PTYPE:
6810 			break;
6811 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
6812 			/*
6813 			 * Integrity flow item validation require access to
6814 			 * both item mask and spec.
6815 			 * Current HWS model allows item mask in pattern
6816 			 * template and item spec in flow rule.
6817 			 */
6818 			break;
6819 		case RTE_FLOW_ITEM_TYPE_END:
6820 			items_end = true;
6821 			break;
6822 		default:
6823 			return rte_flow_error_set(error, EINVAL,
6824 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6825 						  NULL,
6826 						  "Unsupported item type");
6827 		}
6828 	}
6829 	return 0;
6830 }
6831 
6832 static bool
6833 flow_hw_pattern_has_sq_match(const struct rte_flow_item *items)
6834 {
6835 	unsigned int i;
6836 
6837 	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i)
6838 		if (items[i].type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ)
6839 			return true;
6840 	return false;
6841 }
6842 
6843 /**
6844  * Create flow item template.
6845  *
6846  * @param[in] dev
6847  *   Pointer to the rte_eth_dev structure.
6848  * @param[in] attr
6849  *   Pointer to the item template attributes.
6850  * @param[in] items
6851  *   The template item pattern.
6852  * @param[out] error
6853  *   Pointer to error structure.
6854  *
6855  * @return
6856  *  Item template pointer on success, NULL otherwise and rte_errno is set.
6857  */
6858 static struct rte_flow_pattern_template *
6859 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
6860 			     const struct rte_flow_pattern_template_attr *attr,
6861 			     const struct rte_flow_item items[],
6862 			     struct rte_flow_error *error)
6863 {
6864 	struct mlx5_priv *priv = dev->data->dev_private;
6865 	struct rte_flow_pattern_template *it;
6866 	struct rte_flow_item *copied_items = NULL;
6867 	const struct rte_flow_item *tmpl_items;
6868 	uint32_t orig_item_nb;
6869 	struct rte_flow_item port = {
6870 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
6871 		.mask = &rte_flow_item_ethdev_mask,
6872 	};
6873 	struct rte_flow_item_tag tag_v = {
6874 		.data = 0,
6875 		.index = REG_C_0,
6876 	};
6877 	struct rte_flow_item_tag tag_m = {
6878 		.data = flow_hw_tx_tag_regc_mask(dev),
6879 		.index = 0xff,
6880 	};
6881 	struct rte_flow_item tag = {
6882 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
6883 		.spec = &tag_v,
6884 		.mask = &tag_m,
6885 		.last = NULL
6886 	};
6887 	unsigned int i = 0;
6888 
6889 	if (flow_hw_pattern_validate(dev, attr, items, error))
6890 		return NULL;
6891 	orig_item_nb = flow_hw_count_items(items);
6892 	if (priv->sh->config.dv_esw_en &&
6893 	    priv->sh->config.repr_matching &&
6894 	    attr->ingress && !attr->egress && !attr->transfer) {
6895 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
6896 		if (!copied_items)
6897 			return NULL;
6898 		tmpl_items = copied_items;
6899 	} else if (priv->sh->config.dv_esw_en &&
6900 		   priv->sh->config.repr_matching &&
6901 		   !attr->ingress && attr->egress && !attr->transfer) {
6902 		if (flow_hw_pattern_has_sq_match(items)) {
6903 			DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
6904 				       "pattern template", dev->data->port_id);
6905 			tmpl_items = items;
6906 			goto setup_pattern_template;
6907 		}
6908 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
6909 		if (!copied_items)
6910 			return NULL;
6911 		tmpl_items = copied_items;
6912 	} else {
6913 		tmpl_items = items;
6914 	}
6915 setup_pattern_template:
6916 	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
6917 	if (!it) {
6918 		if (copied_items)
6919 			mlx5_free(copied_items);
6920 		rte_flow_error_set(error, ENOMEM,
6921 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6922 				   NULL,
6923 				   "cannot allocate item template");
6924 		return NULL;
6925 	}
6926 	it->attr = *attr;
6927 	it->orig_item_nb = orig_item_nb;
6928 	it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
6929 	if (!it->mt) {
6930 		if (copied_items)
6931 			mlx5_free(copied_items);
6932 		mlx5_free(it);
6933 		rte_flow_error_set(error, rte_errno,
6934 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6935 				   NULL,
6936 				   "cannot create match template");
6937 		return NULL;
6938 	}
6939 	it->item_flags = flow_hw_matching_item_flags_get(tmpl_items);
6940 	if (copied_items) {
6941 		if (attr->ingress)
6942 			it->implicit_port = true;
6943 		else if (attr->egress)
6944 			it->implicit_tag = true;
6945 		mlx5_free(copied_items);
6946 	}
6947 	/* Either inner or outer, can't both. */
6948 	if (it->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
6949 			      MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
6950 		if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
6951 		     (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) ||
6952 		    (mlx5_alloc_srh_flex_parser(dev))) {
6953 			claim_zero(mlx5dr_match_template_destroy(it->mt));
6954 			mlx5_free(it);
6955 			rte_flow_error_set(error, rte_errno,
6956 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6957 					   "cannot create IPv6 routing extension support");
6958 			return NULL;
6959 		}
6960 	}
6961 	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
6962 		if (items[i].type == RTE_FLOW_ITEM_TYPE_FLEX) {
6963 			const struct rte_flow_item_flex *spec =
6964 				(const struct rte_flow_item_flex *)items[i].spec;
6965 			struct rte_flow_item_flex_handle *handle = spec->handle;
6966 
6967 			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
6968 				claim_zero(mlx5dr_match_template_destroy(it->mt));
6969 				mlx5_free(it);
6970 				rte_flow_error_set(error, rte_errno,
6971 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6972 						   "Failed to acquire flex item");
6973 				return NULL;
6974 			}
6975 		}
6976 	}
6977 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
6978 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
6979 	return it;
6980 }
6981 
6982 /**
6983  * Destroy flow item template.
6984  *
6985  * @param[in] dev
6986  *   Pointer to the rte_eth_dev structure.
6987  * @param[in] template
6988  *   Pointer to the item template to be destroyed.
6989  * @param[out] error
6990  *   Pointer to error structure.
6991  *
6992  * @return
6993  *   0 on success, a negative errno value otherwise and rte_errno is set.
6994  */
6995 static int
6996 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
6997 			      struct rte_flow_pattern_template *template,
6998 			      struct rte_flow_error *error __rte_unused)
6999 {
7000 	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
7001 		DRV_LOG(WARNING, "Item template %p is still in use.",
7002 			(void *)template);
7003 		return rte_flow_error_set(error, EBUSY,
7004 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7005 				   NULL,
7006 				   "item template in using");
7007 	}
7008 	if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
7009 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
7010 		mlx5_free_srh_flex_parser(dev);
7011 	LIST_REMOVE(template, next);
7012 	flow_hw_flex_item_release(dev, &template->flex_item);
7013 	claim_zero(mlx5dr_match_template_destroy(template->mt));
7014 	mlx5_free(template);
7015 	return 0;
7016 }
7017 
7018 /*
7019  * Get information about HWS pre-configurable resources.
7020  *
7021  * @param[in] dev
7022  *   Pointer to the rte_eth_dev structure.
7023  * @param[out] port_info
7024  *   Pointer to port information.
7025  * @param[out] queue_info
7026  *   Pointer to queue information.
7027  * @param[out] error
7028  *   Pointer to error structure.
7029  *
7030  * @return
7031  *   0 on success, a negative errno value otherwise and rte_errno is set.
7032  */
7033 static int
7034 flow_hw_info_get(struct rte_eth_dev *dev,
7035 		 struct rte_flow_port_info *port_info,
7036 		 struct rte_flow_queue_info *queue_info,
7037 		 struct rte_flow_error *error __rte_unused)
7038 {
7039 	struct mlx5_priv *priv = dev->data->dev_private;
7040 	uint16_t port_id = dev->data->port_id;
7041 	struct rte_mtr_capabilities mtr_cap;
7042 	int ret;
7043 
7044 	memset(port_info, 0, sizeof(*port_info));
7045 	/* Queue size is unlimited from low-level. */
7046 	port_info->max_nb_queues = UINT32_MAX;
7047 	queue_info->max_size = UINT32_MAX;
7048 
7049 	memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
7050 	ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
7051 	if (!ret)
7052 		port_info->max_nb_meters = mtr_cap.n_max;
7053 	port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
7054 	port_info->max_nb_aging_objects = port_info->max_nb_counters;
7055 	return 0;
7056 }
7057 
7058 /**
7059  * Create group callback.
7060  *
7061  * @param[in] tool_ctx
7062  *   Pointer to the hash list related context.
7063  * @param[in] cb_ctx
7064  *   Pointer to the group creation context.
7065  *
7066  * @return
7067  *   Group entry on success, NULL otherwise and rte_errno is set.
7068  */
7069 struct mlx5_list_entry *
7070 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
7071 {
7072 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
7073 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7074 	struct rte_eth_dev *dev = ctx->dev;
7075 	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
7076 	struct mlx5_priv *priv = dev->data->dev_private;
7077 	struct mlx5dr_table_attr dr_tbl_attr = {0};
7078 	struct rte_flow_error *error = ctx->error;
7079 	struct mlx5_flow_group *grp_data;
7080 	struct mlx5dr_table *tbl = NULL;
7081 	struct mlx5dr_action *jump;
7082 	uint32_t idx = 0;
7083 
7084 	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
7085 	if (!grp_data) {
7086 		rte_flow_error_set(error, ENOMEM,
7087 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7088 				   NULL,
7089 				   "cannot allocate flow table data entry");
7090 		return NULL;
7091 	}
7092 	dr_tbl_attr.level = attr->group;
7093 	if (attr->transfer)
7094 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
7095 	else if (attr->egress)
7096 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
7097 	else
7098 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
7099 	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
7100 	if (!tbl)
7101 		goto error;
7102 	grp_data->tbl = tbl;
7103 	if (attr->group) {
7104 		/* Jump action be used by non-root table. */
7105 		jump = mlx5dr_action_create_dest_table
7106 			(priv->dr_ctx, tbl,
7107 			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
7108 		if (!jump)
7109 			goto error;
7110 		grp_data->jump.hws_action = jump;
7111 		/* Jump action be used by root table.  */
7112 		jump = mlx5dr_action_create_dest_table
7113 			(priv->dr_ctx, tbl,
7114 			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
7115 					 [dr_tbl_attr.type]);
7116 		if (!jump)
7117 			goto error;
7118 		grp_data->jump.root_action = jump;
7119 	}
7120 	grp_data->dev = dev;
7121 	grp_data->idx = idx;
7122 	grp_data->group_id = attr->group;
7123 	grp_data->type = dr_tbl_attr.type;
7124 	return &grp_data->entry;
7125 error:
7126 	if (grp_data->jump.root_action)
7127 		mlx5dr_action_destroy(grp_data->jump.root_action);
7128 	if (grp_data->jump.hws_action)
7129 		mlx5dr_action_destroy(grp_data->jump.hws_action);
7130 	if (tbl)
7131 		mlx5dr_table_destroy(tbl);
7132 	if (idx)
7133 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
7134 	rte_flow_error_set(error, ENOMEM,
7135 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7136 			   NULL,
7137 			   "cannot allocate flow dr table");
7138 	return NULL;
7139 }
7140 
7141 /**
7142  * Remove group callback.
7143  *
7144  * @param[in] tool_ctx
7145  *   Pointer to the hash list related context.
7146  * @param[in] entry
7147  *   Pointer to the entry to be removed.
7148  */
7149 void
7150 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
7151 {
7152 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
7153 	struct mlx5_flow_group *grp_data =
7154 		    container_of(entry, struct mlx5_flow_group, entry);
7155 
7156 	MLX5_ASSERT(entry && sh);
7157 	/* To use the wrapper glue functions instead. */
7158 	if (grp_data->jump.hws_action)
7159 		mlx5dr_action_destroy(grp_data->jump.hws_action);
7160 	if (grp_data->jump.root_action)
7161 		mlx5dr_action_destroy(grp_data->jump.root_action);
7162 	mlx5dr_table_destroy(grp_data->tbl);
7163 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
7164 }
7165 
7166 /**
7167  * Match group callback.
7168  *
7169  * @param[in] tool_ctx
7170  *   Pointer to the hash list related context.
7171  * @param[in] entry
7172  *   Pointer to the group to be matched.
7173  * @param[in] cb_ctx
7174  *   Pointer to the group matching context.
7175  *
7176  * @return
7177  *   0 on matched, 1 on miss matched.
7178  */
7179 int
7180 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
7181 		     void *cb_ctx)
7182 {
7183 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7184 	struct mlx5_flow_group *grp_data =
7185 		container_of(entry, struct mlx5_flow_group, entry);
7186 	struct rte_flow_attr *attr =
7187 			(struct rte_flow_attr *)ctx->data;
7188 
7189 	return (grp_data->dev != ctx->dev) ||
7190 		(grp_data->group_id != attr->group) ||
7191 		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
7192 		attr->transfer) ||
7193 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
7194 		attr->egress) ||
7195 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
7196 		attr->ingress);
7197 }
7198 
7199 /**
7200  * Clone group entry callback.
7201  *
7202  * @param[in] tool_ctx
7203  *   Pointer to the hash list related context.
7204  * @param[in] entry
7205  *   Pointer to the group to be matched.
7206  * @param[in] cb_ctx
7207  *   Pointer to the group matching context.
7208  *
7209  * @return
7210  *   0 on matched, 1 on miss matched.
7211  */
7212 struct mlx5_list_entry *
7213 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
7214 		     void *cb_ctx)
7215 {
7216 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
7217 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7218 	struct mlx5_flow_group *grp_data;
7219 	struct rte_flow_error *error = ctx->error;
7220 	uint32_t idx = 0;
7221 
7222 	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
7223 	if (!grp_data) {
7224 		rte_flow_error_set(error, ENOMEM,
7225 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7226 				   NULL,
7227 				   "cannot allocate flow table data entry");
7228 		return NULL;
7229 	}
7230 	memcpy(grp_data, oentry, sizeof(*grp_data));
7231 	grp_data->idx = idx;
7232 	return &grp_data->entry;
7233 }
7234 
7235 /**
7236  * Free cloned group entry callback.
7237  *
7238  * @param[in] tool_ctx
7239  *   Pointer to the hash list related context.
7240  * @param[in] entry
7241  *   Pointer to the group to be freed.
7242  */
7243 void
7244 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
7245 {
7246 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
7247 	struct mlx5_flow_group *grp_data =
7248 		    container_of(entry, struct mlx5_flow_group, entry);
7249 
7250 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
7251 }
7252 
7253 /**
7254  * Create and cache a vport action for given @p dev port. vport actions
7255  * cache is used in HWS with FDB flows.
7256  *
7257  * This function does not create any function if proxy port for @p dev port
7258  * was not configured for HW Steering.
7259  *
7260  * This function assumes that E-Switch is enabled and PMD is running with
7261  * HW Steering configured.
7262  *
7263  * @param dev
7264  *   Pointer to Ethernet device which will be the action destination.
7265  *
7266  * @return
7267  *   0 on success, positive value otherwise.
7268  */
7269 int
7270 flow_hw_create_vport_action(struct rte_eth_dev *dev)
7271 {
7272 	struct mlx5_priv *priv = dev->data->dev_private;
7273 	struct rte_eth_dev *proxy_dev;
7274 	struct mlx5_priv *proxy_priv;
7275 	uint16_t port_id = dev->data->port_id;
7276 	uint16_t proxy_port_id = port_id;
7277 	int ret;
7278 
7279 	ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
7280 	if (ret)
7281 		return ret;
7282 	proxy_dev = &rte_eth_devices[proxy_port_id];
7283 	proxy_priv = proxy_dev->data->dev_private;
7284 	if (!proxy_priv->hw_vport)
7285 		return 0;
7286 	if (proxy_priv->hw_vport[port_id]) {
7287 		DRV_LOG(ERR, "port %u HWS vport action already created",
7288 			port_id);
7289 		return -EINVAL;
7290 	}
7291 	proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
7292 			(proxy_priv->dr_ctx, priv->dev_port,
7293 			 MLX5DR_ACTION_FLAG_HWS_FDB);
7294 	if (!proxy_priv->hw_vport[port_id]) {
7295 		DRV_LOG(ERR, "port %u unable to create HWS vport action",
7296 			port_id);
7297 		return -EINVAL;
7298 	}
7299 	return 0;
7300 }
7301 
7302 /**
7303  * Destroys the vport action associated with @p dev device
7304  * from actions' cache.
7305  *
7306  * This function does not destroy any action if there is no action cached
7307  * for @p dev or proxy port was not configured for HW Steering.
7308  *
7309  * This function assumes that E-Switch is enabled and PMD is running with
7310  * HW Steering configured.
7311  *
7312  * @param dev
7313  *   Pointer to Ethernet device which will be the action destination.
7314  */
7315 void
7316 flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
7317 {
7318 	struct rte_eth_dev *proxy_dev;
7319 	struct mlx5_priv *proxy_priv;
7320 	uint16_t port_id = dev->data->port_id;
7321 	uint16_t proxy_port_id = port_id;
7322 
7323 	if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
7324 		return;
7325 	proxy_dev = &rte_eth_devices[proxy_port_id];
7326 	proxy_priv = proxy_dev->data->dev_private;
7327 	if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
7328 		return;
7329 	mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
7330 	proxy_priv->hw_vport[port_id] = NULL;
7331 }
7332 
7333 static int
7334 flow_hw_create_vport_actions(struct mlx5_priv *priv)
7335 {
7336 	uint16_t port_id;
7337 
7338 	MLX5_ASSERT(!priv->hw_vport);
7339 	priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
7340 				     sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
7341 				     0, SOCKET_ID_ANY);
7342 	if (!priv->hw_vport)
7343 		return -ENOMEM;
7344 	DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
7345 	DRV_LOG(DEBUG, "port %u ::    domain_id=%u", priv->dev_data->port_id, priv->domain_id);
7346 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
7347 		struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;
7348 
7349 		if (!port_priv ||
7350 		    port_priv->domain_id != priv->domain_id)
7351 			continue;
7352 		DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
7353 			priv->dev_data->port_id, port_id, port_priv->dev_port);
7354 		priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
7355 				(priv->dr_ctx, port_priv->dev_port,
7356 				 MLX5DR_ACTION_FLAG_HWS_FDB);
7357 		DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
7358 			priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
7359 		if (!priv->hw_vport[port_id])
7360 			return -EINVAL;
7361 	}
7362 	return 0;
7363 }
7364 
7365 static void
7366 flow_hw_free_vport_actions(struct mlx5_priv *priv)
7367 {
7368 	uint16_t port_id;
7369 
7370 	if (!priv->hw_vport)
7371 		return;
7372 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
7373 		if (priv->hw_vport[port_id])
7374 			mlx5dr_action_destroy(priv->hw_vport[port_id]);
7375 	mlx5_free(priv->hw_vport);
7376 	priv->hw_vport = NULL;
7377 }
7378 
7379 static void
7380 flow_hw_create_send_to_kernel_actions(struct mlx5_priv *priv __rte_unused)
7381 {
7382 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7383 	int action_flag;
7384 	int i;
7385 	bool is_vf_sf_dev = priv->sh->dev_cap.vf || priv->sh->dev_cap.sf;
7386 
7387 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
7388 		if ((!priv->sh->config.dv_esw_en || is_vf_sf_dev) &&
7389 		     i == MLX5DR_TABLE_TYPE_FDB)
7390 			continue;
7391 		action_flag = mlx5_hw_act_flag[1][i];
7392 		priv->hw_send_to_kernel[i] =
7393 				mlx5dr_action_create_dest_root(priv->dr_ctx,
7394 							MLX5_HW_LOWEST_PRIO_ROOT,
7395 							action_flag);
7396 		if (!priv->hw_send_to_kernel[i]) {
7397 			DRV_LOG(WARNING, "Unable to create HWS send to kernel action");
7398 			return;
7399 		}
7400 	}
7401 #endif
7402 }
7403 
7404 static void
7405 flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv)
7406 {
7407 	int i;
7408 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
7409 		if (priv->hw_send_to_kernel[i]) {
7410 			mlx5dr_action_destroy(priv->hw_send_to_kernel[i]);
7411 			priv->hw_send_to_kernel[i] = NULL;
7412 		}
7413 	}
7414 }
7415 
7416 /**
7417  * Create an egress pattern template matching on source SQ.
7418  *
7419  * @param dev
7420  *   Pointer to Ethernet device.
7421  * @param[out] error
7422  *   Pointer to error structure.
7423  *
7424  * @return
7425  *   Pointer to pattern template on success. NULL otherwise, and rte_errno is set.
7426  */
7427 static struct rte_flow_pattern_template *
7428 flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev, struct rte_flow_error *error)
7429 {
7430 	struct rte_flow_pattern_template_attr attr = {
7431 		.relaxed_matching = 0,
7432 		.egress = 1,
7433 	};
7434 	struct mlx5_rte_flow_item_sq sq_mask = {
7435 		.queue = UINT32_MAX,
7436 	};
7437 	struct rte_flow_item items[] = {
7438 		{
7439 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
7440 			.mask = &sq_mask,
7441 		},
7442 		{
7443 			.type = RTE_FLOW_ITEM_TYPE_END,
7444 		},
7445 	};
7446 
7447 	return flow_hw_pattern_template_create(dev, &attr, items, error);
7448 }
7449 
7450 static __rte_always_inline uint32_t
7451 flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
7452 {
7453 	struct mlx5_priv *priv = dev->data->dev_private;
7454 	uint32_t mask = priv->sh->dv_regc0_mask;
7455 
7456 	/* Mask is verified during device initialization. Sanity checking here. */
7457 	MLX5_ASSERT(mask != 0);
7458 	/*
7459 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
7460 	 * Sanity checking here.
7461 	 */
7462 	MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask));
7463 	return mask;
7464 }
7465 
7466 static __rte_always_inline uint32_t
7467 flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
7468 {
7469 	struct mlx5_priv *priv = dev->data->dev_private;
7470 	uint32_t tag;
7471 
7472 	/* Mask is verified during device initialization. Sanity checking here. */
7473 	MLX5_ASSERT(priv->vport_meta_mask != 0);
7474 	tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
7475 	/*
7476 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
7477 	 * Sanity checking here.
7478 	 */
7479 	MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
7480 	return tag;
7481 }
7482 
7483 static void
7484 flow_hw_update_action_mask(struct rte_flow_action *action,
7485 			   struct rte_flow_action *mask,
7486 			   enum rte_flow_action_type type,
7487 			   void *conf_v,
7488 			   void *conf_m)
7489 {
7490 	action->type = type;
7491 	action->conf = conf_v;
7492 	mask->type = type;
7493 	mask->conf = conf_m;
7494 }
7495 
7496 /**
7497  * Create an egress actions template with MODIFY_FIELD action for setting unused REG_C_0 bits
7498  * to vport tag and JUMP action to group 1.
7499  *
7500  * If extended metadata mode is enabled, then MODIFY_FIELD action for copying software metadata
7501  * to REG_C_1 is added as well.
7502  *
7503  * @param dev
7504  *   Pointer to Ethernet device.
7505  * @param[out] error
7506  *   Pointer to error structure.
7507  *
7508  * @return
7509  *   Pointer to actions template on success. NULL otherwise, and rte_errno is set.
7510  */
7511 static struct rte_flow_actions_template *
7512 flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev,
7513 					  struct rte_flow_error *error)
7514 {
7515 	uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev);
7516 	uint32_t tag_value = flow_hw_tx_tag_regc_value(dev);
7517 	struct rte_flow_actions_template_attr attr = {
7518 		.egress = 1,
7519 	};
7520 	struct rte_flow_action_modify_field set_tag_v = {
7521 		.operation = RTE_FLOW_MODIFY_SET,
7522 		.dst = {
7523 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7524 			.tag_index = REG_C_0,
7525 			.offset = rte_bsf32(tag_mask),
7526 		},
7527 		.src = {
7528 			.field = RTE_FLOW_FIELD_VALUE,
7529 		},
7530 		.width = rte_popcount32(tag_mask),
7531 	};
7532 	struct rte_flow_action_modify_field set_tag_m = {
7533 		.operation = RTE_FLOW_MODIFY_SET,
7534 		.dst = {
7535 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7536 			.level = UINT8_MAX,
7537 			.tag_index = UINT8_MAX,
7538 			.offset = UINT32_MAX,
7539 		},
7540 		.src = {
7541 			.field = RTE_FLOW_FIELD_VALUE,
7542 		},
7543 		.width = UINT32_MAX,
7544 	};
7545 	struct rte_flow_action_modify_field copy_metadata_v = {
7546 		.operation = RTE_FLOW_MODIFY_SET,
7547 		.dst = {
7548 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7549 			.tag_index = REG_C_1,
7550 		},
7551 		.src = {
7552 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7553 			.tag_index = REG_A,
7554 		},
7555 		.width = 32,
7556 	};
7557 	struct rte_flow_action_modify_field copy_metadata_m = {
7558 		.operation = RTE_FLOW_MODIFY_SET,
7559 		.dst = {
7560 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7561 			.level = UINT8_MAX,
7562 			.tag_index = UINT8_MAX,
7563 			.offset = UINT32_MAX,
7564 		},
7565 		.src = {
7566 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7567 			.level = UINT8_MAX,
7568 			.tag_index = UINT8_MAX,
7569 			.offset = UINT32_MAX,
7570 		},
7571 		.width = UINT32_MAX,
7572 	};
7573 	struct rte_flow_action_jump jump_v = {
7574 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
7575 	};
7576 	struct rte_flow_action_jump jump_m = {
7577 		.group = UINT32_MAX,
7578 	};
7579 	struct rte_flow_action actions_v[4] = { { 0 } };
7580 	struct rte_flow_action actions_m[4] = { { 0 } };
7581 	unsigned int idx = 0;
7582 
7583 	rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
7584 	rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
7585 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
7586 				   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7587 				   &set_tag_v, &set_tag_m);
7588 	idx++;
7589 	if (MLX5_SH(dev)->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
7590 		flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
7591 					   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7592 					   &copy_metadata_v, &copy_metadata_m);
7593 		idx++;
7594 	}
7595 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_JUMP,
7596 				   &jump_v, &jump_m);
7597 	idx++;
7598 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_END,
7599 				   NULL, NULL);
7600 	idx++;
7601 	MLX5_ASSERT(idx <= RTE_DIM(actions_v));
7602 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
7603 }
7604 
7605 static void
7606 flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev)
7607 {
7608 	struct mlx5_priv *priv = dev->data->dev_private;
7609 
7610 	if (priv->hw_tx_repr_tagging_tbl) {
7611 		flow_hw_table_destroy(dev, priv->hw_tx_repr_tagging_tbl, NULL);
7612 		priv->hw_tx_repr_tagging_tbl = NULL;
7613 	}
7614 	if (priv->hw_tx_repr_tagging_at) {
7615 		flow_hw_actions_template_destroy(dev, priv->hw_tx_repr_tagging_at, NULL);
7616 		priv->hw_tx_repr_tagging_at = NULL;
7617 	}
7618 	if (priv->hw_tx_repr_tagging_pt) {
7619 		flow_hw_pattern_template_destroy(dev, priv->hw_tx_repr_tagging_pt, NULL);
7620 		priv->hw_tx_repr_tagging_pt = NULL;
7621 	}
7622 }
7623 
7624 /**
7625  * Setup templates and table used to create default Tx flow rules. These default rules
7626  * allow for matching Tx representor traffic using a vport tag placed in unused bits of
7627  * REG_C_0 register.
7628  *
7629  * @param dev
7630  *   Pointer to Ethernet device.
7631  * @param[out] error
7632  *   Pointer to error structure.
7633  *
7634  * @return
7635  *   0 on success, negative errno value otherwise.
7636  */
7637 static int
7638 flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev, struct rte_flow_error *error)
7639 {
7640 	struct mlx5_priv *priv = dev->data->dev_private;
7641 	struct rte_flow_template_table_attr attr = {
7642 		.flow_attr = {
7643 			.group = 0,
7644 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
7645 			.egress = 1,
7646 		},
7647 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
7648 	};
7649 	struct mlx5_flow_template_table_cfg cfg = {
7650 		.attr = attr,
7651 		.external = false,
7652 	};
7653 
7654 	MLX5_ASSERT(priv->sh->config.dv_esw_en);
7655 	MLX5_ASSERT(priv->sh->config.repr_matching);
7656 	priv->hw_tx_repr_tagging_pt =
7657 		flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error);
7658 	if (!priv->hw_tx_repr_tagging_pt)
7659 		goto err;
7660 	priv->hw_tx_repr_tagging_at =
7661 		flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev, error);
7662 	if (!priv->hw_tx_repr_tagging_at)
7663 		goto err;
7664 	priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg,
7665 							    &priv->hw_tx_repr_tagging_pt, 1,
7666 							    &priv->hw_tx_repr_tagging_at, 1,
7667 							    error);
7668 	if (!priv->hw_tx_repr_tagging_tbl)
7669 		goto err;
7670 	return 0;
7671 err:
7672 	flow_hw_cleanup_tx_repr_tagging(dev);
7673 	return -rte_errno;
7674 }
7675 
7676 static uint32_t
7677 flow_hw_esw_mgr_regc_marker_mask(struct rte_eth_dev *dev)
7678 {
7679 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
7680 
7681 	/* Mask is verified during device initialization. */
7682 	MLX5_ASSERT(mask != 0);
7683 	return mask;
7684 }
7685 
7686 static uint32_t
7687 flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev)
7688 {
7689 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
7690 
7691 	/* Mask is verified during device initialization. */
7692 	MLX5_ASSERT(mask != 0);
7693 	return RTE_BIT32(rte_bsf32(mask));
7694 }
7695 
7696 /**
7697  * Creates a flow pattern template used to match on E-Switch Manager.
7698  * This template is used to set up a table for SQ miss default flow.
7699  *
7700  * @param dev
7701  *   Pointer to Ethernet device.
7702  * @param error
7703  *   Pointer to error structure.
7704  *
7705  * @return
7706  *   Pointer to flow pattern template on success, NULL otherwise.
7707  */
7708 static struct rte_flow_pattern_template *
7709 flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev,
7710 					     struct rte_flow_error *error)
7711 {
7712 	struct rte_flow_pattern_template_attr attr = {
7713 		.relaxed_matching = 0,
7714 		.transfer = 1,
7715 	};
7716 	struct rte_flow_item_ethdev port_spec = {
7717 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
7718 	};
7719 	struct rte_flow_item_ethdev port_mask = {
7720 		.port_id = UINT16_MAX,
7721 	};
7722 	struct mlx5_rte_flow_item_sq sq_mask = {
7723 		.queue = UINT32_MAX,
7724 	};
7725 	struct rte_flow_item items[] = {
7726 		{
7727 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
7728 			.spec = &port_spec,
7729 			.mask = &port_mask,
7730 		},
7731 		{
7732 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
7733 			.mask = &sq_mask,
7734 		},
7735 		{
7736 			.type = RTE_FLOW_ITEM_TYPE_END,
7737 		},
7738 	};
7739 
7740 	return flow_hw_pattern_template_create(dev, &attr, items, error);
7741 }
7742 
7743 /**
7744  * Creates a flow pattern template used to match REG_C_0 and a SQ.
7745  * Matching on REG_C_0 is set up to match on all bits usable by user-space.
7746  * If traffic was sent from E-Switch Manager, then all usable bits will be set to 0,
7747  * except the least significant bit, which will be set to 1.
7748  *
7749  * This template is used to set up a table for SQ miss default flow.
7750  *
7751  * @param dev
7752  *   Pointer to Ethernet device.
7753  * @param error
7754  *   Pointer to error structure.
7755  *
7756  * @return
7757  *   Pointer to flow pattern template on success, NULL otherwise.
7758  */
7759 static struct rte_flow_pattern_template *
7760 flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev,
7761 					     struct rte_flow_error *error)
7762 {
7763 	struct rte_flow_pattern_template_attr attr = {
7764 		.relaxed_matching = 0,
7765 		.transfer = 1,
7766 	};
7767 	struct rte_flow_item_tag reg_c0_spec = {
7768 		.index = (uint8_t)REG_C_0,
7769 	};
7770 	struct rte_flow_item_tag reg_c0_mask = {
7771 		.index = 0xff,
7772 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
7773 	};
7774 	struct mlx5_rte_flow_item_sq queue_mask = {
7775 		.queue = UINT32_MAX,
7776 	};
7777 	struct rte_flow_item items[] = {
7778 		{
7779 			.type = (enum rte_flow_item_type)
7780 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
7781 			.spec = &reg_c0_spec,
7782 			.mask = &reg_c0_mask,
7783 		},
7784 		{
7785 			.type = (enum rte_flow_item_type)
7786 				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
7787 			.mask = &queue_mask,
7788 		},
7789 		{
7790 			.type = RTE_FLOW_ITEM_TYPE_END,
7791 		},
7792 	};
7793 
7794 	return flow_hw_pattern_template_create(dev, &attr, items, error);
7795 }
7796 
7797 /**
7798  * Creates a flow pattern template with unmasked represented port matching.
7799  * This template is used to set up a table for default transfer flows
7800  * directing packets to group 1.
7801  *
7802  * @param dev
7803  *   Pointer to Ethernet device.
7804  * @param error
7805  *   Pointer to error structure.
7806  *
7807  * @return
7808  *   Pointer to flow pattern template on success, NULL otherwise.
7809  */
7810 static struct rte_flow_pattern_template *
7811 flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev,
7812 					  struct rte_flow_error *error)
7813 {
7814 	struct rte_flow_pattern_template_attr attr = {
7815 		.relaxed_matching = 0,
7816 		.transfer = 1,
7817 	};
7818 	struct rte_flow_item_ethdev port_mask = {
7819 		.port_id = UINT16_MAX,
7820 	};
7821 	struct rte_flow_item items[] = {
7822 		{
7823 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
7824 			.mask = &port_mask,
7825 		},
7826 		{
7827 			.type = RTE_FLOW_ITEM_TYPE_END,
7828 		},
7829 	};
7830 
7831 	return flow_hw_pattern_template_create(dev, &attr, items, error);
7832 }
7833 
7834 /*
7835  * Creating a flow pattern template with all ETH packets matching.
7836  * This template is used to set up a table for default Tx copy (Tx metadata
7837  * to REG_C_1) flow rule usage.
7838  *
7839  * @param dev
7840  *   Pointer to Ethernet device.
7841  * @param error
7842  *   Pointer to error structure.
7843  *
7844  * @return
7845  *   Pointer to flow pattern template on success, NULL otherwise.
7846  */
7847 static struct rte_flow_pattern_template *
7848 flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev,
7849 						     struct rte_flow_error *error)
7850 {
7851 	struct rte_flow_pattern_template_attr tx_pa_attr = {
7852 		.relaxed_matching = 0,
7853 		.egress = 1,
7854 	};
7855 	struct rte_flow_item_eth promisc = {
7856 		.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
7857 		.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
7858 		.hdr.ether_type = 0,
7859 	};
7860 	struct rte_flow_item eth_all[] = {
7861 		[0] = {
7862 			.type = RTE_FLOW_ITEM_TYPE_ETH,
7863 			.spec = &promisc,
7864 			.mask = &promisc,
7865 		},
7866 		[1] = {
7867 			.type = RTE_FLOW_ITEM_TYPE_END,
7868 		},
7869 	};
7870 
7871 	return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, error);
7872 }
7873 
7874 /*
7875  * Creating a flow pattern template with all LACP packets matching, only for NIC
7876  * ingress domain.
7877  *
7878  * @param dev
7879  *   Pointer to Ethernet device.
7880  * @param error
7881  *   Pointer to error structure.
7882  *
7883  * @return
7884  *   Pointer to flow pattern template on success, NULL otherwise.
7885  */
7886 static struct rte_flow_pattern_template *
7887 flow_hw_create_lacp_rx_pattern_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
7888 {
7889 	struct rte_flow_pattern_template_attr pa_attr = {
7890 		.relaxed_matching = 0,
7891 		.ingress = 1,
7892 	};
7893 	struct rte_flow_item_eth lacp_mask = {
7894 		.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
7895 		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
7896 		.type = 0xFFFF,
7897 	};
7898 	struct rte_flow_item eth_all[] = {
7899 		[0] = {
7900 			.type = RTE_FLOW_ITEM_TYPE_ETH,
7901 			.mask = &lacp_mask,
7902 		},
7903 		[1] = {
7904 			.type = RTE_FLOW_ITEM_TYPE_END,
7905 		},
7906 	};
7907 	return flow_hw_pattern_template_create(dev, &pa_attr, eth_all, error);
7908 }
7909 
7910 /**
7911  * Creates a flow actions template with modify field action and masked jump action.
7912  * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)
7913  * to 1, meaning that packet was originated from E-Switch Manager. Jump action
7914  * transfers steering to group 1.
7915  *
7916  * @param dev
7917  *   Pointer to Ethernet device.
7918  * @param error
7919  *   Pointer to error structure.
7920  *
7921  * @return
7922  *   Pointer to flow actions template on success, NULL otherwise.
7923  */
7924 static struct rte_flow_actions_template *
7925 flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev,
7926 					       struct rte_flow_error *error)
7927 {
7928 	uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev);
7929 	uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev);
7930 	struct rte_flow_actions_template_attr attr = {
7931 		.transfer = 1,
7932 	};
7933 	struct rte_flow_action_modify_field set_reg_v = {
7934 		.operation = RTE_FLOW_MODIFY_SET,
7935 		.dst = {
7936 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7937 			.tag_index = REG_C_0,
7938 		},
7939 		.src = {
7940 			.field = RTE_FLOW_FIELD_VALUE,
7941 		},
7942 		.width = rte_popcount32(marker_mask),
7943 	};
7944 	struct rte_flow_action_modify_field set_reg_m = {
7945 		.operation = RTE_FLOW_MODIFY_SET,
7946 		.dst = {
7947 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
7948 			.level = UINT8_MAX,
7949 			.tag_index = UINT8_MAX,
7950 			.offset = UINT32_MAX,
7951 		},
7952 		.src = {
7953 			.field = RTE_FLOW_FIELD_VALUE,
7954 		},
7955 		.width = UINT32_MAX,
7956 	};
7957 	struct rte_flow_action_jump jump_v = {
7958 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
7959 	};
7960 	struct rte_flow_action_jump jump_m = {
7961 		.group = UINT32_MAX,
7962 	};
7963 	struct rte_flow_action actions_v[] = {
7964 		{
7965 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7966 			.conf = &set_reg_v,
7967 		},
7968 		{
7969 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
7970 			.conf = &jump_v,
7971 		},
7972 		{
7973 			.type = RTE_FLOW_ACTION_TYPE_END,
7974 		}
7975 	};
7976 	struct rte_flow_action actions_m[] = {
7977 		{
7978 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7979 			.conf = &set_reg_m,
7980 		},
7981 		{
7982 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
7983 			.conf = &jump_m,
7984 		},
7985 		{
7986 			.type = RTE_FLOW_ACTION_TYPE_END,
7987 		}
7988 	};
7989 
7990 	set_reg_v.dst.offset = rte_bsf32(marker_mask);
7991 	rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
7992 	rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
7993 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
7994 }
7995 
7996 /**
7997  * Creates a flow actions template with an unmasked JUMP action. Flows
7998  * based on this template will perform a jump to some group. This template
7999  * is used to set up tables for control flows.
8000  *
8001  * @param dev
8002  *   Pointer to Ethernet device.
8003  * @param group
8004  *   Destination group for this action template.
8005  * @param error
8006  *   Pointer to error structure.
8007  *
8008  * @return
8009  *   Pointer to flow actions template on success, NULL otherwise.
8010  */
8011 static struct rte_flow_actions_template *
8012 flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
8013 					  uint32_t group,
8014 					  struct rte_flow_error *error)
8015 {
8016 	struct rte_flow_actions_template_attr attr = {
8017 		.transfer = 1,
8018 	};
8019 	struct rte_flow_action_jump jump_v = {
8020 		.group = group,
8021 	};
8022 	struct rte_flow_action_jump jump_m = {
8023 		.group = UINT32_MAX,
8024 	};
8025 	struct rte_flow_action actions_v[] = {
8026 		{
8027 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8028 			.conf = &jump_v,
8029 		},
8030 		{
8031 			.type = RTE_FLOW_ACTION_TYPE_END,
8032 		}
8033 	};
8034 	struct rte_flow_action actions_m[] = {
8035 		{
8036 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8037 			.conf = &jump_m,
8038 		},
8039 		{
8040 			.type = RTE_FLOW_ACTION_TYPE_END,
8041 		}
8042 	};
8043 
8044 	return flow_hw_actions_template_create(dev, &attr, actions_v,
8045 					       actions_m, error);
8046 }
8047 
8048 /**
8049  * Creates a flow action template with a unmasked REPRESENTED_PORT action.
8050  * It is used to create control flow tables.
8051  *
8052  * @param dev
8053  *   Pointer to Ethernet device.
8054  * @param error
8055  *   Pointer to error structure.
8056  *
8057  * @return
8058  *   Pointer to flow action template on success, NULL otherwise.
8059  */
8060 static struct rte_flow_actions_template *
8061 flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev,
8062 					  struct rte_flow_error *error)
8063 {
8064 	struct rte_flow_actions_template_attr attr = {
8065 		.transfer = 1,
8066 	};
8067 	struct rte_flow_action_ethdev port_v = {
8068 		.port_id = 0,
8069 	};
8070 	struct rte_flow_action actions_v[] = {
8071 		{
8072 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
8073 			.conf = &port_v,
8074 		},
8075 		{
8076 			.type = RTE_FLOW_ACTION_TYPE_END,
8077 		}
8078 	};
8079 	struct rte_flow_action_ethdev port_m = {
8080 		.port_id = 0,
8081 	};
8082 	struct rte_flow_action actions_m[] = {
8083 		{
8084 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
8085 			.conf = &port_m,
8086 		},
8087 		{
8088 			.type = RTE_FLOW_ACTION_TYPE_END,
8089 		}
8090 	};
8091 
8092 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
8093 }
8094 
8095 /*
8096  * Creating an actions template to use header modify action for register
8097  * copying. This template is used to set up a table for copy flow.
8098  *
8099  * @param dev
8100  *   Pointer to Ethernet device.
8101  * @param error
8102  *   Pointer to error structure.
8103  *
8104  * @return
8105  *   Pointer to flow actions template on success, NULL otherwise.
8106  */
8107 static struct rte_flow_actions_template *
8108 flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev,
8109 						     struct rte_flow_error *error)
8110 {
8111 	struct rte_flow_actions_template_attr tx_act_attr = {
8112 		.egress = 1,
8113 	};
8114 	const struct rte_flow_action_modify_field mreg_action = {
8115 		.operation = RTE_FLOW_MODIFY_SET,
8116 		.dst = {
8117 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8118 			.tag_index = REG_C_1,
8119 		},
8120 		.src = {
8121 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8122 			.tag_index = REG_A,
8123 		},
8124 		.width = 32,
8125 	};
8126 	const struct rte_flow_action_modify_field mreg_mask = {
8127 		.operation = RTE_FLOW_MODIFY_SET,
8128 		.dst = {
8129 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8130 			.level = UINT8_MAX,
8131 			.tag_index = UINT8_MAX,
8132 			.offset = UINT32_MAX,
8133 		},
8134 		.src = {
8135 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8136 			.level = UINT8_MAX,
8137 			.tag_index = UINT8_MAX,
8138 			.offset = UINT32_MAX,
8139 		},
8140 		.width = UINT32_MAX,
8141 	};
8142 	const struct rte_flow_action_jump jump_action = {
8143 		.group = 1,
8144 	};
8145 	const struct rte_flow_action_jump jump_mask = {
8146 		.group = UINT32_MAX,
8147 	};
8148 	const struct rte_flow_action actions[] = {
8149 		[0] = {
8150 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8151 			.conf = &mreg_action,
8152 		},
8153 		[1] = {
8154 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8155 			.conf = &jump_action,
8156 		},
8157 		[2] = {
8158 			.type = RTE_FLOW_ACTION_TYPE_END,
8159 		},
8160 	};
8161 	const struct rte_flow_action masks[] = {
8162 		[0] = {
8163 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8164 			.conf = &mreg_mask,
8165 		},
8166 		[1] = {
8167 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8168 			.conf = &jump_mask,
8169 		},
8170 		[2] = {
8171 			.type = RTE_FLOW_ACTION_TYPE_END,
8172 		},
8173 	};
8174 
8175 	return flow_hw_actions_template_create(dev, &tx_act_attr, actions,
8176 					       masks, error);
8177 }
8178 
8179 /*
8180  * Creating an actions template to use default miss to re-route packets to the
8181  * kernel driver stack.
8182  * On root table, only DEFAULT_MISS action can be used.
8183  *
8184  * @param dev
8185  *   Pointer to Ethernet device.
8186  * @param error
8187  *   Pointer to error structure.
8188  *
8189  * @return
8190  *   Pointer to flow actions template on success, NULL otherwise.
8191  */
8192 static struct rte_flow_actions_template *
8193 flow_hw_create_lacp_rx_actions_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
8194 {
8195 	struct rte_flow_actions_template_attr act_attr = {
8196 		.ingress = 1,
8197 	};
8198 	const struct rte_flow_action actions[] = {
8199 		[0] = {
8200 			.type = (enum rte_flow_action_type)
8201 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
8202 		},
8203 		[1] = {
8204 			.type = RTE_FLOW_ACTION_TYPE_END,
8205 		},
8206 	};
8207 
8208 	return flow_hw_actions_template_create(dev, &act_attr, actions, actions, error);
8209 }
8210 
8211 /**
8212  * Creates a control flow table used to transfer traffic from E-Switch Manager
8213  * and TX queues from group 0 to group 1.
8214  *
8215  * @param dev
8216  *   Pointer to Ethernet device.
8217  * @param it
8218  *   Pointer to flow pattern template.
8219  * @param at
8220  *   Pointer to flow actions template.
8221  * @param error
8222  *   Pointer to error structure.
8223  *
8224  * @return
8225  *   Pointer to flow table on success, NULL otherwise.
8226  */
8227 static struct rte_flow_template_table*
8228 flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
8229 				       struct rte_flow_pattern_template *it,
8230 				       struct rte_flow_actions_template *at,
8231 				       struct rte_flow_error *error)
8232 {
8233 	struct rte_flow_template_table_attr attr = {
8234 		.flow_attr = {
8235 			.group = 0,
8236 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
8237 			.ingress = 0,
8238 			.egress = 0,
8239 			.transfer = 1,
8240 		},
8241 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
8242 	};
8243 	struct mlx5_flow_template_table_cfg cfg = {
8244 		.attr = attr,
8245 		.external = false,
8246 	};
8247 
8248 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
8249 }
8250 
8251 
8252 /**
8253  * Creates a control flow table used to transfer traffic from E-Switch Manager
8254  * and TX queues from group 0 to group 1.
8255  *
8256  * @param dev
8257  *   Pointer to Ethernet device.
8258  * @param it
8259  *   Pointer to flow pattern template.
8260  * @param at
8261  *   Pointer to flow actions template.
8262  * @param error
8263  *   Pointer to error structure.
8264  *
8265  * @return
8266  *   Pointer to flow table on success, NULL otherwise.
8267  */
8268 static struct rte_flow_template_table*
8269 flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
8270 				  struct rte_flow_pattern_template *it,
8271 				  struct rte_flow_actions_template *at,
8272 				  struct rte_flow_error *error)
8273 {
8274 	struct rte_flow_template_table_attr attr = {
8275 		.flow_attr = {
8276 			.group = 1,
8277 			.priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,
8278 			.ingress = 0,
8279 			.egress = 0,
8280 			.transfer = 1,
8281 		},
8282 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
8283 	};
8284 	struct mlx5_flow_template_table_cfg cfg = {
8285 		.attr = attr,
8286 		.external = false,
8287 	};
8288 
8289 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
8290 }
8291 
8292 /*
8293  * Creating the default Tx metadata copy table on NIC Tx group 0.
8294  *
8295  * @param dev
8296  *   Pointer to Ethernet device.
8297  * @param pt
8298  *   Pointer to flow pattern template.
8299  * @param at
8300  *   Pointer to flow actions template.
8301  * @param error
8302  *   Pointer to error structure.
8303  *
8304  * @return
8305  *   Pointer to flow table on success, NULL otherwise.
8306  */
8307 static struct rte_flow_template_table*
8308 flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
8309 					  struct rte_flow_pattern_template *pt,
8310 					  struct rte_flow_actions_template *at,
8311 					  struct rte_flow_error *error)
8312 {
8313 	struct rte_flow_template_table_attr tx_tbl_attr = {
8314 		.flow_attr = {
8315 			.group = 0, /* Root */
8316 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
8317 			.egress = 1,
8318 		},
8319 		.nb_flows = 1, /* One default flow rule for all. */
8320 	};
8321 	struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
8322 		.attr = tx_tbl_attr,
8323 		.external = false,
8324 	};
8325 
8326 	return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, error);
8327 }
8328 
8329 /**
8330  * Creates a control flow table used to transfer traffic
8331  * from group 0 to group 1.
8332  *
8333  * @param dev
8334  *   Pointer to Ethernet device.
8335  * @param it
8336  *   Pointer to flow pattern template.
8337  * @param at
8338  *   Pointer to flow actions template.
8339  * @param error
8340  *   Pointer to error structure.
8341  *
8342  * @return
8343  *   Pointer to flow table on success, NULL otherwise.
8344  */
8345 static struct rte_flow_template_table *
8346 flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
8347 			       struct rte_flow_pattern_template *it,
8348 			       struct rte_flow_actions_template *at,
8349 			       struct rte_flow_error *error)
8350 {
8351 	struct rte_flow_template_table_attr attr = {
8352 		.flow_attr = {
8353 			.group = 0,
8354 			.priority = 0,
8355 			.ingress = 0,
8356 			.egress = 0,
8357 			.transfer = 1,
8358 		},
8359 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
8360 	};
8361 	struct mlx5_flow_template_table_cfg cfg = {
8362 		.attr = attr,
8363 		.external = false,
8364 	};
8365 
8366 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
8367 }
8368 
8369 /*
8370  * Create a table on the root group to for the LACP traffic redirecting.
8371  *
8372  * @param dev
8373  *   Pointer to Ethernet device.
8374  * @param it
8375  *   Pointer to flow pattern template.
8376  * @param at
8377  *   Pointer to flow actions template.
8378  *
8379  * @return
8380  *   Pointer to flow table on success, NULL otherwise.
8381  */
8382 static struct rte_flow_template_table *
8383 flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev,
8384 			     struct rte_flow_pattern_template *it,
8385 			     struct rte_flow_actions_template *at,
8386 			     struct rte_flow_error *error)
8387 {
8388 	struct rte_flow_template_table_attr attr = {
8389 		.flow_attr = {
8390 			.group = 0,
8391 			.priority = 0,
8392 			.ingress = 1,
8393 			.egress = 0,
8394 			.transfer = 0,
8395 		},
8396 		.nb_flows = 1,
8397 	};
8398 	struct mlx5_flow_template_table_cfg cfg = {
8399 		.attr = attr,
8400 		.external = false,
8401 	};
8402 
8403 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
8404 }
8405 
8406 /**
8407  * Creates a set of flow tables used to create control flows used
8408  * when E-Switch is engaged.
8409  *
8410  * @param dev
8411  *   Pointer to Ethernet device.
8412  * @param error
8413  *   Pointer to error structure.
8414  *
8415  * @return
8416  *   0 on success, negative values otherwise
8417  */
8418 static __rte_unused int
8419 flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
8420 {
8421 	struct mlx5_priv *priv = dev->data->dev_private;
8422 	struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL;
8423 	struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL;
8424 	struct rte_flow_pattern_template *port_items_tmpl = NULL;
8425 	struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL;
8426 	struct rte_flow_pattern_template *lacp_rx_items_tmpl = NULL;
8427 	struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL;
8428 	struct rte_flow_actions_template *port_actions_tmpl = NULL;
8429 	struct rte_flow_actions_template *jump_one_actions_tmpl = NULL;
8430 	struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL;
8431 	struct rte_flow_actions_template *lacp_rx_actions_tmpl = NULL;
8432 	uint32_t xmeta = priv->sh->config.dv_xmeta_en;
8433 	uint32_t repr_matching = priv->sh->config.repr_matching;
8434 	int ret;
8435 
8436 	/* Create templates and table for default SQ miss flow rules - root table. */
8437 	esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
8438 	if (!esw_mgr_items_tmpl) {
8439 		DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
8440 			" template for control flows", dev->data->port_id);
8441 		goto err;
8442 	}
8443 	regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev, error);
8444 	if (!regc_jump_actions_tmpl) {
8445 		DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
8446 			" for control flows", dev->data->port_id);
8447 		goto err;
8448 	}
8449 	MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL);
8450 	priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table
8451 			(dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl, error);
8452 	if (!priv->hw_esw_sq_miss_root_tbl) {
8453 		DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
8454 			" for control flows", dev->data->port_id);
8455 		goto err;
8456 	}
8457 	/* Create templates and table for default SQ miss flow rules - non-root table. */
8458 	regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
8459 	if (!regc_sq_items_tmpl) {
8460 		DRV_LOG(ERR, "port %u failed to create SQ item template for"
8461 			" control flows", dev->data->port_id);
8462 		goto err;
8463 	}
8464 	port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error);
8465 	if (!port_actions_tmpl) {
8466 		DRV_LOG(ERR, "port %u failed to create port action template"
8467 			" for control flows", dev->data->port_id);
8468 		goto err;
8469 	}
8470 	MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL);
8471 	priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl,
8472 								     port_actions_tmpl, error);
8473 	if (!priv->hw_esw_sq_miss_tbl) {
8474 		DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
8475 			" for control flows", dev->data->port_id);
8476 		goto err;
8477 	}
8478 	/* Create templates and table for default FDB jump flow rules. */
8479 	port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error);
8480 	if (!port_items_tmpl) {
8481 		DRV_LOG(ERR, "port %u failed to create SQ item template for"
8482 			" control flows", dev->data->port_id);
8483 		goto err;
8484 	}
8485 	jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template
8486 			(dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
8487 	if (!jump_one_actions_tmpl) {
8488 		DRV_LOG(ERR, "port %u failed to create jump action template"
8489 			" for control flows", dev->data->port_id);
8490 		goto err;
8491 	}
8492 	MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL);
8493 	priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl,
8494 							       jump_one_actions_tmpl,
8495 							       error);
8496 	if (!priv->hw_esw_zero_tbl) {
8497 		DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
8498 			" for control flows", dev->data->port_id);
8499 		goto err;
8500 	}
8501 	/* Create templates and table for default Tx metadata copy flow rule. */
8502 	if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) {
8503 		tx_meta_items_tmpl =
8504 			flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error);
8505 		if (!tx_meta_items_tmpl) {
8506 			DRV_LOG(ERR, "port %u failed to Tx metadata copy pattern"
8507 				" template for control flows", dev->data->port_id);
8508 			goto err;
8509 		}
8510 		tx_meta_actions_tmpl =
8511 			flow_hw_create_tx_default_mreg_copy_actions_template(dev, error);
8512 		if (!tx_meta_actions_tmpl) {
8513 			DRV_LOG(ERR, "port %u failed to Tx metadata copy actions"
8514 				" template for control flows", dev->data->port_id);
8515 			goto err;
8516 		}
8517 		MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL);
8518 		priv->hw_tx_meta_cpy_tbl =
8519 			flow_hw_create_tx_default_mreg_copy_table(dev, tx_meta_items_tmpl,
8520 								  tx_meta_actions_tmpl, error);
8521 		if (!priv->hw_tx_meta_cpy_tbl) {
8522 			DRV_LOG(ERR, "port %u failed to create table for default"
8523 				" Tx metadata copy flow rule", dev->data->port_id);
8524 			goto err;
8525 		}
8526 	}
8527 	/* Create LACP default miss table. */
8528 	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
8529 		lacp_rx_items_tmpl = flow_hw_create_lacp_rx_pattern_template(dev, error);
8530 		if (!lacp_rx_items_tmpl) {
8531 			DRV_LOG(ERR, "port %u failed to create pattern template"
8532 				" for LACP Rx traffic", dev->data->port_id);
8533 			goto err;
8534 		}
8535 		lacp_rx_actions_tmpl = flow_hw_create_lacp_rx_actions_template(dev, error);
8536 		if (!lacp_rx_actions_tmpl) {
8537 			DRV_LOG(ERR, "port %u failed to create actions template"
8538 				" for LACP Rx traffic", dev->data->port_id);
8539 			goto err;
8540 		}
8541 		priv->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table(dev, lacp_rx_items_tmpl,
8542 								    lacp_rx_actions_tmpl, error);
8543 		if (!priv->hw_lacp_rx_tbl) {
8544 			DRV_LOG(ERR, "port %u failed to create template table for"
8545 				" for LACP Rx traffic", dev->data->port_id);
8546 			goto err;
8547 		}
8548 	}
8549 	return 0;
8550 err:
8551 	/* Do not overwrite the rte_errno. */
8552 	ret = -rte_errno;
8553 	if (ret == 0)
8554 		ret = rte_flow_error_set(error, EINVAL,
8555 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8556 					 "Failed to create control tables.");
8557 	if (priv->hw_tx_meta_cpy_tbl) {
8558 		flow_hw_table_destroy(dev, priv->hw_tx_meta_cpy_tbl, NULL);
8559 		priv->hw_tx_meta_cpy_tbl = NULL;
8560 	}
8561 	if (priv->hw_esw_zero_tbl) {
8562 		flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL);
8563 		priv->hw_esw_zero_tbl = NULL;
8564 	}
8565 	if (priv->hw_esw_sq_miss_tbl) {
8566 		flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL);
8567 		priv->hw_esw_sq_miss_tbl = NULL;
8568 	}
8569 	if (priv->hw_esw_sq_miss_root_tbl) {
8570 		flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL);
8571 		priv->hw_esw_sq_miss_root_tbl = NULL;
8572 	}
8573 	if (lacp_rx_actions_tmpl)
8574 		flow_hw_actions_template_destroy(dev, lacp_rx_actions_tmpl, NULL);
8575 	if (tx_meta_actions_tmpl)
8576 		flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL);
8577 	if (jump_one_actions_tmpl)
8578 		flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL);
8579 	if (port_actions_tmpl)
8580 		flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL);
8581 	if (regc_jump_actions_tmpl)
8582 		flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL);
8583 	if (lacp_rx_items_tmpl)
8584 		flow_hw_pattern_template_destroy(dev, lacp_rx_items_tmpl, NULL);
8585 	if (tx_meta_items_tmpl)
8586 		flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL);
8587 	if (port_items_tmpl)
8588 		flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL);
8589 	if (regc_sq_items_tmpl)
8590 		flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL);
8591 	if (esw_mgr_items_tmpl)
8592 		flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL);
8593 	return ret;
8594 }
8595 
8596 static void
8597 flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,
8598 		       struct mlx5_aso_ct_pools_mng *ct_mng)
8599 {
8600 	struct mlx5_priv *priv = dev->data->dev_private;
8601 
8602 	mlx5_aso_ct_queue_uninit(priv->sh, ct_mng);
8603 	mlx5_free(ct_mng);
8604 }
8605 
8606 static void
8607 flow_hw_ct_pool_destroy(struct rte_eth_dev *dev __rte_unused,
8608 			struct mlx5_aso_ct_pool *pool)
8609 {
8610 	if (pool->dr_action)
8611 		mlx5dr_action_destroy(pool->dr_action);
8612 	if (pool->devx_obj)
8613 		claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
8614 	if (pool->cts)
8615 		mlx5_ipool_destroy(pool->cts);
8616 	mlx5_free(pool);
8617 }
8618 
8619 static struct mlx5_aso_ct_pool *
8620 flow_hw_ct_pool_create(struct rte_eth_dev *dev,
8621 		       const struct rte_flow_port_attr *port_attr)
8622 {
8623 	struct mlx5_priv *priv = dev->data->dev_private;
8624 	struct mlx5_aso_ct_pool *pool;
8625 	struct mlx5_devx_obj *obj;
8626 	uint32_t nb_cts = rte_align32pow2(port_attr->nb_conn_tracks);
8627 	uint32_t log_obj_size = rte_log2_u32(nb_cts);
8628 	struct mlx5_indexed_pool_config cfg = {
8629 		.size = sizeof(struct mlx5_aso_ct_action),
8630 		.trunk_size = 1 << 12,
8631 		.per_core_cache = 1 << 13,
8632 		.need_lock = 1,
8633 		.release_mem_en = !!priv->sh->config.reclaim_mode,
8634 		.malloc = mlx5_malloc,
8635 		.free = mlx5_free,
8636 		.type = "mlx5_hw_ct_action",
8637 	};
8638 	int reg_id;
8639 	uint32_t flags;
8640 
8641 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
8642 		DRV_LOG(ERR, "Connection tracking is not supported "
8643 			     "in cross vHCA sharing mode");
8644 		rte_errno = ENOTSUP;
8645 		return NULL;
8646 	}
8647 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
8648 	if (!pool) {
8649 		rte_errno = ENOMEM;
8650 		return NULL;
8651 	}
8652 	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
8653 							  priv->sh->cdev->pdn,
8654 							  log_obj_size);
8655 	if (!obj) {
8656 		rte_errno = ENODATA;
8657 		DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
8658 		goto err;
8659 	}
8660 	pool->devx_obj = obj;
8661 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
8662 	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
8663 	if (priv->sh->config.dv_esw_en && priv->master)
8664 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
8665 	pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
8666 						      (struct mlx5dr_devx_obj *)obj,
8667 						      reg_id - REG_C_0, flags);
8668 	if (!pool->dr_action)
8669 		goto err;
8670 	/*
8671 	 * No need for local cache if CT number is a small number. Since
8672 	 * flow insertion rate will be very limited in that case. Here let's
8673 	 * set the number to less than default trunk size 4K.
8674 	 */
8675 	if (nb_cts <= cfg.trunk_size) {
8676 		cfg.per_core_cache = 0;
8677 		cfg.trunk_size = nb_cts;
8678 	} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
8679 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
8680 	}
8681 	pool->cts = mlx5_ipool_create(&cfg);
8682 	if (!pool->cts)
8683 		goto err;
8684 	pool->sq = priv->ct_mng->aso_sqs;
8685 	/* Assign the last extra ASO SQ as public SQ. */
8686 	pool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];
8687 	return pool;
8688 err:
8689 	flow_hw_ct_pool_destroy(dev, pool);
8690 	return NULL;
8691 }
8692 
8693 static void
8694 flow_hw_destroy_vlan(struct rte_eth_dev *dev)
8695 {
8696 	struct mlx5_priv *priv = dev->data->dev_private;
8697 	enum mlx5dr_table_type i;
8698 
8699 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
8700 		if (priv->hw_pop_vlan[i]) {
8701 			mlx5dr_action_destroy(priv->hw_pop_vlan[i]);
8702 			priv->hw_pop_vlan[i] = NULL;
8703 		}
8704 		if (priv->hw_push_vlan[i]) {
8705 			mlx5dr_action_destroy(priv->hw_push_vlan[i]);
8706 			priv->hw_push_vlan[i] = NULL;
8707 		}
8708 	}
8709 }
8710 
8711 static int
8712 flow_hw_create_vlan(struct rte_eth_dev *dev)
8713 {
8714 	struct mlx5_priv *priv = dev->data->dev_private;
8715 	enum mlx5dr_table_type i;
8716 	const enum mlx5dr_action_flags flags[MLX5DR_TABLE_TYPE_MAX] = {
8717 		MLX5DR_ACTION_FLAG_HWS_RX,
8718 		MLX5DR_ACTION_FLAG_HWS_TX,
8719 		MLX5DR_ACTION_FLAG_HWS_FDB
8720 	};
8721 
8722 	/* rte_errno is set in the mlx5dr_action* functions. */
8723 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++) {
8724 		priv->hw_pop_vlan[i] =
8725 			mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]);
8726 		if (!priv->hw_pop_vlan[i])
8727 			return -rte_errno;
8728 		priv->hw_push_vlan[i] =
8729 			mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]);
8730 		if (!priv->hw_pop_vlan[i])
8731 			return -rte_errno;
8732 	}
8733 	if (priv->sh->config.dv_esw_en && priv->master) {
8734 		priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB] =
8735 			mlx5dr_action_create_pop_vlan
8736 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
8737 		if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB])
8738 			return -rte_errno;
8739 		priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB] =
8740 			mlx5dr_action_create_push_vlan
8741 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
8742 		if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB])
8743 			return -rte_errno;
8744 	}
8745 	return 0;
8746 }
8747 
8748 static void
8749 flow_hw_cleanup_ctrl_rx_tables(struct rte_eth_dev *dev)
8750 {
8751 	struct mlx5_priv *priv = dev->data->dev_private;
8752 	unsigned int i;
8753 	unsigned int j;
8754 
8755 	if (!priv->hw_ctrl_rx)
8756 		return;
8757 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
8758 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
8759 			struct rte_flow_template_table *tbl = priv->hw_ctrl_rx->tables[i][j].tbl;
8760 			struct rte_flow_pattern_template *pt = priv->hw_ctrl_rx->tables[i][j].pt;
8761 
8762 			if (tbl)
8763 				claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
8764 			if (pt)
8765 				claim_zero(flow_hw_pattern_template_destroy(dev, pt, NULL));
8766 		}
8767 	}
8768 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++i) {
8769 		struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[i];
8770 
8771 		if (at)
8772 			claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
8773 	}
8774 	mlx5_free(priv->hw_ctrl_rx);
8775 	priv->hw_ctrl_rx = NULL;
8776 }
8777 
8778 static uint64_t
8779 flow_hw_ctrl_rx_rss_type_hash_types(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
8780 {
8781 	switch (rss_type) {
8782 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP:
8783 		return 0;
8784 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
8785 		return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
8786 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
8787 		return RTE_ETH_RSS_NONFRAG_IPV4_UDP;
8788 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
8789 		return RTE_ETH_RSS_NONFRAG_IPV4_TCP;
8790 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
8791 		return RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
8792 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
8793 		return RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX;
8794 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
8795 		return RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX;
8796 	default:
8797 		/* Should not reach here. */
8798 		MLX5_ASSERT(false);
8799 		return 0;
8800 	}
8801 }
8802 
8803 static struct rte_flow_actions_template *
8804 flow_hw_create_ctrl_rx_rss_template(struct rte_eth_dev *dev,
8805 				    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
8806 {
8807 	struct mlx5_priv *priv = dev->data->dev_private;
8808 	struct rte_flow_actions_template_attr attr = {
8809 		.ingress = 1,
8810 	};
8811 	uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
8812 	struct rte_flow_action_rss rss_conf = {
8813 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
8814 		.level = 0,
8815 		.types = 0,
8816 		.key_len = priv->rss_conf.rss_key_len,
8817 		.key = priv->rss_conf.rss_key,
8818 		.queue_num = priv->reta_idx_n,
8819 		.queue = queue,
8820 	};
8821 	struct rte_flow_action actions[] = {
8822 		{
8823 			.type = RTE_FLOW_ACTION_TYPE_RSS,
8824 			.conf = &rss_conf,
8825 		},
8826 		{
8827 			.type = RTE_FLOW_ACTION_TYPE_END,
8828 		}
8829 	};
8830 	struct rte_flow_action masks[] = {
8831 		{
8832 			.type = RTE_FLOW_ACTION_TYPE_RSS,
8833 			.conf = &rss_conf,
8834 		},
8835 		{
8836 			.type = RTE_FLOW_ACTION_TYPE_END,
8837 		}
8838 	};
8839 	struct rte_flow_actions_template *at;
8840 	struct rte_flow_error error;
8841 	unsigned int i;
8842 
8843 	MLX5_ASSERT(priv->reta_idx_n > 0 && priv->reta_idx);
8844 	/* Select proper RSS hash types and based on that configure the actions template. */
8845 	rss_conf.types = flow_hw_ctrl_rx_rss_type_hash_types(rss_type);
8846 	if (rss_conf.types) {
8847 		for (i = 0; i < priv->reta_idx_n; ++i)
8848 			queue[i] = (*priv->reta_idx)[i];
8849 	} else {
8850 		rss_conf.queue_num = 1;
8851 		queue[0] = (*priv->reta_idx)[0];
8852 	}
8853 	at = flow_hw_actions_template_create(dev, &attr, actions, masks, &error);
8854 	if (!at)
8855 		DRV_LOG(ERR,
8856 			"Failed to create ctrl flow actions template: rte_errno(%d), type(%d): %s",
8857 			rte_errno, error.type,
8858 			error.message ? error.message : "(no stated reason)");
8859 	return at;
8860 }
8861 
8862 static uint32_t ctrl_rx_rss_priority_map[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX] = {
8863 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP] = MLX5_HW_CTRL_RX_PRIO_L2,
8864 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4] = MLX5_HW_CTRL_RX_PRIO_L3,
8865 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
8866 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
8867 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6] = MLX5_HW_CTRL_RX_PRIO_L3,
8868 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
8869 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
8870 };
8871 
8872 static uint32_t ctrl_rx_nb_flows_map[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] = {
8873 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL] = 1,
8874 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST] = 1,
8875 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST] = 1,
8876 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN] = MLX5_MAX_VLAN_IDS,
8877 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST] = 1,
8878 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
8879 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST] = 1,
8880 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
8881 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC] = MLX5_MAX_UC_MAC_ADDRESSES,
8882 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN] =
8883 			MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
8884 };
8885 
8886 static struct rte_flow_template_table_attr
8887 flow_hw_get_ctrl_rx_table_attr(enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
8888 			       const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
8889 {
8890 	return (struct rte_flow_template_table_attr){
8891 		.flow_attr = {
8892 			.group = 0,
8893 			.priority = ctrl_rx_rss_priority_map[rss_type],
8894 			.ingress = 1,
8895 		},
8896 		.nb_flows = ctrl_rx_nb_flows_map[eth_pattern_type],
8897 	};
8898 }
8899 
8900 static struct rte_flow_item
8901 flow_hw_get_ctrl_rx_eth_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
8902 {
8903 	struct rte_flow_item item = {
8904 		.type = RTE_FLOW_ITEM_TYPE_ETH,
8905 		.mask = NULL,
8906 	};
8907 
8908 	switch (eth_pattern_type) {
8909 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
8910 		item.mask = &ctrl_rx_eth_promisc_mask;
8911 		break;
8912 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
8913 		item.mask = &ctrl_rx_eth_mcast_mask;
8914 		break;
8915 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
8916 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
8917 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
8918 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
8919 		item.mask = &ctrl_rx_eth_dmac_mask;
8920 		break;
8921 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
8922 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
8923 		item.mask = &ctrl_rx_eth_ipv4_mcast_mask;
8924 		break;
8925 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
8926 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
8927 		item.mask = &ctrl_rx_eth_ipv6_mcast_mask;
8928 		break;
8929 	default:
8930 		/* Should not reach here - ETH mask must be present. */
8931 		item.type = RTE_FLOW_ITEM_TYPE_END;
8932 		MLX5_ASSERT(false);
8933 		break;
8934 	}
8935 	return item;
8936 }
8937 
8938 static struct rte_flow_item
8939 flow_hw_get_ctrl_rx_vlan_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
8940 {
8941 	struct rte_flow_item item = {
8942 		.type = RTE_FLOW_ITEM_TYPE_VOID,
8943 		.mask = NULL,
8944 	};
8945 
8946 	switch (eth_pattern_type) {
8947 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
8948 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
8949 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
8950 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
8951 		item.type = RTE_FLOW_ITEM_TYPE_VLAN;
8952 		item.mask = &rte_flow_item_vlan_mask;
8953 		break;
8954 	default:
8955 		/* Nothing to update. */
8956 		break;
8957 	}
8958 	return item;
8959 }
8960 
8961 static struct rte_flow_item
8962 flow_hw_get_ctrl_rx_l3_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
8963 {
8964 	struct rte_flow_item item = {
8965 		.type = RTE_FLOW_ITEM_TYPE_VOID,
8966 		.mask = NULL,
8967 	};
8968 
8969 	switch (rss_type) {
8970 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
8971 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
8972 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
8973 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
8974 		break;
8975 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
8976 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
8977 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
8978 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
8979 		break;
8980 	default:
8981 		/* Nothing to update. */
8982 		break;
8983 	}
8984 	return item;
8985 }
8986 
8987 static struct rte_flow_item
8988 flow_hw_get_ctrl_rx_l4_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
8989 {
8990 	struct rte_flow_item item = {
8991 		.type = RTE_FLOW_ITEM_TYPE_VOID,
8992 		.mask = NULL,
8993 	};
8994 
8995 	switch (rss_type) {
8996 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
8997 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
8998 		item.type = RTE_FLOW_ITEM_TYPE_UDP;
8999 		break;
9000 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
9001 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
9002 		item.type = RTE_FLOW_ITEM_TYPE_TCP;
9003 		break;
9004 	default:
9005 		/* Nothing to update. */
9006 		break;
9007 	}
9008 	return item;
9009 }
9010 
9011 static struct rte_flow_pattern_template *
9012 flow_hw_create_ctrl_rx_pattern_template
9013 		(struct rte_eth_dev *dev,
9014 		 const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
9015 		 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9016 {
9017 	const struct rte_flow_pattern_template_attr attr = {
9018 		.relaxed_matching = 0,
9019 		.ingress = 1,
9020 	};
9021 	struct rte_flow_item items[] = {
9022 		/* Matching patterns */
9023 		flow_hw_get_ctrl_rx_eth_item(eth_pattern_type),
9024 		flow_hw_get_ctrl_rx_vlan_item(eth_pattern_type),
9025 		flow_hw_get_ctrl_rx_l3_item(rss_type),
9026 		flow_hw_get_ctrl_rx_l4_item(rss_type),
9027 		/* Terminate pattern */
9028 		{ .type = RTE_FLOW_ITEM_TYPE_END }
9029 	};
9030 
9031 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
9032 }
9033 
9034 static int
9035 flow_hw_create_ctrl_rx_tables(struct rte_eth_dev *dev)
9036 {
9037 	struct mlx5_priv *priv = dev->data->dev_private;
9038 	unsigned int i;
9039 	unsigned int j;
9040 	int ret;
9041 
9042 	MLX5_ASSERT(!priv->hw_ctrl_rx);
9043 	priv->hw_ctrl_rx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*priv->hw_ctrl_rx),
9044 				       RTE_CACHE_LINE_SIZE, rte_socket_id());
9045 	if (!priv->hw_ctrl_rx) {
9046 		DRV_LOG(ERR, "Failed to allocate memory for Rx control flow tables");
9047 		rte_errno = ENOMEM;
9048 		return -rte_errno;
9049 	}
9050 	/* Create all pattern template variants. */
9051 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
9052 		enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
9053 
9054 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
9055 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
9056 			struct rte_flow_template_table_attr attr;
9057 			struct rte_flow_pattern_template *pt;
9058 
9059 			attr = flow_hw_get_ctrl_rx_table_attr(eth_pattern_type, rss_type);
9060 			pt = flow_hw_create_ctrl_rx_pattern_template(dev, eth_pattern_type,
9061 								     rss_type);
9062 			if (!pt)
9063 				goto err;
9064 			priv->hw_ctrl_rx->tables[i][j].attr = attr;
9065 			priv->hw_ctrl_rx->tables[i][j].pt = pt;
9066 		}
9067 	}
9068 	return 0;
9069 err:
9070 	ret = rte_errno;
9071 	flow_hw_cleanup_ctrl_rx_tables(dev);
9072 	rte_errno = ret;
9073 	return -ret;
9074 }
9075 
9076 void
9077 mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev)
9078 {
9079 	struct mlx5_priv *priv = dev->data->dev_private;
9080 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
9081 	unsigned int i;
9082 	unsigned int j;
9083 
9084 	if (!priv->dr_ctx)
9085 		return;
9086 	if (!priv->hw_ctrl_rx)
9087 		return;
9088 	hw_ctrl_rx = priv->hw_ctrl_rx;
9089 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
9090 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
9091 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
9092 
9093 			if (tmpls->tbl) {
9094 				claim_zero(flow_hw_table_destroy(dev, tmpls->tbl, NULL));
9095 				tmpls->tbl = NULL;
9096 			}
9097 		}
9098 	}
9099 	for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
9100 		if (hw_ctrl_rx->rss[j]) {
9101 			claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_rx->rss[j], NULL));
9102 			hw_ctrl_rx->rss[j] = NULL;
9103 		}
9104 	}
9105 }
9106 
9107 /**
9108  * Copy the provided HWS configuration to a newly allocated buffer.
9109  *
9110  * @param[in] port_attr
9111  *   Port configuration attributes.
9112  * @param[in] nb_queue
9113  *   Number of queue.
9114  * @param[in] queue_attr
9115  *   Array that holds attributes for each flow queue.
9116  *
9117  * @return
9118  *   Pointer to copied HWS configuration is returned on success.
9119  *   Otherwise, NULL is returned and rte_errno is set.
9120  */
9121 static struct mlx5_flow_hw_attr *
9122 flow_hw_alloc_copy_config(const struct rte_flow_port_attr *port_attr,
9123 			  const uint16_t nb_queue,
9124 			  const struct rte_flow_queue_attr *queue_attr[],
9125 			  struct rte_flow_error *error)
9126 {
9127 	struct mlx5_flow_hw_attr *hw_attr;
9128 	size_t hw_attr_size;
9129 	unsigned int i;
9130 
9131 	hw_attr_size = sizeof(*hw_attr) + nb_queue * sizeof(*hw_attr->queue_attr);
9132 	hw_attr = mlx5_malloc(MLX5_MEM_ZERO, hw_attr_size, 0, SOCKET_ID_ANY);
9133 	if (!hw_attr) {
9134 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9135 				   "Not enough memory to store configuration");
9136 		return NULL;
9137 	}
9138 	memcpy(&hw_attr->port_attr, port_attr, sizeof(*port_attr));
9139 	hw_attr->nb_queue = nb_queue;
9140 	/* Queue attributes are placed after the mlx5_flow_hw_attr. */
9141 	hw_attr->queue_attr = (struct rte_flow_queue_attr *)(hw_attr + 1);
9142 	for (i = 0; i < nb_queue; ++i)
9143 		memcpy(&hw_attr->queue_attr[i], queue_attr[i], sizeof(hw_attr->queue_attr[i]));
9144 	return hw_attr;
9145 }
9146 
9147 /**
9148  * Compares the preserved HWS configuration with the provided one.
9149  *
9150  * @param[in] hw_attr
9151  *   Pointer to preserved HWS configuration.
9152  * @param[in] new_pa
9153  *   Port configuration attributes to compare.
9154  * @param[in] new_nbq
9155  *   Number of queues to compare.
9156  * @param[in] new_qa
9157  *   Array that holds attributes for each flow queue.
9158  *
9159  * @return
9160  *   True if configurations are the same, false otherwise.
9161  */
9162 static bool
9163 flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
9164 		       const struct rte_flow_port_attr *new_pa,
9165 		       const uint16_t new_nbq,
9166 		       const struct rte_flow_queue_attr *new_qa[])
9167 {
9168 	const struct rte_flow_port_attr *old_pa = &hw_attr->port_attr;
9169 	const uint16_t old_nbq = hw_attr->nb_queue;
9170 	const struct rte_flow_queue_attr *old_qa = hw_attr->queue_attr;
9171 	unsigned int i;
9172 
9173 	if (old_pa->nb_counters != new_pa->nb_counters ||
9174 	    old_pa->nb_aging_objects != new_pa->nb_aging_objects ||
9175 	    old_pa->nb_meters != new_pa->nb_meters ||
9176 	    old_pa->nb_conn_tracks != new_pa->nb_conn_tracks ||
9177 	    old_pa->flags != new_pa->flags)
9178 		return false;
9179 	if (old_nbq != new_nbq)
9180 		return false;
9181 	for (i = 0; i < old_nbq; ++i)
9182 		if (old_qa[i].size != new_qa[i]->size)
9183 			return false;
9184 	return true;
9185 }
9186 
9187 /**
9188  * Configure port HWS resources.
9189  *
9190  * @param[in] dev
9191  *   Pointer to the rte_eth_dev structure.
9192  * @param[in] port_attr
9193  *   Port configuration attributes.
9194  * @param[in] nb_queue
9195  *   Number of queue.
9196  * @param[in] queue_attr
9197  *   Array that holds attributes for each flow queue.
9198  * @param[out] error
9199  *   Pointer to error structure.
9200  *
9201  * @return
9202  *   0 on success, a negative errno value otherwise and rte_errno is set.
9203  */
9204 static int
9205 flow_hw_configure(struct rte_eth_dev *dev,
9206 		  const struct rte_flow_port_attr *port_attr,
9207 		  uint16_t nb_queue,
9208 		  const struct rte_flow_queue_attr *queue_attr[],
9209 		  struct rte_flow_error *error)
9210 {
9211 	struct mlx5_priv *priv = dev->data->dev_private;
9212 	struct mlx5_priv *host_priv = NULL;
9213 	struct mlx5dr_context *dr_ctx = NULL;
9214 	struct mlx5dr_context_attr dr_ctx_attr = {0};
9215 	struct mlx5_hw_q *hw_q;
9216 	struct mlx5_hw_q_job *job = NULL;
9217 	uint32_t mem_size, i, j;
9218 	struct mlx5_indexed_pool_config cfg = {
9219 		.size = sizeof(struct mlx5_action_construct_data),
9220 		.trunk_size = 4096,
9221 		.need_lock = 1,
9222 		.release_mem_en = !!priv->sh->config.reclaim_mode,
9223 		.malloc = mlx5_malloc,
9224 		.free = mlx5_free,
9225 		.type = "mlx5_hw_action_construct_data",
9226 	};
9227 	/*
9228 	 * Adds one queue to be used by PMD.
9229 	 * The last queue will be used by the PMD.
9230 	 */
9231 	uint16_t nb_q_updated = 0;
9232 	struct rte_flow_queue_attr **_queue_attr = NULL;
9233 	struct rte_flow_queue_attr ctrl_queue_attr = {0};
9234 	bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
9235 	int ret = 0;
9236 	uint32_t action_flags;
9237 
9238 	if (!port_attr || !nb_queue || !queue_attr) {
9239 		rte_errno = EINVAL;
9240 		goto err;
9241 	}
9242 	/*
9243 	 * Calling rte_flow_configure() again is allowed if and only if
9244 	 * provided configuration matches the initially provided one.
9245 	 */
9246 	if (priv->dr_ctx) {
9247 		MLX5_ASSERT(priv->hw_attr != NULL);
9248 		for (i = 0; i < priv->nb_queue; i++) {
9249 			hw_q = &priv->hw_q[i];
9250 			/* Make sure all queues are empty. */
9251 			if (hw_q->size != hw_q->job_idx) {
9252 				rte_errno = EBUSY;
9253 				goto err;
9254 			}
9255 		}
9256 		if (flow_hw_compare_config(priv->hw_attr, port_attr, nb_queue, queue_attr))
9257 			return 0;
9258 		else
9259 			return rte_flow_error_set(error, ENOTSUP,
9260 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9261 						  "Changing HWS configuration attributes "
9262 						  "is not supported");
9263 	}
9264 	priv->hw_attr = flow_hw_alloc_copy_config(port_attr, nb_queue, queue_attr, error);
9265 	if (!priv->hw_attr) {
9266 		ret = -rte_errno;
9267 		goto err;
9268 	}
9269 	ctrl_queue_attr.size = queue_attr[0]->size;
9270 	nb_q_updated = nb_queue + 1;
9271 	_queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
9272 				  nb_q_updated *
9273 				  sizeof(struct rte_flow_queue_attr *),
9274 				  64, SOCKET_ID_ANY);
9275 	if (!_queue_attr) {
9276 		rte_errno = ENOMEM;
9277 		goto err;
9278 	}
9279 
9280 	memcpy(_queue_attr, queue_attr, sizeof(void *) * nb_queue);
9281 	_queue_attr[nb_queue] = &ctrl_queue_attr;
9282 	priv->acts_ipool = mlx5_ipool_create(&cfg);
9283 	if (!priv->acts_ipool)
9284 		goto err;
9285 	/* Allocate the queue job descriptor LIFO. */
9286 	mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
9287 	for (i = 0; i < nb_q_updated; i++) {
9288 		/*
9289 		 * Check if the queues' size are all the same as the
9290 		 * limitation from HWS layer.
9291 		 */
9292 		if (_queue_attr[i]->size != _queue_attr[0]->size) {
9293 			rte_errno = EINVAL;
9294 			goto err;
9295 		}
9296 		mem_size += (sizeof(struct mlx5_hw_q_job *) +
9297 			    sizeof(struct mlx5_hw_q_job) +
9298 			    sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
9299 			    sizeof(uint8_t) * MLX5_PUSH_MAX_LEN +
9300 			    sizeof(struct mlx5_modification_cmd) *
9301 			    MLX5_MHDR_MAX_CMD +
9302 			    sizeof(struct rte_flow_item) *
9303 			    MLX5_HW_MAX_ITEMS +
9304 				sizeof(struct rte_flow_hw)) *
9305 			    _queue_attr[i]->size;
9306 	}
9307 	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
9308 				 64, SOCKET_ID_ANY);
9309 	if (!priv->hw_q) {
9310 		rte_errno = ENOMEM;
9311 		goto err;
9312 	}
9313 	for (i = 0; i < nb_q_updated; i++) {
9314 		char mz_name[RTE_MEMZONE_NAMESIZE];
9315 		uint8_t *encap = NULL, *push = NULL;
9316 		struct mlx5_modification_cmd *mhdr_cmd = NULL;
9317 		struct rte_flow_item *items = NULL;
9318 		struct rte_flow_hw *upd_flow = NULL;
9319 
9320 		priv->hw_q[i].job_idx = _queue_attr[i]->size;
9321 		priv->hw_q[i].size = _queue_attr[i]->size;
9322 		if (i == 0)
9323 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
9324 					    &priv->hw_q[nb_q_updated];
9325 		else
9326 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
9327 				&job[_queue_attr[i - 1]->size - 1].upd_flow[1];
9328 		job = (struct mlx5_hw_q_job *)
9329 		      &priv->hw_q[i].job[_queue_attr[i]->size];
9330 		mhdr_cmd = (struct mlx5_modification_cmd *)
9331 			   &job[_queue_attr[i]->size];
9332 		encap = (uint8_t *)
9333 			 &mhdr_cmd[_queue_attr[i]->size * MLX5_MHDR_MAX_CMD];
9334 		push = (uint8_t *)
9335 			 &encap[_queue_attr[i]->size * MLX5_ENCAP_MAX_LEN];
9336 		items = (struct rte_flow_item *)
9337 			 &push[_queue_attr[i]->size * MLX5_PUSH_MAX_LEN];
9338 		upd_flow = (struct rte_flow_hw *)
9339 			&items[_queue_attr[i]->size * MLX5_HW_MAX_ITEMS];
9340 		for (j = 0; j < _queue_attr[i]->size; j++) {
9341 			job[j].mhdr_cmd = &mhdr_cmd[j * MLX5_MHDR_MAX_CMD];
9342 			job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];
9343 			job[j].push_data = &push[j * MLX5_PUSH_MAX_LEN];
9344 			job[j].items = &items[j * MLX5_HW_MAX_ITEMS];
9345 			job[j].upd_flow = &upd_flow[j];
9346 			priv->hw_q[i].job[j] = &job[j];
9347 		}
9348 		snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_cq_%u",
9349 			 dev->data->port_id, i);
9350 		priv->hw_q[i].indir_cq = rte_ring_create(mz_name,
9351 				_queue_attr[i]->size, SOCKET_ID_ANY,
9352 				RING_F_SP_ENQ | RING_F_SC_DEQ |
9353 				RING_F_EXACT_SZ);
9354 		if (!priv->hw_q[i].indir_cq)
9355 			goto err;
9356 		snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_iq_%u",
9357 			 dev->data->port_id, i);
9358 		priv->hw_q[i].indir_iq = rte_ring_create(mz_name,
9359 				_queue_attr[i]->size, SOCKET_ID_ANY,
9360 				RING_F_SP_ENQ | RING_F_SC_DEQ |
9361 				RING_F_EXACT_SZ);
9362 		if (!priv->hw_q[i].indir_iq)
9363 			goto err;
9364 	}
9365 	dr_ctx_attr.pd = priv->sh->cdev->pd;
9366 	dr_ctx_attr.queues = nb_q_updated;
9367 	/* Queue size should all be the same. Take the first one. */
9368 	dr_ctx_attr.queue_size = _queue_attr[0]->size;
9369 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
9370 		struct rte_eth_dev *host_dev = NULL;
9371 		uint16_t port_id;
9372 
9373 		MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
9374 		if (is_proxy) {
9375 			DRV_LOG(ERR, "cross vHCA shared mode not supported "
9376 				"for E-Switch confgiurations");
9377 			rte_errno = ENOTSUP;
9378 			goto err;
9379 		}
9380 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
9381 			if (port_id == port_attr->host_port_id) {
9382 				host_dev = &rte_eth_devices[port_id];
9383 				break;
9384 			}
9385 		}
9386 		if (!host_dev || host_dev == dev ||
9387 		    !host_dev->data || !host_dev->data->dev_private) {
9388 			DRV_LOG(ERR, "Invalid cross vHCA host port %u",
9389 				port_attr->host_port_id);
9390 			rte_errno = EINVAL;
9391 			goto err;
9392 		}
9393 		host_priv = host_dev->data->dev_private;
9394 		if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
9395 			DRV_LOG(ERR, "Sibling ports %u and %u do not "
9396 				     "require cross vHCA sharing mode",
9397 				dev->data->port_id, port_attr->host_port_id);
9398 			rte_errno = EINVAL;
9399 			goto err;
9400 		}
9401 		if (host_priv->shared_host) {
9402 			DRV_LOG(ERR, "Host port %u is not the sharing base",
9403 				port_attr->host_port_id);
9404 			rte_errno = EINVAL;
9405 			goto err;
9406 		}
9407 		if (port_attr->nb_counters ||
9408 		    port_attr->nb_aging_objects ||
9409 		    port_attr->nb_meters ||
9410 		    port_attr->nb_conn_tracks) {
9411 			DRV_LOG(ERR,
9412 				"Object numbers on guest port must be zeros");
9413 			rte_errno = EINVAL;
9414 			goto err;
9415 		}
9416 		dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
9417 		priv->shared_host = host_dev;
9418 		__atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
9419 	}
9420 	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
9421 	/* rte_errno has been updated by HWS layer. */
9422 	if (!dr_ctx)
9423 		goto err;
9424 	priv->dr_ctx = dr_ctx;
9425 	priv->nb_queue = nb_q_updated;
9426 	rte_spinlock_init(&priv->hw_ctrl_lock);
9427 	LIST_INIT(&priv->hw_ctrl_flows);
9428 	LIST_INIT(&priv->hw_ext_ctrl_flows);
9429 	ret = flow_hw_create_ctrl_rx_tables(dev);
9430 	if (ret) {
9431 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9432 				   "Failed to set up Rx control flow templates");
9433 		goto err;
9434 	}
9435 	/* Initialize quotas */
9436 	if (port_attr->nb_quotas || (host_priv && host_priv->quota_ctx.devx_obj)) {
9437 		ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas);
9438 		if (ret) {
9439 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9440 					   "Failed to initialize quota.");
9441 			goto err;
9442 		}
9443 	}
9444 	/* Initialize meter library*/
9445 	if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
9446 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 0, 0, nb_q_updated))
9447 			goto err;
9448 	/* Add global actions. */
9449 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
9450 		uint32_t act_flags = 0;
9451 
9452 		act_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];
9453 		if (is_proxy)
9454 			act_flags |= mlx5_hw_act_flag[i][2];
9455 		priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
9456 		if (!priv->hw_drop[i])
9457 			goto err;
9458 		priv->hw_tag[i] = mlx5dr_action_create_tag
9459 			(priv->dr_ctx, mlx5_hw_act_flag[i][0]);
9460 		if (!priv->hw_tag[i])
9461 			goto err;
9462 	}
9463 	if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) {
9464 		ret = flow_hw_setup_tx_repr_tagging(dev, error);
9465 		if (ret)
9466 			goto err;
9467 	}
9468 	/*
9469 	 * DEFAULT_MISS action have different behaviors in different domains.
9470 	 * In FDB, it will steering the packets to the E-switch manager.
9471 	 * In NIC Rx root, it will steering the packet to the kernel driver stack.
9472 	 * An action with all bits set in the flag can be created and the HWS
9473 	 * layer will translate it properly when being used in different rules.
9474 	 */
9475 	action_flags = MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX |
9476 		       MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX;
9477 	if (is_proxy)
9478 		action_flags |= (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB);
9479 	priv->hw_def_miss = mlx5dr_action_create_default_miss(priv->dr_ctx, action_flags);
9480 	if (!priv->hw_def_miss)
9481 		goto err;
9482 	if (is_proxy) {
9483 		ret = flow_hw_create_vport_actions(priv);
9484 		if (ret) {
9485 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9486 					   NULL, "Failed to create vport actions.");
9487 			goto err;
9488 		}
9489 		ret = flow_hw_create_ctrl_tables(dev, error);
9490 		if (ret)
9491 			goto err;
9492 	}
9493 	if (!priv->shared_host)
9494 		flow_hw_create_send_to_kernel_actions(priv);
9495 	if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
9496 		mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
9497 			   sizeof(*priv->ct_mng);
9498 		priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
9499 					   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
9500 		if (!priv->ct_mng)
9501 			goto err;
9502 		if (mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng, nb_q_updated))
9503 			goto err;
9504 		priv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);
9505 		if (!priv->hws_ctpool)
9506 			goto err;
9507 		priv->sh->ct_aso_en = 1;
9508 	}
9509 	if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
9510 		priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
9511 							   nb_queue);
9512 		if (priv->hws_cpool == NULL)
9513 			goto err;
9514 	}
9515 	if (port_attr->nb_aging_objects) {
9516 		if (port_attr->nb_counters == 0) {
9517 			/*
9518 			 * Aging management uses counter. Number counters
9519 			 * requesting should take into account a counter for
9520 			 * each flow rules containing AGE without counter.
9521 			 */
9522 			DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
9523 				"without counters requesting.",
9524 				dev->data->port_id,
9525 				port_attr->nb_aging_objects);
9526 			rte_errno = EINVAL;
9527 			goto err;
9528 		}
9529 		ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue);
9530 		if (ret < 0) {
9531 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9532 					   NULL, "Failed to init age pool.");
9533 			goto err;
9534 		}
9535 	}
9536 	ret = flow_hw_create_vlan(dev);
9537 	if (ret) {
9538 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9539 				   NULL, "Failed to VLAN actions.");
9540 		goto err;
9541 	}
9542 	if (_queue_attr)
9543 		mlx5_free(_queue_attr);
9544 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
9545 		priv->hws_strict_queue = 1;
9546 	return 0;
9547 err:
9548 	if (priv->hws_ctpool) {
9549 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
9550 		priv->hws_ctpool = NULL;
9551 	}
9552 	if (priv->ct_mng) {
9553 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
9554 		priv->ct_mng = NULL;
9555 	}
9556 	if (priv->hws_age_req)
9557 		mlx5_hws_age_pool_destroy(priv);
9558 	if (priv->hws_cpool) {
9559 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
9560 		priv->hws_cpool = NULL;
9561 	}
9562 	mlx5_flow_quota_destroy(dev);
9563 	flow_hw_destroy_send_to_kernel_action(priv);
9564 	flow_hw_free_vport_actions(priv);
9565 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
9566 		if (priv->hw_drop[i])
9567 			mlx5dr_action_destroy(priv->hw_drop[i]);
9568 		if (priv->hw_tag[i])
9569 			mlx5dr_action_destroy(priv->hw_tag[i]);
9570 	}
9571 	if (priv->hw_def_miss)
9572 		mlx5dr_action_destroy(priv->hw_def_miss);
9573 	flow_hw_destroy_vlan(dev);
9574 	if (dr_ctx)
9575 		claim_zero(mlx5dr_context_close(dr_ctx));
9576 	for (i = 0; i < nb_q_updated; i++) {
9577 		rte_ring_free(priv->hw_q[i].indir_iq);
9578 		rte_ring_free(priv->hw_q[i].indir_cq);
9579 	}
9580 	mlx5_free(priv->hw_q);
9581 	priv->hw_q = NULL;
9582 	if (priv->acts_ipool) {
9583 		mlx5_ipool_destroy(priv->acts_ipool);
9584 		priv->acts_ipool = NULL;
9585 	}
9586 	if (_queue_attr)
9587 		mlx5_free(_queue_attr);
9588 	if (priv->shared_host) {
9589 		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
9590 		priv->shared_host = NULL;
9591 	}
9592 	mlx5_free(priv->hw_attr);
9593 	priv->hw_attr = NULL;
9594 	/* Do not overwrite the internal errno information. */
9595 	if (ret)
9596 		return ret;
9597 	return rte_flow_error_set(error, rte_errno,
9598 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9599 				  "fail to configure port");
9600 }
9601 
9602 /**
9603  * Release HWS resources.
9604  *
9605  * @param[in] dev
9606  *   Pointer to the rte_eth_dev structure.
9607  */
9608 void
9609 flow_hw_resource_release(struct rte_eth_dev *dev)
9610 {
9611 	struct mlx5_priv *priv = dev->data->dev_private;
9612 	struct rte_flow_template_table *tbl;
9613 	struct rte_flow_pattern_template *it;
9614 	struct rte_flow_actions_template *at;
9615 	struct mlx5_flow_group *grp;
9616 	uint32_t i;
9617 
9618 	if (!priv->dr_ctx)
9619 		return;
9620 	flow_hw_rxq_flag_set(dev, false);
9621 	flow_hw_flush_all_ctrl_flows(dev);
9622 	flow_hw_cleanup_tx_repr_tagging(dev);
9623 	flow_hw_cleanup_ctrl_rx_tables(dev);
9624 	while (!LIST_EMPTY(&priv->flow_hw_grp)) {
9625 		grp = LIST_FIRST(&priv->flow_hw_grp);
9626 		flow_hw_group_unset_miss_group(dev, grp, NULL);
9627 	}
9628 	while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) {
9629 		tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
9630 		flow_hw_table_destroy(dev, tbl, NULL);
9631 	}
9632 	while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
9633 		tbl = LIST_FIRST(&priv->flow_hw_tbl);
9634 		flow_hw_table_destroy(dev, tbl, NULL);
9635 	}
9636 	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
9637 		it = LIST_FIRST(&priv->flow_hw_itt);
9638 		flow_hw_pattern_template_destroy(dev, it, NULL);
9639 	}
9640 	while (!LIST_EMPTY(&priv->flow_hw_at)) {
9641 		at = LIST_FIRST(&priv->flow_hw_at);
9642 		flow_hw_actions_template_destroy(dev, at, NULL);
9643 	}
9644 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
9645 		if (priv->hw_drop[i])
9646 			mlx5dr_action_destroy(priv->hw_drop[i]);
9647 		if (priv->hw_tag[i])
9648 			mlx5dr_action_destroy(priv->hw_tag[i]);
9649 	}
9650 	if (priv->hw_def_miss)
9651 		mlx5dr_action_destroy(priv->hw_def_miss);
9652 	flow_hw_destroy_vlan(dev);
9653 	flow_hw_destroy_send_to_kernel_action(priv);
9654 	flow_hw_free_vport_actions(priv);
9655 	if (priv->acts_ipool) {
9656 		mlx5_ipool_destroy(priv->acts_ipool);
9657 		priv->acts_ipool = NULL;
9658 	}
9659 	if (priv->hws_age_req)
9660 		mlx5_hws_age_pool_destroy(priv);
9661 	if (priv->hws_cpool) {
9662 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
9663 		priv->hws_cpool = NULL;
9664 	}
9665 	if (priv->hws_ctpool) {
9666 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
9667 		priv->hws_ctpool = NULL;
9668 	}
9669 	if (priv->ct_mng) {
9670 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
9671 		priv->ct_mng = NULL;
9672 	}
9673 	mlx5_flow_quota_destroy(dev);
9674 	for (i = 0; i < priv->nb_queue; i++) {
9675 		rte_ring_free(priv->hw_q[i].indir_iq);
9676 		rte_ring_free(priv->hw_q[i].indir_cq);
9677 	}
9678 	mlx5_free(priv->hw_q);
9679 	priv->hw_q = NULL;
9680 	claim_zero(mlx5dr_context_close(priv->dr_ctx));
9681 	if (priv->shared_host) {
9682 		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
9683 		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
9684 		priv->shared_host = NULL;
9685 	}
9686 	priv->dr_ctx = NULL;
9687 	mlx5_free(priv->hw_attr);
9688 	priv->hw_attr = NULL;
9689 	priv->nb_queue = 0;
9690 }
9691 
9692 /* Sets vport tag and mask, for given port, used in HWS rules. */
9693 void
9694 flow_hw_set_port_info(struct rte_eth_dev *dev)
9695 {
9696 	struct mlx5_priv *priv = dev->data->dev_private;
9697 	uint16_t port_id = dev->data->port_id;
9698 	struct flow_hw_port_info *info;
9699 
9700 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
9701 	info = &mlx5_flow_hw_port_infos[port_id];
9702 	info->regc_mask = priv->vport_meta_mask;
9703 	info->regc_value = priv->vport_meta_tag;
9704 	info->is_wire = mlx5_is_port_on_mpesw_device(priv) ? priv->mpesw_uplink : priv->master;
9705 }
9706 
9707 /* Clears vport tag and mask used for HWS rules. */
9708 void
9709 flow_hw_clear_port_info(struct rte_eth_dev *dev)
9710 {
9711 	uint16_t port_id = dev->data->port_id;
9712 	struct flow_hw_port_info *info;
9713 
9714 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
9715 	info = &mlx5_flow_hw_port_infos[port_id];
9716 	info->regc_mask = 0;
9717 	info->regc_value = 0;
9718 	info->is_wire = 0;
9719 }
9720 
9721 static int
9722 flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,
9723 			  uint32_t idx,
9724 			  struct rte_flow_error *error)
9725 {
9726 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
9727 	uint32_t ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
9728 	struct rte_eth_dev *owndev = &rte_eth_devices[owner];
9729 	struct mlx5_priv *priv = owndev->data->dev_private;
9730 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
9731 	struct mlx5_aso_ct_action *ct;
9732 
9733 	ct = mlx5_ipool_get(pool->cts, ct_idx);
9734 	if (!ct) {
9735 		return rte_flow_error_set(error, EINVAL,
9736 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9737 				NULL,
9738 				"Invalid CT destruction index");
9739 	}
9740 	__atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
9741 				 __ATOMIC_RELAXED);
9742 	mlx5_ipool_free(pool->cts, ct_idx);
9743 	return 0;
9744 }
9745 
9746 static int
9747 flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,
9748 			struct rte_flow_action_conntrack *profile,
9749 			void *user_data, bool push,
9750 			struct rte_flow_error *error)
9751 {
9752 	struct mlx5_priv *priv = dev->data->dev_private;
9753 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
9754 	struct mlx5_aso_ct_action *ct;
9755 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
9756 	uint32_t ct_idx;
9757 
9758 	if (owner != PORT_ID(priv))
9759 		return rte_flow_error_set(error, EACCES,
9760 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9761 				NULL,
9762 				"Can't query CT object owned by another port");
9763 	ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
9764 	ct = mlx5_ipool_get(pool->cts, ct_idx);
9765 	if (!ct) {
9766 		return rte_flow_error_set(error, EINVAL,
9767 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9768 				NULL,
9769 				"Invalid CT query index");
9770 	}
9771 	profile->peer_port = ct->peer;
9772 	profile->is_original_dir = ct->is_original;
9773 	if (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))
9774 		return rte_flow_error_set(error, EIO,
9775 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9776 				NULL,
9777 				"Failed to query CT context");
9778 	return 0;
9779 }
9780 
9781 
9782 static int
9783 flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
9784 			 const struct rte_flow_modify_conntrack *action_conf,
9785 			 uint32_t idx, void *user_data, bool push,
9786 			 struct rte_flow_error *error)
9787 {
9788 	struct mlx5_priv *priv = dev->data->dev_private;
9789 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
9790 	struct mlx5_aso_ct_action *ct;
9791 	const struct rte_flow_action_conntrack *new_prf;
9792 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
9793 	uint32_t ct_idx;
9794 	int ret = 0;
9795 
9796 	if (PORT_ID(priv) != owner)
9797 		return rte_flow_error_set(error, EACCES,
9798 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9799 					  NULL,
9800 					  "Can't update CT object owned by another port");
9801 	ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
9802 	ct = mlx5_ipool_get(pool->cts, ct_idx);
9803 	if (!ct) {
9804 		return rte_flow_error_set(error, EINVAL,
9805 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9806 				NULL,
9807 				"Invalid CT update index");
9808 	}
9809 	new_prf = &action_conf->new_ct;
9810 	if (action_conf->direction)
9811 		ct->is_original = !!new_prf->is_original_dir;
9812 	if (action_conf->state) {
9813 		/* Only validate the profile when it needs to be updated. */
9814 		ret = mlx5_validate_action_ct(dev, new_prf, error);
9815 		if (ret)
9816 			return ret;
9817 		ret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,
9818 						user_data, push);
9819 		if (ret)
9820 			return rte_flow_error_set(error, EIO,
9821 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9822 					NULL,
9823 					"Failed to send CT context update WQE");
9824 		if (queue != MLX5_HW_INV_QUEUE)
9825 			return 0;
9826 		/* Block until ready or a failure in synchronous mode. */
9827 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
9828 		if (ret)
9829 			rte_flow_error_set(error, rte_errno,
9830 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9831 					   NULL,
9832 					   "Timeout to get the CT update");
9833 	}
9834 	return ret;
9835 }
9836 
9837 static struct rte_flow_action_handle *
9838 flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
9839 			 const struct rte_flow_action_conntrack *pro,
9840 			 void *user_data, bool push,
9841 			 struct rte_flow_error *error)
9842 {
9843 	struct mlx5_priv *priv = dev->data->dev_private;
9844 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
9845 	struct mlx5_aso_ct_action *ct;
9846 	uint32_t ct_idx = 0;
9847 	int ret;
9848 	bool async = !!(queue != MLX5_HW_INV_QUEUE);
9849 
9850 	if (!pool) {
9851 		rte_flow_error_set(error, EINVAL,
9852 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
9853 				   "CT is not enabled");
9854 		return 0;
9855 	}
9856 	ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
9857 	if (!ct) {
9858 		rte_flow_error_set(error, rte_errno,
9859 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
9860 				   "Failed to allocate CT object");
9861 		return 0;
9862 	}
9863 	ct->offset = ct_idx - 1;
9864 	ct->is_original = !!pro->is_original_dir;
9865 	ct->peer = pro->peer_port;
9866 	ct->pool = pool;
9867 	if (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {
9868 		mlx5_ipool_free(pool->cts, ct_idx);
9869 		rte_flow_error_set(error, EBUSY,
9870 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
9871 				   "Failed to update CT");
9872 		return 0;
9873 	}
9874 	if (!async) {
9875 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
9876 		if (ret) {
9877 			mlx5_ipool_free(pool->cts, ct_idx);
9878 			rte_flow_error_set(error, rte_errno,
9879 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9880 					   NULL,
9881 					   "Timeout to get the CT update");
9882 			return 0;
9883 		}
9884 	}
9885 	return (struct rte_flow_action_handle *)(uintptr_t)
9886 		MLX5_ACTION_CTX_CT_GEN_IDX(PORT_ID(priv), ct_idx);
9887 }
9888 
9889 /**
9890  * Validate shared action.
9891  *
9892  * @param[in] dev
9893  *   Pointer to the rte_eth_dev structure.
9894  * @param[in] queue
9895  *   Which queue to be used.
9896  * @param[in] attr
9897  *   Operation attribute.
9898  * @param[in] conf
9899  *   Indirect action configuration.
9900  * @param[in] action
9901  *   rte_flow action detail.
9902  * @param[in] user_data
9903  *   Pointer to the user_data.
9904  * @param[out] error
9905  *   Pointer to error structure.
9906  *
9907  * @return
9908  *   0 on success, otherwise negative errno value.
9909  */
9910 static int
9911 flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
9912 			       const struct rte_flow_op_attr *attr,
9913 			       const struct rte_flow_indir_action_conf *conf,
9914 			       const struct rte_flow_action *action,
9915 			       void *user_data,
9916 			       struct rte_flow_error *error)
9917 {
9918 	struct mlx5_priv *priv = dev->data->dev_private;
9919 
9920 	RTE_SET_USED(attr);
9921 	RTE_SET_USED(queue);
9922 	RTE_SET_USED(user_data);
9923 	switch (action->type) {
9924 	case RTE_FLOW_ACTION_TYPE_AGE:
9925 		if (!priv->hws_age_req)
9926 			return rte_flow_error_set(error, EINVAL,
9927 						  RTE_FLOW_ERROR_TYPE_ACTION,
9928 						  NULL,
9929 						  "aging pool not initialized");
9930 		break;
9931 	case RTE_FLOW_ACTION_TYPE_COUNT:
9932 		if (!priv->hws_cpool)
9933 			return rte_flow_error_set(error, EINVAL,
9934 						  RTE_FLOW_ERROR_TYPE_ACTION,
9935 						  NULL,
9936 						  "counters pool not initialized");
9937 		break;
9938 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
9939 		if (priv->hws_ctpool == NULL)
9940 			return rte_flow_error_set(error, EINVAL,
9941 						  RTE_FLOW_ERROR_TYPE_ACTION,
9942 						  NULL,
9943 						  "CT pool not initialized");
9944 		return mlx5_validate_action_ct(dev, action->conf, error);
9945 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
9946 		return flow_hw_validate_action_meter_mark(dev, action, error);
9947 	case RTE_FLOW_ACTION_TYPE_RSS:
9948 		return flow_dv_action_validate(dev, conf, action, error);
9949 	case RTE_FLOW_ACTION_TYPE_QUOTA:
9950 		return 0;
9951 	default:
9952 		return rte_flow_error_set(error, ENOTSUP,
9953 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
9954 					  "action type not supported");
9955 	}
9956 	return 0;
9957 }
9958 
9959 static __rte_always_inline bool
9960 flow_hw_action_push(const struct rte_flow_op_attr *attr)
9961 {
9962 	return attr ? !attr->postpone : true;
9963 }
9964 
9965 static __rte_always_inline struct mlx5_hw_q_job *
9966 flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
9967 			const struct rte_flow_action_handle *handle,
9968 			void *user_data, void *query_data,
9969 			enum mlx5_hw_job_type type,
9970 			struct rte_flow_error *error)
9971 {
9972 	struct mlx5_hw_q_job *job;
9973 
9974 	MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
9975 	job = flow_hw_job_get(priv, queue);
9976 	if (!job) {
9977 		rte_flow_error_set(error, ENOMEM,
9978 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
9979 				   "Action destroy failed due to queue full.");
9980 		return NULL;
9981 	}
9982 	job->type = type;
9983 	job->action = handle;
9984 	job->user_data = user_data;
9985 	job->query.user = query_data;
9986 	return job;
9987 }
9988 
9989 static __rte_always_inline void
9990 flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
9991 			struct mlx5_hw_q_job *job,
9992 			bool push, bool aso, bool status)
9993 {
9994 	struct mlx5_priv *priv = dev->data->dev_private;
9995 
9996 	if (queue == MLX5_HW_INV_QUEUE)
9997 		queue = CTRL_QUEUE_ID(priv);
9998 	if (likely(status)) {
9999 		/* 1. add new job to a queue */
10000 		if (!aso)
10001 			rte_ring_enqueue(push ?
10002 					 priv->hw_q[queue].indir_cq :
10003 					 priv->hw_q[queue].indir_iq,
10004 					 job);
10005 		/* 2. send pending jobs */
10006 		if (push)
10007 			__flow_hw_push_action(dev, queue);
10008 	} else {
10009 		flow_hw_job_put(priv, job, queue);
10010 	}
10011 }
10012 
10013 /**
10014  * Create shared action.
10015  *
10016  * @param[in] dev
10017  *   Pointer to the rte_eth_dev structure.
10018  * @param[in] queue
10019  *   Which queue to be used.
10020  * @param[in] attr
10021  *   Operation attribute.
10022  * @param[in] conf
10023  *   Indirect action configuration.
10024  * @param[in] action
10025  *   rte_flow action detail.
10026  * @param[in] user_data
10027  *   Pointer to the user_data.
10028  * @param[out] error
10029  *   Pointer to error structure.
10030  *
10031  * @return
10032  *   Action handle on success, NULL otherwise and rte_errno is set.
10033  */
10034 static struct rte_flow_action_handle *
10035 flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
10036 			     const struct rte_flow_op_attr *attr,
10037 			     const struct rte_flow_indir_action_conf *conf,
10038 			     const struct rte_flow_action *action,
10039 			     void *user_data,
10040 			     struct rte_flow_error *error)
10041 {
10042 	struct rte_flow_action_handle *handle = NULL;
10043 	struct mlx5_hw_q_job *job = NULL;
10044 	struct mlx5_priv *priv = dev->data->dev_private;
10045 	const struct rte_flow_action_age *age;
10046 	struct mlx5_aso_mtr *aso_mtr;
10047 	cnt_id_t cnt_id;
10048 	uint32_t mtr_id;
10049 	uint32_t age_idx;
10050 	bool push = flow_hw_action_push(attr);
10051 	bool aso = false;
10052 
10053 	if (attr) {
10054 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
10055 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
10056 					      error);
10057 		if (!job)
10058 			return NULL;
10059 	}
10060 	switch (action->type) {
10061 	case RTE_FLOW_ACTION_TYPE_AGE:
10062 		if (priv->hws_strict_queue) {
10063 			struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
10064 
10065 			if (queue >= info->hw_q_age->nb_rings) {
10066 				rte_flow_error_set(error, EINVAL,
10067 						   RTE_FLOW_ERROR_TYPE_ACTION,
10068 						   NULL,
10069 						   "Invalid queue ID for indirect AGE.");
10070 				rte_errno = EINVAL;
10071 				return NULL;
10072 			}
10073 		}
10074 		age = action->conf;
10075 		age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
10076 						     0, error);
10077 		if (age_idx == 0) {
10078 			rte_flow_error_set(error, ENODEV,
10079 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10080 					   "AGE are not configured!");
10081 		} else {
10082 			age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
10083 				   MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
10084 			handle =
10085 			    (struct rte_flow_action_handle *)(uintptr_t)age_idx;
10086 		}
10087 		break;
10088 	case RTE_FLOW_ACTION_TYPE_COUNT:
10089 		if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
10090 			rte_flow_error_set(error, ENODEV,
10091 					RTE_FLOW_ERROR_TYPE_ACTION,
10092 					NULL,
10093 					"counter are not configured!");
10094 		else
10095 			handle = (struct rte_flow_action_handle *)
10096 				 (uintptr_t)cnt_id;
10097 		break;
10098 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
10099 		aso = true;
10100 		handle = flow_hw_conntrack_create(dev, queue, action->conf, job,
10101 						  push, error);
10102 		break;
10103 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
10104 		aso = true;
10105 		aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push);
10106 		if (!aso_mtr)
10107 			break;
10108 		mtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
10109 			MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id);
10110 		handle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id;
10111 		break;
10112 	case RTE_FLOW_ACTION_TYPE_RSS:
10113 		handle = flow_dv_action_create(dev, conf, action, error);
10114 		break;
10115 	case RTE_FLOW_ACTION_TYPE_QUOTA:
10116 		aso = true;
10117 		handle = mlx5_quota_alloc(dev, queue, action->conf,
10118 					  job, push, error);
10119 		break;
10120 	default:
10121 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
10122 				   NULL, "action type not supported");
10123 		break;
10124 	}
10125 	if (job) {
10126 		job->action = handle;
10127 		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY;
10128 		flow_hw_action_finalize(dev, queue, job, push, aso,
10129 					handle != NULL);
10130 	}
10131 	return handle;
10132 }
10133 
10134 static int
10135 mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,
10136 			    const struct rte_flow_update_meter_mark *upd_meter_mark,
10137 			    uint32_t idx, bool push,
10138 			    struct mlx5_hw_q_job *job, struct rte_flow_error *error)
10139 {
10140 	struct mlx5_priv *priv = dev->data->dev_private;
10141 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
10142 	const struct rte_flow_action_meter_mark *meter_mark = &upd_meter_mark->meter_mark;
10143 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
10144 	struct mlx5_flow_meter_info *fm;
10145 
10146 	if (!aso_mtr)
10147 		return rte_flow_error_set(error, EINVAL,
10148 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10149 					  NULL, "Invalid meter_mark update index");
10150 	fm = &aso_mtr->fm;
10151 	if (upd_meter_mark->profile_valid)
10152 		fm->profile = (struct mlx5_flow_meter_profile *)
10153 			(meter_mark->profile);
10154 	if (upd_meter_mark->color_mode_valid)
10155 		fm->color_aware = meter_mark->color_mode;
10156 	if (upd_meter_mark->state_valid)
10157 		fm->is_enable = meter_mark->state;
10158 	/* Update ASO flow meter by wqe. */
10159 	if (mlx5_aso_meter_update_by_wqe(priv->sh, queue,
10160 					 aso_mtr, &priv->mtr_bulk, job, push))
10161 		return rte_flow_error_set(error, EINVAL,
10162 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10163 					  NULL, "Unable to update ASO meter WQE");
10164 	/* Wait for ASO object completion. */
10165 	if (queue == MLX5_HW_INV_QUEUE &&
10166 	    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
10167 		return rte_flow_error_set(error, EINVAL,
10168 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10169 					  NULL, "Unable to wait for ASO meter CQE");
10170 	return 0;
10171 }
10172 
10173 /**
10174  * Update shared action.
10175  *
10176  * @param[in] dev
10177  *   Pointer to the rte_eth_dev structure.
10178  * @param[in] queue
10179  *   Which queue to be used.
10180  * @param[in] attr
10181  *   Operation attribute.
10182  * @param[in] handle
10183  *   Action handle to be updated.
10184  * @param[in] update
10185  *   Update value.
10186  * @param[in] user_data
10187  *   Pointer to the user_data.
10188  * @param[out] error
10189  *   Pointer to error structure.
10190  *
10191  * @return
10192  *   0 on success, negative value otherwise and rte_errno is set.
10193  */
10194 static int
10195 flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
10196 			     const struct rte_flow_op_attr *attr,
10197 			     struct rte_flow_action_handle *handle,
10198 			     const void *update,
10199 			     void *user_data,
10200 			     struct rte_flow_error *error)
10201 {
10202 	struct mlx5_priv *priv = dev->data->dev_private;
10203 	const struct rte_flow_modify_conntrack *ct_conf =
10204 		(const struct rte_flow_modify_conntrack *)update;
10205 	struct mlx5_hw_q_job *job = NULL;
10206 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
10207 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
10208 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
10209 	int ret = 0;
10210 	bool push = flow_hw_action_push(attr);
10211 	bool aso = false;
10212 
10213 	if (attr) {
10214 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
10215 					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
10216 					      error);
10217 		if (!job)
10218 			return -rte_errno;
10219 	}
10220 	switch (type) {
10221 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
10222 		ret = mlx5_hws_age_action_update(priv, idx, update, error);
10223 		break;
10224 	case MLX5_INDIRECT_ACTION_TYPE_CT:
10225 		if (ct_conf->state)
10226 			aso = true;
10227 		ret = flow_hw_conntrack_update(dev, queue, update, act_idx,
10228 					       job, push, error);
10229 		break;
10230 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
10231 		aso = true;
10232 		ret = mlx5_flow_update_meter_mark(dev, queue, update, idx, push,
10233 						  job, error);
10234 		break;
10235 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
10236 		ret = flow_dv_action_update(dev, handle, update, error);
10237 		break;
10238 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
10239 		aso = true;
10240 		ret = mlx5_quota_query_update(dev, queue, handle, update, NULL,
10241 					      job, push, error);
10242 		break;
10243 	default:
10244 		ret = -ENOTSUP;
10245 		rte_flow_error_set(error, ENOTSUP,
10246 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10247 					  "action type not supported");
10248 		break;
10249 	}
10250 	if (job)
10251 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
10252 	return ret;
10253 }
10254 
10255 /**
10256  * Destroy shared action.
10257  *
10258  * @param[in] dev
10259  *   Pointer to the rte_eth_dev structure.
10260  * @param[in] queue
10261  *   Which queue to be used.
10262  * @param[in] attr
10263  *   Operation attribute.
10264  * @param[in] handle
10265  *   Action handle to be destroyed.
10266  * @param[in] user_data
10267  *   Pointer to the user_data.
10268  * @param[out] error
10269  *   Pointer to error structure.
10270  *
10271  * @return
10272  *   0 on success, negative value otherwise and rte_errno is set.
10273  */
10274 static int
10275 flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
10276 			      const struct rte_flow_op_attr *attr,
10277 			      struct rte_flow_action_handle *handle,
10278 			      void *user_data,
10279 			      struct rte_flow_error *error)
10280 {
10281 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
10282 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
10283 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
10284 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
10285 	struct mlx5_priv *priv = dev->data->dev_private;
10286 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
10287 	struct mlx5_hw_q_job *job = NULL;
10288 	struct mlx5_aso_mtr *aso_mtr;
10289 	struct mlx5_flow_meter_info *fm;
10290 	bool push = flow_hw_action_push(attr);
10291 	bool aso = false;
10292 	int ret = 0;
10293 
10294 	if (attr) {
10295 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
10296 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
10297 					      error);
10298 		if (!job)
10299 			return -rte_errno;
10300 	}
10301 	switch (type) {
10302 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
10303 		ret = mlx5_hws_age_action_destroy(priv, age_idx, error);
10304 		break;
10305 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
10306 		age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
10307 		if (age_idx != 0)
10308 			/*
10309 			 * If this counter belongs to indirect AGE, here is the
10310 			 * time to update the AGE.
10311 			 */
10312 			mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
10313 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
10314 		break;
10315 	case MLX5_INDIRECT_ACTION_TYPE_CT:
10316 		ret = flow_hw_conntrack_destroy(dev, act_idx, error);
10317 		break;
10318 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
10319 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
10320 		if (!aso_mtr) {
10321 			ret = -EINVAL;
10322 			rte_flow_error_set(error, EINVAL,
10323 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10324 				NULL, "Invalid meter_mark destroy index");
10325 			break;
10326 		}
10327 		fm = &aso_mtr->fm;
10328 		fm->is_enable = 0;
10329 		/* Update ASO flow meter by wqe. */
10330 		if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,
10331 						 &priv->mtr_bulk, job, push)) {
10332 			ret = -EINVAL;
10333 			rte_flow_error_set(error, EINVAL,
10334 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10335 				NULL, "Unable to update ASO meter WQE");
10336 			break;
10337 		}
10338 		/* Wait for ASO object completion. */
10339 		if (queue == MLX5_HW_INV_QUEUE &&
10340 		    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
10341 			ret = -EINVAL;
10342 			rte_flow_error_set(error, EINVAL,
10343 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10344 				NULL, "Unable to wait for ASO meter CQE");
10345 			break;
10346 		}
10347 		if (!job)
10348 			mlx5_ipool_free(pool->idx_pool, idx);
10349 		else
10350 			aso = true;
10351 		break;
10352 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
10353 		ret = flow_dv_action_destroy(dev, handle, error);
10354 		break;
10355 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
10356 		break;
10357 	default:
10358 		ret = -ENOTSUP;
10359 		rte_flow_error_set(error, ENOTSUP,
10360 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10361 					  "action type not supported");
10362 		break;
10363 	}
10364 	if (job)
10365 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
10366 	return ret;
10367 }
10368 
10369 static int
10370 flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
10371 		      void *data, struct rte_flow_error *error)
10372 {
10373 	struct mlx5_hws_cnt_pool *hpool;
10374 	struct mlx5_priv *priv = dev->data->dev_private;
10375 	struct mlx5_hws_cnt *cnt;
10376 	struct rte_flow_query_count *qc = data;
10377 	uint32_t iidx;
10378 	uint64_t pkts, bytes;
10379 
10380 	if (!mlx5_hws_cnt_id_valid(counter))
10381 		return rte_flow_error_set(error, EINVAL,
10382 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10383 				"counter are not available");
10384 	hpool = mlx5_hws_cnt_host_pool(priv->hws_cpool);
10385 	iidx = mlx5_hws_cnt_iidx(hpool, counter);
10386 	cnt = &hpool->pool[iidx];
10387 	__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
10388 	qc->hits_set = 1;
10389 	qc->bytes_set = 1;
10390 	qc->hits = pkts - cnt->reset.hits;
10391 	qc->bytes = bytes - cnt->reset.bytes;
10392 	if (qc->reset) {
10393 		cnt->reset.bytes = bytes;
10394 		cnt->reset.hits = pkts;
10395 	}
10396 	return 0;
10397 }
10398 
10399 /**
10400  * Query a flow rule AGE action for aging information.
10401  *
10402  * @param[in] dev
10403  *   Pointer to Ethernet device.
10404  * @param[in] age_idx
10405  *   Index of AGE action parameter.
10406  * @param[out] data
10407  *   Data retrieved by the query.
10408  * @param[out] error
10409  *   Perform verbose error reporting if not NULL.
10410  *
10411  * @return
10412  *   0 on success, a negative errno value otherwise and rte_errno is set.
10413  */
10414 static int
10415 flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
10416 		  struct rte_flow_error *error)
10417 {
10418 	struct mlx5_priv *priv = dev->data->dev_private;
10419 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
10420 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
10421 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
10422 	struct rte_flow_query_age *resp = data;
10423 
10424 	if (!param || !param->timeout)
10425 		return rte_flow_error_set(error, EINVAL,
10426 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10427 					  NULL, "age data not available");
10428 	switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
10429 	case HWS_AGE_AGED_OUT_REPORTED:
10430 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
10431 		resp->aged = 1;
10432 		break;
10433 	case HWS_AGE_CANDIDATE:
10434 	case HWS_AGE_CANDIDATE_INSIDE_RING:
10435 		resp->aged = 0;
10436 		break;
10437 	case HWS_AGE_FREE:
10438 		/*
10439 		 * When state is FREE the flow itself should be invalid.
10440 		 * Fall-through.
10441 		 */
10442 	default:
10443 		MLX5_ASSERT(0);
10444 		break;
10445 	}
10446 	resp->sec_since_last_hit_valid = !resp->aged;
10447 	if (resp->sec_since_last_hit_valid)
10448 		resp->sec_since_last_hit = __atomic_load_n
10449 				 (&param->sec_since_last_hit, __ATOMIC_RELAXED);
10450 	return 0;
10451 }
10452 
10453 static int
10454 flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
10455 	      const struct rte_flow_action *actions, void *data,
10456 	      struct rte_flow_error *error)
10457 {
10458 	int ret = -EINVAL;
10459 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
10460 
10461 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
10462 		switch (actions->type) {
10463 		case RTE_FLOW_ACTION_TYPE_VOID:
10464 			break;
10465 		case RTE_FLOW_ACTION_TYPE_COUNT:
10466 			ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
10467 						    error);
10468 			break;
10469 		case RTE_FLOW_ACTION_TYPE_AGE:
10470 			ret = flow_hw_query_age(dev, hw_flow->age_idx, data,
10471 						error);
10472 			break;
10473 		default:
10474 			return rte_flow_error_set(error, ENOTSUP,
10475 						  RTE_FLOW_ERROR_TYPE_ACTION,
10476 						  actions,
10477 						  "action not supported");
10478 		}
10479 	}
10480 	return ret;
10481 }
10482 
10483 /**
10484  * Validate indirect action.
10485  *
10486  * @param[in] dev
10487  *   Pointer to the Ethernet device structure.
10488  * @param[in] conf
10489  *   Shared action configuration.
10490  * @param[in] action
10491  *   Action specification used to create indirect action.
10492  * @param[out] error
10493  *   Perform verbose error reporting if not NULL. Initialized in case of
10494  *   error only.
10495  *
10496  * @return
10497  *   0 on success, otherwise negative errno value.
10498  */
10499 static int
10500 flow_hw_action_validate(struct rte_eth_dev *dev,
10501 			const struct rte_flow_indir_action_conf *conf,
10502 			const struct rte_flow_action *action,
10503 			struct rte_flow_error *err)
10504 {
10505 	return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
10506 					      conf, action, NULL, err);
10507 }
10508 
10509 /**
10510  * Create indirect action.
10511  *
10512  * @param[in] dev
10513  *   Pointer to the Ethernet device structure.
10514  * @param[in] conf
10515  *   Shared action configuration.
10516  * @param[in] action
10517  *   Action specification used to create indirect action.
10518  * @param[out] error
10519  *   Perform verbose error reporting if not NULL. Initialized in case of
10520  *   error only.
10521  *
10522  * @return
10523  *   A valid shared action handle in case of success, NULL otherwise and
10524  *   rte_errno is set.
10525  */
10526 static struct rte_flow_action_handle *
10527 flow_hw_action_create(struct rte_eth_dev *dev,
10528 		       const struct rte_flow_indir_action_conf *conf,
10529 		       const struct rte_flow_action *action,
10530 		       struct rte_flow_error *err)
10531 {
10532 	return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
10533 					    NULL, conf, action, NULL, err);
10534 }
10535 
10536 /**
10537  * Destroy the indirect action.
10538  * Release action related resources on the NIC and the memory.
10539  * Lock free, (mutex should be acquired by caller).
10540  * Dispatcher for action type specific call.
10541  *
10542  * @param[in] dev
10543  *   Pointer to the Ethernet device structure.
10544  * @param[in] handle
10545  *   The indirect action object handle to be removed.
10546  * @param[out] error
10547  *   Perform verbose error reporting if not NULL. Initialized in case of
10548  *   error only.
10549  *
10550  * @return
10551  *   0 on success, otherwise negative errno value.
10552  */
10553 static int
10554 flow_hw_action_destroy(struct rte_eth_dev *dev,
10555 		       struct rte_flow_action_handle *handle,
10556 		       struct rte_flow_error *error)
10557 {
10558 	return flow_hw_action_handle_destroy(dev, MLX5_HW_INV_QUEUE,
10559 			NULL, handle, NULL, error);
10560 }
10561 
10562 /**
10563  * Updates in place shared action configuration.
10564  *
10565  * @param[in] dev
10566  *   Pointer to the Ethernet device structure.
10567  * @param[in] handle
10568  *   The indirect action object handle to be updated.
10569  * @param[in] update
10570  *   Action specification used to modify the action pointed by *handle*.
10571  *   *update* could be of same type with the action pointed by the *handle*
10572  *   handle argument, or some other structures like a wrapper, depending on
10573  *   the indirect action type.
10574  * @param[out] error
10575  *   Perform verbose error reporting if not NULL. Initialized in case of
10576  *   error only.
10577  *
10578  * @return
10579  *   0 on success, otherwise negative errno value.
10580  */
10581 static int
10582 flow_hw_action_update(struct rte_eth_dev *dev,
10583 		      struct rte_flow_action_handle *handle,
10584 		      const void *update,
10585 		      struct rte_flow_error *err)
10586 {
10587 	return flow_hw_action_handle_update(dev, MLX5_HW_INV_QUEUE,
10588 			NULL, handle, update, NULL, err);
10589 }
10590 
10591 static int
10592 flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
10593 			    const struct rte_flow_op_attr *attr,
10594 			    const struct rte_flow_action_handle *handle,
10595 			    void *data, void *user_data,
10596 			    struct rte_flow_error *error)
10597 {
10598 	struct mlx5_priv *priv = dev->data->dev_private;
10599 	struct mlx5_hw_q_job *job = NULL;
10600 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
10601 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
10602 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
10603 	int ret;
10604 	bool push = flow_hw_action_push(attr);
10605 	bool aso = false;
10606 
10607 	if (attr) {
10608 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
10609 					      data, MLX5_HW_Q_JOB_TYPE_QUERY,
10610 					      error);
10611 		if (!job)
10612 			return -rte_errno;
10613 	}
10614 	switch (type) {
10615 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
10616 		ret = flow_hw_query_age(dev, age_idx, data, error);
10617 		break;
10618 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
10619 		ret = flow_hw_query_counter(dev, act_idx, data, error);
10620 		break;
10621 	case MLX5_INDIRECT_ACTION_TYPE_CT:
10622 		aso = true;
10623 		if (job)
10624 			job->query.user = data;
10625 		ret = flow_hw_conntrack_query(dev, queue, act_idx, data,
10626 					      job, push, error);
10627 		break;
10628 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
10629 		aso = true;
10630 		ret = mlx5_quota_query(dev, queue, handle, data,
10631 				       job, push, error);
10632 		break;
10633 	default:
10634 		ret = -ENOTSUP;
10635 		rte_flow_error_set(error, ENOTSUP,
10636 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10637 					  "action type not supported");
10638 		break;
10639 	}
10640 	if (job)
10641 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
10642 	return ret;
10643 }
10644 
10645 static int
10646 flow_hw_async_action_handle_query_update
10647 			(struct rte_eth_dev *dev, uint32_t queue,
10648 			 const struct rte_flow_op_attr *attr,
10649 			 struct rte_flow_action_handle *handle,
10650 			 const void *update, void *query,
10651 			 enum rte_flow_query_update_mode qu_mode,
10652 			 void *user_data, struct rte_flow_error *error)
10653 {
10654 	struct mlx5_priv *priv = dev->data->dev_private;
10655 	bool push = flow_hw_action_push(attr);
10656 	bool aso = false;
10657 	struct mlx5_hw_q_job *job = NULL;
10658 	int ret = 0;
10659 
10660 	if (attr) {
10661 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
10662 					      query,
10663 					      MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
10664 					      error);
10665 		if (!job)
10666 			return -rte_errno;
10667 	}
10668 	switch (MLX5_INDIRECT_ACTION_TYPE_GET(handle)) {
10669 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
10670 		if (qu_mode != RTE_FLOW_QU_QUERY_FIRST) {
10671 			ret = rte_flow_error_set
10672 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
10673 				 NULL, "quota action must query before update");
10674 			break;
10675 		}
10676 		aso = true;
10677 		ret = mlx5_quota_query_update(dev, queue, handle,
10678 					      update, query, job, push, error);
10679 		break;
10680 	default:
10681 		ret = rte_flow_error_set(error, ENOTSUP,
10682 					 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "update and query not supportred");
10683 	}
10684 	if (job)
10685 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
10686 	return ret;
10687 }
10688 
10689 static int
10690 flow_hw_action_query(struct rte_eth_dev *dev,
10691 		     const struct rte_flow_action_handle *handle, void *data,
10692 		     struct rte_flow_error *error)
10693 {
10694 	return flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,
10695 			handle, data, NULL, error);
10696 }
10697 
10698 static int
10699 flow_hw_action_query_update(struct rte_eth_dev *dev,
10700 			    struct rte_flow_action_handle *handle,
10701 			    const void *update, void *query,
10702 			    enum rte_flow_query_update_mode qu_mode,
10703 			    struct rte_flow_error *error)
10704 {
10705 	return flow_hw_async_action_handle_query_update(dev, MLX5_HW_INV_QUEUE,
10706 							NULL, handle, update,
10707 							query, qu_mode, NULL,
10708 							error);
10709 }
10710 
10711 /**
10712  * Get aged-out flows of a given port on the given HWS flow queue.
10713  *
10714  * @param[in] dev
10715  *   Pointer to the Ethernet device structure.
10716  * @param[in] queue_id
10717  *   Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE not set.
10718  * @param[in, out] contexts
10719  *   The address of an array of pointers to the aged-out flows contexts.
10720  * @param[in] nb_contexts
10721  *   The length of context array pointers.
10722  * @param[out] error
10723  *   Perform verbose error reporting if not NULL. Initialized in case of
10724  *   error only.
10725  *
10726  * @return
10727  *   if nb_contexts is 0, return the amount of all aged contexts.
10728  *   if nb_contexts is not 0 , return the amount of aged flows reported
10729  *   in the context array, otherwise negative errno value.
10730  */
10731 static int
10732 flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
10733 			 void **contexts, uint32_t nb_contexts,
10734 			 struct rte_flow_error *error)
10735 {
10736 	struct mlx5_priv *priv = dev->data->dev_private;
10737 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
10738 	struct rte_ring *r;
10739 	int nb_flows = 0;
10740 
10741 	if (nb_contexts && !contexts)
10742 		return rte_flow_error_set(error, EINVAL,
10743 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10744 					  NULL, "empty context");
10745 	if (priv->hws_strict_queue) {
10746 		if (queue_id >= age_info->hw_q_age->nb_rings)
10747 			return rte_flow_error_set(error, EINVAL,
10748 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10749 						NULL, "invalid queue id");
10750 		r = age_info->hw_q_age->aged_lists[queue_id];
10751 	} else {
10752 		r = age_info->hw_age.aged_list;
10753 		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
10754 	}
10755 	if (nb_contexts == 0)
10756 		return rte_ring_count(r);
10757 	while ((uint32_t)nb_flows < nb_contexts) {
10758 		uint32_t age_idx;
10759 
10760 		if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
10761 			break;
10762 		/* get the AGE context if the aged-out index is still valid. */
10763 		contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
10764 		if (!contexts[nb_flows])
10765 			continue;
10766 		nb_flows++;
10767 	}
10768 	return nb_flows;
10769 }
10770 
10771 /**
10772  * Get aged-out flows.
10773  *
10774  * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
10775  *
10776  * @param[in] dev
10777  *   Pointer to the Ethernet device structure.
10778  * @param[in] contexts
10779  *   The address of an array of pointers to the aged-out flows contexts.
10780  * @param[in] nb_contexts
10781  *   The length of context array pointers.
10782  * @param[out] error
10783  *   Perform verbose error reporting if not NULL. Initialized in case of
10784  *   error only.
10785  *
10786  * @return
10787  *   how many contexts get in success, otherwise negative errno value.
10788  *   if nb_contexts is 0, return the amount of all aged contexts.
10789  *   if nb_contexts is not 0 , return the amount of aged flows reported
10790  *   in the context array.
10791  */
10792 static int
10793 flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
10794 		       uint32_t nb_contexts, struct rte_flow_error *error)
10795 {
10796 	struct mlx5_priv *priv = dev->data->dev_private;
10797 
10798 	if (priv->hws_strict_queue)
10799 		DRV_LOG(WARNING,
10800 			"port %u get aged flows called in strict queue mode.",
10801 			dev->data->port_id);
10802 	return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
10803 }
10804 
10805 static void
10806 mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
10807 			  struct mlx5_mirror_clone *clone)
10808 {
10809 	switch (clone->type) {
10810 	case RTE_FLOW_ACTION_TYPE_RSS:
10811 	case RTE_FLOW_ACTION_TYPE_QUEUE:
10812 		mlx5_hrxq_release(dev,
10813 				  ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
10814 		break;
10815 	case RTE_FLOW_ACTION_TYPE_JUMP:
10816 		flow_hw_jump_release(dev, clone->action_ctx);
10817 		break;
10818 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
10819 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
10820 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10821 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
10822 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10823 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10824 	default:
10825 		break;
10826 	}
10827 }
10828 
10829 void
10830 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
10831 {
10832 	uint32_t i;
10833 
10834 	mlx5_indirect_list_remove_entry(&mirror->indirect);
10835 	for (i = 0; i < mirror->clones_num; i++)
10836 		mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
10837 	if (mirror->mirror_action)
10838 		mlx5dr_action_destroy(mirror->mirror_action);
10839 	mlx5_free(mirror);
10840 }
10841 
10842 static __rte_always_inline bool
10843 mlx5_mirror_terminal_action(const struct rte_flow_action *action)
10844 {
10845 	switch (action->type) {
10846 	case RTE_FLOW_ACTION_TYPE_JUMP:
10847 	case RTE_FLOW_ACTION_TYPE_RSS:
10848 	case RTE_FLOW_ACTION_TYPE_QUEUE:
10849 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
10850 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
10851 		return true;
10852 	default:
10853 		break;
10854 	}
10855 	return false;
10856 }
10857 
10858 static bool
10859 mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
10860 				   const struct rte_flow_attr *flow_attr,
10861 				   const struct rte_flow_action *action)
10862 {
10863 	struct mlx5_priv *priv = dev->data->dev_private;
10864 	const struct rte_flow_action_ethdev *port = NULL;
10865 	bool is_proxy = MLX5_HW_PORT_IS_PROXY(priv);
10866 
10867 	if (!action)
10868 		return false;
10869 	switch (action->type) {
10870 	case RTE_FLOW_ACTION_TYPE_QUEUE:
10871 	case RTE_FLOW_ACTION_TYPE_RSS:
10872 		if (flow_attr->transfer)
10873 			return false;
10874 		break;
10875 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
10876 		if (!is_proxy || !flow_attr->transfer)
10877 			return false;
10878 		port = action->conf;
10879 		if (!port || port->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
10880 			return false;
10881 		break;
10882 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
10883 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10884 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
10885 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10886 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10887 		if (!is_proxy || !flow_attr->transfer)
10888 			return false;
10889 		if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
10890 		    action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
10891 			return false;
10892 		break;
10893 	default:
10894 		return false;
10895 	}
10896 	return true;
10897 }
10898 
10899 /**
10900  * Valid mirror actions list includes one or two SAMPLE actions
10901  * followed by JUMP.
10902  *
10903  * @return
10904  * Number of mirrors *action* list was valid.
10905  * -EINVAL otherwise.
10906  */
10907 static int
10908 mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
10909 				     const struct rte_flow_attr *flow_attr,
10910 				     const struct rte_flow_action *actions)
10911 {
10912 	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
10913 		int i = 1;
10914 		bool valid;
10915 		const struct rte_flow_action_sample *sample = actions[0].conf;
10916 		valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
10917 							   sample->actions);
10918 		if (!valid)
10919 			return -EINVAL;
10920 		if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
10921 			i = 2;
10922 			sample = actions[1].conf;
10923 			valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
10924 								   sample->actions);
10925 			if (!valid)
10926 				return -EINVAL;
10927 		}
10928 		return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
10929 	}
10930 	return -EINVAL;
10931 }
10932 
10933 static int
10934 mirror_format_tir(struct rte_eth_dev *dev,
10935 		  struct mlx5_mirror_clone *clone,
10936 		  const struct mlx5_flow_template_table_cfg *table_cfg,
10937 		  const struct rte_flow_action *action,
10938 		  struct mlx5dr_action_dest_attr *dest_attr,
10939 		  struct rte_flow_error *error)
10940 {
10941 	uint32_t hws_flags;
10942 	enum mlx5dr_table_type table_type;
10943 	struct mlx5_hrxq *tir_ctx;
10944 
10945 	table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
10946 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
10947 	tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
10948 	if (!tir_ctx)
10949 		return rte_flow_error_set(error, EINVAL,
10950 					  RTE_FLOW_ERROR_TYPE_ACTION,
10951 					  action, "failed to create QUEUE action for mirror clone");
10952 	dest_attr->dest = tir_ctx->action;
10953 	clone->action_ctx = tir_ctx;
10954 	return 0;
10955 }
10956 
10957 static int
10958 mirror_format_jump(struct rte_eth_dev *dev,
10959 		   struct mlx5_mirror_clone *clone,
10960 		   const struct mlx5_flow_template_table_cfg *table_cfg,
10961 		   const struct rte_flow_action *action,
10962 		   struct mlx5dr_action_dest_attr *dest_attr,
10963 		   struct rte_flow_error *error)
10964 {
10965 	const struct rte_flow_action_jump *jump_conf = action->conf;
10966 	struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
10967 						(dev, table_cfg,
10968 						 jump_conf->group, error);
10969 
10970 	if (!jump)
10971 		return rte_flow_error_set(error, EINVAL,
10972 					  RTE_FLOW_ERROR_TYPE_ACTION,
10973 					  action, "failed to create JUMP action for mirror clone");
10974 	dest_attr->dest = jump->hws_action;
10975 	clone->action_ctx = jump;
10976 	return 0;
10977 }
10978 
10979 static int
10980 mirror_format_port(struct rte_eth_dev *dev,
10981 		   const struct rte_flow_action *action,
10982 		   struct mlx5dr_action_dest_attr *dest_attr,
10983 		   struct rte_flow_error __rte_unused *error)
10984 {
10985 	struct mlx5_priv *priv = dev->data->dev_private;
10986 	const struct rte_flow_action_ethdev *port_action = action->conf;
10987 
10988 	dest_attr->dest = priv->hw_vport[port_action->port_id];
10989 	return 0;
10990 }
10991 
10992 static int
10993 hw_mirror_clone_reformat(const struct rte_flow_action *actions,
10994 			 struct mlx5dr_action_dest_attr *dest_attr,
10995 			 enum mlx5dr_action_type *action_type,
10996 			 uint8_t *reformat_buf, bool decap)
10997 {
10998 	int ret;
10999 	const struct rte_flow_item *encap_item = NULL;
11000 	const struct rte_flow_action_raw_encap *encap_conf = NULL;
11001 	typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
11002 
11003 	switch (actions[0].type) {
11004 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11005 		encap_conf = actions[0].conf;
11006 		break;
11007 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11008 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
11009 						   actions);
11010 		break;
11011 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11012 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
11013 						   actions);
11014 		break;
11015 	default:
11016 		return -EINVAL;
11017 	}
11018 	*action_type = decap ?
11019 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
11020 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
11021 	if (encap_item) {
11022 		ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
11023 						 &reformat->reformat_data_sz, NULL);
11024 		if (ret)
11025 			return -EINVAL;
11026 		reformat->reformat_data = reformat_buf;
11027 	} else {
11028 		reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
11029 		reformat->reformat_data_sz = encap_conf->size;
11030 	}
11031 	return 0;
11032 }
11033 
11034 static int
11035 hw_mirror_format_clone(struct rte_eth_dev *dev,
11036 			struct mlx5_mirror_clone *clone,
11037 			const struct mlx5_flow_template_table_cfg *table_cfg,
11038 			const struct rte_flow_action *actions,
11039 			struct mlx5dr_action_dest_attr *dest_attr,
11040 			uint8_t *reformat_buf, struct rte_flow_error *error)
11041 {
11042 	struct mlx5_priv *priv = dev->data->dev_private;
11043 	int ret;
11044 	uint32_t i;
11045 	bool decap_seen = false;
11046 
11047 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
11048 		dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
11049 		switch (actions[i].type) {
11050 		case RTE_FLOW_ACTION_TYPE_QUEUE:
11051 		case RTE_FLOW_ACTION_TYPE_RSS:
11052 			ret = mirror_format_tir(dev, clone, table_cfg,
11053 						&actions[i], dest_attr, error);
11054 			if (ret)
11055 				return ret;
11056 			break;
11057 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11058 			ret = mirror_format_port(dev, &actions[i],
11059 						 dest_attr, error);
11060 			if (ret)
11061 				return ret;
11062 			break;
11063 		case RTE_FLOW_ACTION_TYPE_JUMP:
11064 			ret = mirror_format_jump(dev, clone, table_cfg,
11065 						 &actions[i], dest_attr, error);
11066 			if (ret)
11067 				return ret;
11068 			break;
11069 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
11070 			dest_attr->dest = priv->hw_def_miss;
11071 			break;
11072 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11073 			decap_seen = true;
11074 			break;
11075 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11076 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11077 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11078 			ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
11079 						       &dest_attr->action_type[i],
11080 						       reformat_buf, decap_seen);
11081 			if (ret < 0)
11082 				return rte_flow_error_set(error, EINVAL,
11083 							  RTE_FLOW_ERROR_TYPE_ACTION,
11084 							  &actions[i],
11085 							  "failed to create reformat action");
11086 			break;
11087 		default:
11088 			return rte_flow_error_set(error, EINVAL,
11089 						  RTE_FLOW_ERROR_TYPE_ACTION,
11090 						  &actions[i], "unsupported sample action");
11091 		}
11092 		clone->type = actions->type;
11093 	}
11094 	dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
11095 	return 0;
11096 }
11097 
11098 static struct rte_flow_action_list_handle *
11099 mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
11100 			     const struct mlx5_flow_template_table_cfg *table_cfg,
11101 			     const struct rte_flow_action *actions,
11102 			     struct rte_flow_error *error)
11103 {
11104 	uint32_t hws_flags;
11105 	int ret = 0, i, clones_num;
11106 	struct mlx5_mirror *mirror;
11107 	enum mlx5dr_table_type table_type;
11108 	struct mlx5_priv *priv = dev->data->dev_private;
11109 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
11110 	uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
11111 	struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
11112 	enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
11113 						  [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
11114 
11115 	memset(mirror_attr, 0, sizeof(mirror_attr));
11116 	memset(array_action_types, 0, sizeof(array_action_types));
11117 	table_type = get_mlx5dr_table_type(flow_attr);
11118 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
11119 	clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
11120 							  actions);
11121 	if (clones_num < 0) {
11122 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11123 				   actions, "Invalid mirror list format");
11124 		return NULL;
11125 	}
11126 	mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
11127 			     0, SOCKET_ID_ANY);
11128 	if (!mirror) {
11129 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
11130 				   actions, "Failed to allocate mirror context");
11131 		return NULL;
11132 	}
11133 
11134 	mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
11135 	mirror->clones_num = clones_num;
11136 	for (i = 0; i < clones_num; i++) {
11137 		const struct rte_flow_action *clone_actions;
11138 
11139 		mirror_attr[i].action_type = array_action_types[i];
11140 		if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
11141 			const struct rte_flow_action_sample *sample = actions[i].conf;
11142 
11143 			clone_actions = sample->actions;
11144 		} else {
11145 			clone_actions = &actions[i];
11146 		}
11147 		ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
11148 					     clone_actions, &mirror_attr[i],
11149 					     reformat_buf[i], error);
11150 
11151 		if (ret)
11152 			goto error;
11153 	}
11154 	hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
11155 	mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
11156 								clones_num,
11157 								mirror_attr,
11158 								hws_flags);
11159 	if (!mirror->mirror_action) {
11160 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11161 				   actions, "Failed to create HWS mirror action");
11162 		goto error;
11163 	}
11164 
11165 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
11166 	return (struct rte_flow_action_list_handle *)mirror;
11167 
11168 error:
11169 	mlx5_hw_mirror_destroy(dev, mirror);
11170 	return NULL;
11171 }
11172 
11173 void
11174 mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
11175 			     struct mlx5_indirect_list *ptr)
11176 {
11177 	struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
11178 
11179 	switch (obj->legacy_type) {
11180 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
11181 		break; /* ASO meters were released in mlx5_flow_meter_flush() */
11182 	default:
11183 		break;
11184 	}
11185 	mlx5_free(obj);
11186 }
11187 
11188 static struct rte_flow_action_list_handle *
11189 mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
11190 			  const struct rte_flow_op_attr *attr,
11191 			  const struct rte_flow_indir_action_conf *conf,
11192 			  const struct rte_flow_action *actions,
11193 			  void *user_data, struct rte_flow_error *error)
11194 {
11195 	struct mlx5_priv *priv = dev->data->dev_private;
11196 	struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
11197 							    sizeof(*indlst_obj),
11198 							    0, SOCKET_ID_ANY);
11199 
11200 	if (!indlst_obj)
11201 		return NULL;
11202 	indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
11203 							  actions, user_data,
11204 							  error);
11205 	if (!indlst_obj->handle) {
11206 		mlx5_free(indlst_obj);
11207 		return NULL;
11208 	}
11209 	indlst_obj->legacy_type = actions[0].type;
11210 	indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
11211 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
11212 	return (struct rte_flow_action_list_handle *)indlst_obj;
11213 }
11214 
11215 static __rte_always_inline enum mlx5_indirect_list_type
11216 flow_hw_inlist_type_get(const struct rte_flow_action *actions)
11217 {
11218 	switch (actions[0].type) {
11219 	case RTE_FLOW_ACTION_TYPE_SAMPLE:
11220 		return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
11221 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
11222 		return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
11223 		       MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
11224 		       MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
11225 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11226 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11227 		return MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
11228 	default:
11229 		break;
11230 	}
11231 	return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
11232 }
11233 
11234 static struct rte_flow_action_list_handle*
11235 mlx5_hw_decap_encap_handle_create(struct rte_eth_dev *dev,
11236 				  const struct mlx5_flow_template_table_cfg *table_cfg,
11237 				  const struct rte_flow_action *actions,
11238 				  struct rte_flow_error *error)
11239 {
11240 	struct mlx5_priv *priv = dev->data->dev_private;
11241 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
11242 	const struct rte_flow_action *encap = NULL;
11243 	const struct rte_flow_action *decap = NULL;
11244 	struct rte_flow_indir_action_conf indirect_conf = {
11245 		.ingress = flow_attr->ingress,
11246 		.egress = flow_attr->egress,
11247 		.transfer = flow_attr->transfer,
11248 	};
11249 	struct mlx5_hw_encap_decap_action *handle;
11250 	uint64_t action_flags = 0;
11251 
11252 	/*
11253 	 * Allow
11254 	 * 1. raw_decap / raw_encap / end
11255 	 * 2. raw_encap / end
11256 	 * 3. raw_decap / end
11257 	 */
11258 	while (actions->type != RTE_FLOW_ACTION_TYPE_END) {
11259 		if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
11260 			if (action_flags) {
11261 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11262 						   actions, "Invalid indirect action list sequence");
11263 				return NULL;
11264 			}
11265 			action_flags |= MLX5_FLOW_ACTION_DECAP;
11266 			decap = actions;
11267 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
11268 			if (action_flags & MLX5_FLOW_ACTION_ENCAP) {
11269 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11270 						   actions, "Invalid indirect action list sequence");
11271 				return NULL;
11272 			}
11273 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
11274 			encap = actions;
11275 		} else {
11276 			rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11277 					   actions, "Invalid indirect action type in list");
11278 			return NULL;
11279 		}
11280 		actions++;
11281 	}
11282 	if (!decap && !encap) {
11283 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11284 				   actions, "Invalid indirect action combinations");
11285 		return NULL;
11286 	}
11287 	handle = mlx5_reformat_action_create(dev, &indirect_conf, encap, decap, error);
11288 	if (!handle) {
11289 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11290 				   actions, "Failed to create HWS decap_encap action");
11291 		return NULL;
11292 	}
11293 	handle->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
11294 	LIST_INSERT_HEAD(&priv->indirect_list_head, &handle->indirect, entry);
11295 	return (struct rte_flow_action_list_handle *)handle;
11296 }
11297 
11298 static struct rte_flow_action_list_handle *
11299 flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
11300 					const struct rte_flow_op_attr *attr,
11301 					const struct rte_flow_indir_action_conf *conf,
11302 					const struct rte_flow_action *actions,
11303 					void *user_data,
11304 					struct rte_flow_error *error)
11305 {
11306 	struct mlx5_hw_q_job *job = NULL;
11307 	bool push = flow_hw_action_push(attr);
11308 	enum mlx5_indirect_list_type list_type;
11309 	struct rte_flow_action_list_handle *handle;
11310 	struct mlx5_priv *priv = dev->data->dev_private;
11311 	const struct mlx5_flow_template_table_cfg table_cfg = {
11312 		.external = true,
11313 		.attr = {
11314 			.flow_attr = {
11315 				.ingress = conf->ingress,
11316 				.egress = conf->egress,
11317 				.transfer = conf->transfer
11318 			}
11319 		}
11320 	};
11321 
11322 	if (!actions) {
11323 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11324 				   NULL, "No action list");
11325 		return NULL;
11326 	}
11327 	list_type = flow_hw_inlist_type_get(actions);
11328 	if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
11329 		/*
11330 		 * Legacy indirect actions already have
11331 		 * async resources management. No need to do it twice.
11332 		 */
11333 		handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
11334 						   actions, user_data, error);
11335 		goto end;
11336 	}
11337 	if (attr) {
11338 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
11339 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
11340 					      error);
11341 		if (!job)
11342 			return NULL;
11343 	}
11344 	switch (list_type) {
11345 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
11346 		handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
11347 						      actions, error);
11348 		break;
11349 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
11350 		handle = mlx5_hw_decap_encap_handle_create(dev, &table_cfg,
11351 							   actions, error);
11352 		break;
11353 	default:
11354 		handle = NULL;
11355 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
11356 				   actions, "Invalid list");
11357 	}
11358 	if (job) {
11359 		job->action = handle;
11360 		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LIST;
11361 		flow_hw_action_finalize(dev, queue, job, push, false,
11362 					handle != NULL);
11363 	}
11364 end:
11365 	return handle;
11366 }
11367 
11368 static struct rte_flow_action_list_handle *
11369 flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
11370 				  const struct rte_flow_indir_action_conf *conf,
11371 				  const struct rte_flow_action *actions,
11372 				  struct rte_flow_error *error)
11373 {
11374 	return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
11375 						       NULL, conf, actions,
11376 						       NULL, error);
11377 }
11378 
11379 static int
11380 flow_hw_async_action_list_handle_destroy
11381 			(struct rte_eth_dev *dev, uint32_t queue,
11382 			 const struct rte_flow_op_attr *attr,
11383 			 struct rte_flow_action_list_handle *handle,
11384 			 void *user_data, struct rte_flow_error *error)
11385 {
11386 	int ret = 0;
11387 	struct mlx5_hw_q_job *job = NULL;
11388 	bool push = flow_hw_action_push(attr);
11389 	struct mlx5_priv *priv = dev->data->dev_private;
11390 	enum mlx5_indirect_list_type type =
11391 		mlx5_get_indirect_list_type((void *)handle);
11392 
11393 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
11394 		struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
11395 
11396 		ret = flow_hw_action_handle_destroy(dev, queue, attr,
11397 						    legacy->handle,
11398 						    user_data, error);
11399 		mlx5_indirect_list_remove_entry(&legacy->indirect);
11400 		goto end;
11401 	}
11402 	if (attr) {
11403 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
11404 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
11405 					      error);
11406 		if (!job)
11407 			return rte_errno;
11408 	}
11409 	switch (type) {
11410 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
11411 		mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
11412 		break;
11413 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
11414 		LIST_REMOVE(&((struct mlx5_hw_encap_decap_action *)handle)->indirect,
11415 			    entry);
11416 		mlx5_reformat_action_destroy(dev, handle, error);
11417 		break;
11418 	default:
11419 		ret = rte_flow_error_set(error, EINVAL,
11420 					  RTE_FLOW_ERROR_TYPE_ACTION,
11421 					  NULL, "Invalid indirect list handle");
11422 	}
11423 	if (job) {
11424 		flow_hw_action_finalize(dev, queue, job, push, false, true);
11425 	}
11426 end:
11427 	return ret;
11428 }
11429 
11430 static int
11431 flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
11432 				   struct rte_flow_action_list_handle *handle,
11433 				   struct rte_flow_error *error)
11434 {
11435 	return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
11436 							NULL, handle, NULL,
11437 							error);
11438 }
11439 
11440 static int
11441 flow_hw_async_action_list_handle_query_update
11442 		(struct rte_eth_dev *dev, uint32_t queue_id,
11443 		 const struct rte_flow_op_attr *attr,
11444 		 const struct rte_flow_action_list_handle *handle,
11445 		 const void **update, void **query,
11446 		 enum rte_flow_query_update_mode mode,
11447 		 void *user_data, struct rte_flow_error *error)
11448 {
11449 	enum mlx5_indirect_list_type type =
11450 		mlx5_get_indirect_list_type((const void *)handle);
11451 
11452 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
11453 		struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
11454 
11455 		if (update && query)
11456 			return flow_hw_async_action_handle_query_update
11457 				(dev, queue_id, attr, legacy->handle,
11458 				 update, query, mode, user_data, error);
11459 		else if (update && update[0])
11460 			return flow_hw_action_handle_update(dev, queue_id, attr,
11461 							    legacy->handle, update[0],
11462 							    user_data, error);
11463 		else if (query && query[0])
11464 			return flow_hw_action_handle_query(dev, queue_id, attr,
11465 							   legacy->handle, query[0],
11466 							   user_data, error);
11467 		else
11468 			return rte_flow_error_set(error, EINVAL,
11469 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11470 						  NULL, "invalid legacy handle query_update parameters");
11471 	}
11472 	return -ENOTSUP;
11473 }
11474 
11475 static int
11476 flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
11477 					const struct rte_flow_action_list_handle *handle,
11478 					const void **update, void **query,
11479 					enum rte_flow_query_update_mode mode,
11480 					struct rte_flow_error *error)
11481 {
11482 	return flow_hw_async_action_list_handle_query_update
11483 					(dev, MLX5_HW_INV_QUEUE, NULL, handle,
11484 					 update, query, mode, NULL, error);
11485 }
11486 
11487 static int
11488 flow_hw_calc_table_hash(struct rte_eth_dev *dev,
11489 			 const struct rte_flow_template_table *table,
11490 			 const struct rte_flow_item pattern[],
11491 			 uint8_t pattern_template_index,
11492 			 uint32_t *hash, struct rte_flow_error *error)
11493 {
11494 	const struct rte_flow_item *items;
11495 	/* Temp job to allow adding missing items */
11496 	static struct rte_flow_item tmp_items[MLX5_HW_MAX_ITEMS];
11497 	static struct mlx5_hw_q_job job = {.items = tmp_items};
11498 	int res;
11499 
11500 	items = flow_hw_get_rule_items(dev, table, pattern,
11501 				       pattern_template_index,
11502 				       &job);
11503 	res = mlx5dr_rule_hash_calculate(table->matcher, items,
11504 					 pattern_template_index,
11505 					 MLX5DR_RULE_HASH_CALC_MODE_RAW,
11506 					 hash);
11507 	if (res)
11508 		return rte_flow_error_set(error, res,
11509 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11510 					  NULL,
11511 					  "hash could not be calculated");
11512 	return 0;
11513 }
11514 
11515 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
11516 	.info_get = flow_hw_info_get,
11517 	.configure = flow_hw_configure,
11518 	.pattern_validate = flow_hw_pattern_validate,
11519 	.pattern_template_create = flow_hw_pattern_template_create,
11520 	.pattern_template_destroy = flow_hw_pattern_template_destroy,
11521 	.actions_validate = flow_hw_actions_validate,
11522 	.actions_template_create = flow_hw_actions_template_create,
11523 	.actions_template_destroy = flow_hw_actions_template_destroy,
11524 	.template_table_create = flow_hw_template_table_create,
11525 	.template_table_destroy = flow_hw_table_destroy,
11526 	.group_set_miss_actions = flow_hw_group_set_miss_actions,
11527 	.async_flow_create = flow_hw_async_flow_create,
11528 	.async_flow_create_by_index = flow_hw_async_flow_create_by_index,
11529 	.async_flow_update = flow_hw_async_flow_update,
11530 	.async_flow_destroy = flow_hw_async_flow_destroy,
11531 	.pull = flow_hw_pull,
11532 	.push = flow_hw_push,
11533 	.async_action_create = flow_hw_action_handle_create,
11534 	.async_action_destroy = flow_hw_action_handle_destroy,
11535 	.async_action_update = flow_hw_action_handle_update,
11536 	.async_action_query_update = flow_hw_async_action_handle_query_update,
11537 	.async_action_query = flow_hw_action_handle_query,
11538 	.action_validate = flow_hw_action_validate,
11539 	.action_create = flow_hw_action_create,
11540 	.action_destroy = flow_hw_action_destroy,
11541 	.action_update = flow_hw_action_update,
11542 	.action_query = flow_hw_action_query,
11543 	.action_query_update = flow_hw_action_query_update,
11544 	.action_list_handle_create = flow_hw_action_list_handle_create,
11545 	.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
11546 	.action_list_handle_query_update =
11547 		flow_hw_action_list_handle_query_update,
11548 	.async_action_list_handle_create =
11549 		flow_hw_async_action_list_handle_create,
11550 	.async_action_list_handle_destroy =
11551 		flow_hw_async_action_list_handle_destroy,
11552 	.async_action_list_handle_query_update =
11553 		flow_hw_async_action_list_handle_query_update,
11554 	.query = flow_hw_query,
11555 	.get_aged_flows = flow_hw_get_aged_flows,
11556 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
11557 	.item_create = flow_dv_item_create,
11558 	.item_release = flow_dv_item_release,
11559 	.flow_calc_table_hash = flow_hw_calc_table_hash,
11560 };
11561 
11562 /**
11563  * Creates a control flow using flow template API on @p proxy_dev device,
11564  * on behalf of @p owner_dev device.
11565  *
11566  * This function uses locks internally to synchronize access to the
11567  * flow queue.
11568  *
11569  * Created flow is stored in private list associated with @p proxy_dev device.
11570  *
11571  * @param owner_dev
11572  *   Pointer to Ethernet device on behalf of which flow is created.
11573  * @param proxy_dev
11574  *   Pointer to Ethernet device on which flow is created.
11575  * @param table
11576  *   Pointer to flow table.
11577  * @param items
11578  *   Pointer to flow rule items.
11579  * @param item_template_idx
11580  *   Index of an item template associated with @p table.
11581  * @param actions
11582  *   Pointer to flow rule actions.
11583  * @param action_template_idx
11584  *   Index of an action template associated with @p table.
11585  * @param info
11586  *   Additional info about control flow rule.
11587  * @param external
11588  *   External ctrl flow.
11589  *
11590  * @return
11591  *   0 on success, negative errno value otherwise and rte_errno set.
11592  */
11593 static __rte_unused int
11594 flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
11595 			 struct rte_eth_dev *proxy_dev,
11596 			 struct rte_flow_template_table *table,
11597 			 struct rte_flow_item items[],
11598 			 uint8_t item_template_idx,
11599 			 struct rte_flow_action actions[],
11600 			 uint8_t action_template_idx,
11601 			 struct mlx5_hw_ctrl_flow_info *info,
11602 			 bool external)
11603 {
11604 	struct mlx5_priv *priv = proxy_dev->data->dev_private;
11605 	uint32_t queue = CTRL_QUEUE_ID(priv);
11606 	struct rte_flow_op_attr op_attr = {
11607 		.postpone = 0,
11608 	};
11609 	struct rte_flow *flow = NULL;
11610 	struct mlx5_hw_ctrl_flow *entry = NULL;
11611 	int ret;
11612 
11613 	rte_spinlock_lock(&priv->hw_ctrl_lock);
11614 	entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
11615 			    0, SOCKET_ID_ANY);
11616 	if (!entry) {
11617 		DRV_LOG(ERR, "port %u not enough memory to create control flows",
11618 			proxy_dev->data->port_id);
11619 		rte_errno = ENOMEM;
11620 		ret = -rte_errno;
11621 		goto error;
11622 	}
11623 	flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
11624 					 items, item_template_idx,
11625 					 actions, action_template_idx,
11626 					 NULL, NULL);
11627 	if (!flow) {
11628 		DRV_LOG(ERR, "port %u failed to enqueue create control"
11629 			" flow operation", proxy_dev->data->port_id);
11630 		ret = -rte_errno;
11631 		goto error;
11632 	}
11633 	ret = __flow_hw_pull_comp(proxy_dev, queue, NULL);
11634 	if (ret) {
11635 		DRV_LOG(ERR, "port %u failed to insert control flow",
11636 			proxy_dev->data->port_id);
11637 		rte_errno = EINVAL;
11638 		ret = -rte_errno;
11639 		goto error;
11640 	}
11641 	entry->owner_dev = owner_dev;
11642 	entry->flow = flow;
11643 	if (info)
11644 		entry->info = *info;
11645 	else
11646 		entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL;
11647 	if (external)
11648 		LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
11649 	else
11650 		LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
11651 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
11652 	return 0;
11653 error:
11654 	if (entry)
11655 		mlx5_free(entry);
11656 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
11657 	return ret;
11658 }
11659 
11660 /**
11661  * Destroys a control flow @p flow using flow template API on @p dev device.
11662  *
11663  * This function uses locks internally to synchronize access to the
11664  * flow queue.
11665  *
11666  * If the @p flow is stored on any private list/pool, then caller must free up
11667  * the relevant resources.
11668  *
11669  * @param dev
11670  *   Pointer to Ethernet device.
11671  * @param flow
11672  *   Pointer to flow rule.
11673  *
11674  * @return
11675  *   0 on success, non-zero value otherwise.
11676  */
11677 static int
11678 flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
11679 {
11680 	struct mlx5_priv *priv = dev->data->dev_private;
11681 	uint32_t queue = CTRL_QUEUE_ID(priv);
11682 	struct rte_flow_op_attr op_attr = {
11683 		.postpone = 0,
11684 	};
11685 	int ret;
11686 
11687 	rte_spinlock_lock(&priv->hw_ctrl_lock);
11688 	ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
11689 	if (ret) {
11690 		DRV_LOG(ERR, "port %u failed to enqueue destroy control"
11691 			" flow operation", dev->data->port_id);
11692 		goto exit;
11693 	}
11694 	ret = __flow_hw_pull_comp(dev, queue, NULL);
11695 	if (ret) {
11696 		DRV_LOG(ERR, "port %u failed to destroy control flow",
11697 			dev->data->port_id);
11698 		rte_errno = EINVAL;
11699 		ret = -rte_errno;
11700 		goto exit;
11701 	}
11702 exit:
11703 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
11704 	return ret;
11705 }
11706 
11707 /**
11708  * Destroys control flows created on behalf of @p owner device on @p dev device.
11709  *
11710  * @param dev
11711  *   Pointer to Ethernet device on which control flows were created.
11712  * @param owner
11713  *   Pointer to Ethernet device owning control flows.
11714  *
11715  * @return
11716  *   0 on success, otherwise negative error code is returned and
11717  *   rte_errno is set.
11718  */
11719 static int
11720 flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
11721 {
11722 	struct mlx5_priv *priv = dev->data->dev_private;
11723 	struct mlx5_hw_ctrl_flow *cf;
11724 	struct mlx5_hw_ctrl_flow *cf_next;
11725 	int ret;
11726 
11727 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
11728 	while (cf != NULL) {
11729 		cf_next = LIST_NEXT(cf, next);
11730 		if (cf->owner_dev == owner) {
11731 			ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
11732 			if (ret) {
11733 				rte_errno = ret;
11734 				return -ret;
11735 			}
11736 			LIST_REMOVE(cf, next);
11737 			mlx5_free(cf);
11738 		}
11739 		cf = cf_next;
11740 	}
11741 	return 0;
11742 }
11743 
11744 /**
11745  * Destroys control flows created for @p owner_dev device.
11746  *
11747  * @param owner_dev
11748  *   Pointer to Ethernet device owning control flows.
11749  *
11750  * @return
11751  *   0 on success, otherwise negative error code is returned and
11752  *   rte_errno is set.
11753  */
11754 int
11755 mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
11756 {
11757 	struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
11758 	struct rte_eth_dev *proxy_dev;
11759 	uint16_t owner_port_id = owner_dev->data->port_id;
11760 	uint16_t proxy_port_id = owner_dev->data->port_id;
11761 	int ret;
11762 
11763 	/* Flush all flows created by this port for itself. */
11764 	ret = flow_hw_flush_ctrl_flows_owned_by(owner_dev, owner_dev);
11765 	if (ret)
11766 		return ret;
11767 	/* Flush all flows created for this port on proxy port. */
11768 	if (owner_priv->sh->config.dv_esw_en) {
11769 		ret = rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL);
11770 		if (ret == -ENODEV) {
11771 			DRV_LOG(DEBUG, "Unable to find transfer proxy port for port %u. It was "
11772 				       "probably closed. Control flows were cleared.",
11773 				       owner_port_id);
11774 			rte_errno = 0;
11775 			return 0;
11776 		} else if (ret) {
11777 			DRV_LOG(ERR, "Unable to find proxy port for port %u (ret = %d)",
11778 				owner_port_id, ret);
11779 			return ret;
11780 		}
11781 		proxy_dev = &rte_eth_devices[proxy_port_id];
11782 	} else {
11783 		proxy_dev = owner_dev;
11784 	}
11785 	return flow_hw_flush_ctrl_flows_owned_by(proxy_dev, owner_dev);
11786 }
11787 
11788 /**
11789  * Destroys all control flows created on @p dev device.
11790  *
11791  * @param owner_dev
11792  *   Pointer to Ethernet device.
11793  *
11794  * @return
11795  *   0 on success, otherwise negative error code is returned and
11796  *   rte_errno is set.
11797  */
11798 static int
11799 flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
11800 {
11801 	struct mlx5_priv *priv = dev->data->dev_private;
11802 	struct mlx5_hw_ctrl_flow *cf;
11803 	struct mlx5_hw_ctrl_flow *cf_next;
11804 	int ret;
11805 
11806 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
11807 	while (cf != NULL) {
11808 		cf_next = LIST_NEXT(cf, next);
11809 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
11810 		if (ret) {
11811 			rte_errno = ret;
11812 			return -ret;
11813 		}
11814 		LIST_REMOVE(cf, next);
11815 		mlx5_free(cf);
11816 		cf = cf_next;
11817 	}
11818 	cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
11819 	while (cf != NULL) {
11820 		cf_next = LIST_NEXT(cf, next);
11821 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
11822 		if (ret) {
11823 			rte_errno = ret;
11824 			return -ret;
11825 		}
11826 		LIST_REMOVE(cf, next);
11827 		mlx5_free(cf);
11828 		cf = cf_next;
11829 	}
11830 	return 0;
11831 }
11832 
11833 int
11834 mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
11835 {
11836 	uint16_t port_id = dev->data->port_id;
11837 	struct rte_flow_item_ethdev esw_mgr_spec = {
11838 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
11839 	};
11840 	struct rte_flow_item_ethdev esw_mgr_mask = {
11841 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
11842 	};
11843 	struct rte_flow_item_tag reg_c0_spec = {
11844 		.index = (uint8_t)REG_C_0,
11845 		.data = flow_hw_esw_mgr_regc_marker(dev),
11846 	};
11847 	struct rte_flow_item_tag reg_c0_mask = {
11848 		.index = 0xff,
11849 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
11850 	};
11851 	struct mlx5_rte_flow_item_sq sq_spec = {
11852 		.queue = sqn,
11853 	};
11854 	struct rte_flow_action_ethdev port = {
11855 		.port_id = port_id,
11856 	};
11857 	struct rte_flow_item items[3] = { { 0 } };
11858 	struct rte_flow_action actions[3] = { { 0 } };
11859 	struct mlx5_hw_ctrl_flow_info flow_info = {
11860 		.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
11861 		.esw_mgr_sq = sqn,
11862 	};
11863 	struct rte_eth_dev *proxy_dev;
11864 	struct mlx5_priv *proxy_priv;
11865 	uint16_t proxy_port_id = dev->data->port_id;
11866 	int ret;
11867 
11868 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
11869 	if (ret) {
11870 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
11871 			     "port must be present to create default SQ miss flows.",
11872 			     port_id);
11873 		return ret;
11874 	}
11875 	proxy_dev = &rte_eth_devices[proxy_port_id];
11876 	proxy_priv = proxy_dev->data->dev_private;
11877 	if (!proxy_priv->dr_ctx) {
11878 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
11879 			       "for HWS to create default SQ miss flows. Default flows will "
11880 			       "not be created.",
11881 			       proxy_port_id, port_id);
11882 		return 0;
11883 	}
11884 	if (!proxy_priv->hw_esw_sq_miss_root_tbl ||
11885 	    !proxy_priv->hw_esw_sq_miss_tbl) {
11886 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
11887 			     "default flow tables were not created.",
11888 			     proxy_port_id, port_id);
11889 		rte_errno = ENOMEM;
11890 		return -rte_errno;
11891 	}
11892 	/*
11893 	 * Create a root SQ miss flow rule - match E-Switch Manager and SQ,
11894 	 * and jump to group 1.
11895 	 */
11896 	items[0] = (struct rte_flow_item){
11897 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
11898 		.spec = &esw_mgr_spec,
11899 		.mask = &esw_mgr_mask,
11900 	};
11901 	items[1] = (struct rte_flow_item){
11902 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
11903 		.spec = &sq_spec,
11904 	};
11905 	items[2] = (struct rte_flow_item){
11906 		.type = RTE_FLOW_ITEM_TYPE_END,
11907 	};
11908 	actions[0] = (struct rte_flow_action){
11909 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
11910 	};
11911 	actions[1] = (struct rte_flow_action){
11912 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
11913 	};
11914 	actions[2] = (struct rte_flow_action) {
11915 		.type = RTE_FLOW_ACTION_TYPE_END,
11916 	};
11917 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl,
11918 				       items, 0, actions, 0, &flow_info, external);
11919 	if (ret) {
11920 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
11921 			port_id, sqn, ret);
11922 		return ret;
11923 	}
11924 	/*
11925 	 * Create a non-root SQ miss flow rule - match REG_C_0 marker and SQ,
11926 	 * and forward to port.
11927 	 */
11928 	items[0] = (struct rte_flow_item){
11929 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
11930 		.spec = &reg_c0_spec,
11931 		.mask = &reg_c0_mask,
11932 	};
11933 	items[1] = (struct rte_flow_item){
11934 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
11935 		.spec = &sq_spec,
11936 	};
11937 	items[2] = (struct rte_flow_item){
11938 		.type = RTE_FLOW_ITEM_TYPE_END,
11939 	};
11940 	actions[0] = (struct rte_flow_action){
11941 		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
11942 		.conf = &port,
11943 	};
11944 	actions[1] = (struct rte_flow_action){
11945 		.type = RTE_FLOW_ACTION_TYPE_END,
11946 	};
11947 	flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
11948 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl,
11949 				       items, 0, actions, 0, &flow_info, external);
11950 	if (ret) {
11951 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
11952 			port_id, sqn, ret);
11953 		return ret;
11954 	}
11955 	return 0;
11956 }
11957 
11958 static bool
11959 flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf,
11960 				 struct rte_eth_dev *dev,
11961 				 uint32_t sqn)
11962 {
11963 	if (cf->owner_dev != dev)
11964 		return false;
11965 	if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
11966 		return true;
11967 	if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
11968 		return true;
11969 	return false;
11970 }
11971 
11972 int
11973 mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
11974 {
11975 	uint16_t port_id = dev->data->port_id;
11976 	uint16_t proxy_port_id = dev->data->port_id;
11977 	struct rte_eth_dev *proxy_dev;
11978 	struct mlx5_priv *proxy_priv;
11979 	struct mlx5_hw_ctrl_flow *cf;
11980 	struct mlx5_hw_ctrl_flow *cf_next;
11981 	int ret;
11982 
11983 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
11984 	if (ret) {
11985 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
11986 			     "port must be present for default SQ miss flow rules to exist.",
11987 			     port_id);
11988 		return ret;
11989 	}
11990 	proxy_dev = &rte_eth_devices[proxy_port_id];
11991 	proxy_priv = proxy_dev->data->dev_private;
11992 	if (!proxy_priv->dr_ctx)
11993 		return 0;
11994 	if (!proxy_priv->hw_esw_sq_miss_root_tbl ||
11995 	    !proxy_priv->hw_esw_sq_miss_tbl)
11996 		return 0;
11997 	cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
11998 	while (cf != NULL) {
11999 		cf_next = LIST_NEXT(cf, next);
12000 		if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) {
12001 			claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
12002 			LIST_REMOVE(cf, next);
12003 			mlx5_free(cf);
12004 		}
12005 		cf = cf_next;
12006 	}
12007 	return 0;
12008 }
12009 
12010 int
12011 mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
12012 {
12013 	uint16_t port_id = dev->data->port_id;
12014 	struct rte_flow_item_ethdev port_spec = {
12015 		.port_id = port_id,
12016 	};
12017 	struct rte_flow_item items[] = {
12018 		{
12019 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
12020 			.spec = &port_spec,
12021 		},
12022 		{
12023 			.type = RTE_FLOW_ITEM_TYPE_END,
12024 		},
12025 	};
12026 	struct rte_flow_action_jump jump = {
12027 		.group = 1,
12028 	};
12029 	struct rte_flow_action actions[] = {
12030 		{
12031 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
12032 			.conf = &jump,
12033 		},
12034 		{
12035 			.type = RTE_FLOW_ACTION_TYPE_END,
12036 		}
12037 	};
12038 	struct mlx5_hw_ctrl_flow_info flow_info = {
12039 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP,
12040 	};
12041 	struct rte_eth_dev *proxy_dev;
12042 	struct mlx5_priv *proxy_priv;
12043 	uint16_t proxy_port_id = dev->data->port_id;
12044 	int ret;
12045 
12046 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
12047 	if (ret) {
12048 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
12049 			     "port must be present to create default FDB jump rule.",
12050 			     port_id);
12051 		return ret;
12052 	}
12053 	proxy_dev = &rte_eth_devices[proxy_port_id];
12054 	proxy_priv = proxy_dev->data->dev_private;
12055 	if (!proxy_priv->dr_ctx) {
12056 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
12057 			       "for HWS to create default FDB jump rule. Default rule will "
12058 			       "not be created.",
12059 			       proxy_port_id, port_id);
12060 		return 0;
12061 	}
12062 	if (!proxy_priv->hw_esw_zero_tbl) {
12063 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
12064 			     "default flow tables were not created.",
12065 			     proxy_port_id, port_id);
12066 		rte_errno = EINVAL;
12067 		return -rte_errno;
12068 	}
12069 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
12070 					proxy_priv->hw_esw_zero_tbl,
12071 					items, 0, actions, 0, &flow_info, false);
12072 }
12073 
12074 int
12075 mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
12076 {
12077 	struct mlx5_priv *priv = dev->data->dev_private;
12078 	struct rte_flow_item_eth promisc = {
12079 		.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
12080 		.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
12081 		.hdr.ether_type = 0,
12082 	};
12083 	struct rte_flow_item eth_all[] = {
12084 		[0] = {
12085 			.type = RTE_FLOW_ITEM_TYPE_ETH,
12086 			.spec = &promisc,
12087 			.mask = &promisc,
12088 		},
12089 		[1] = {
12090 			.type = RTE_FLOW_ITEM_TYPE_END,
12091 		},
12092 	};
12093 	struct rte_flow_action_modify_field mreg_action = {
12094 		.operation = RTE_FLOW_MODIFY_SET,
12095 		.dst = {
12096 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
12097 			.tag_index = REG_C_1,
12098 		},
12099 		.src = {
12100 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
12101 			.tag_index = REG_A,
12102 		},
12103 		.width = 32,
12104 	};
12105 	struct rte_flow_action copy_reg_action[] = {
12106 		[0] = {
12107 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
12108 			.conf = &mreg_action,
12109 		},
12110 		[1] = {
12111 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
12112 		},
12113 		[2] = {
12114 			.type = RTE_FLOW_ACTION_TYPE_END,
12115 		},
12116 	};
12117 	struct mlx5_hw_ctrl_flow_info flow_info = {
12118 		.type = MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY,
12119 	};
12120 
12121 	MLX5_ASSERT(priv->master);
12122 	if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl)
12123 		return 0;
12124 	return flow_hw_create_ctrl_flow(dev, dev,
12125 					priv->hw_tx_meta_cpy_tbl,
12126 					eth_all, 0, copy_reg_action, 0, &flow_info, false);
12127 }
12128 
12129 int
12130 mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
12131 {
12132 	struct mlx5_priv *priv = dev->data->dev_private;
12133 	struct mlx5_rte_flow_item_sq sq_spec = {
12134 		.queue = sqn,
12135 	};
12136 	struct rte_flow_item items[] = {
12137 		{
12138 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
12139 			.spec = &sq_spec,
12140 		},
12141 		{
12142 			.type = RTE_FLOW_ITEM_TYPE_END,
12143 		},
12144 	};
12145 	/*
12146 	 * Allocate actions array suitable for all cases - extended metadata enabled or not.
12147 	 * With extended metadata there will be an additional MODIFY_FIELD action before JUMP.
12148 	 */
12149 	struct rte_flow_action actions[] = {
12150 		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD },
12151 		{ .type = RTE_FLOW_ACTION_TYPE_JUMP },
12152 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12153 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12154 	};
12155 	struct mlx5_hw_ctrl_flow_info flow_info = {
12156 		.type = MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
12157 		.tx_repr_sq = sqn,
12158 	};
12159 
12160 	/* It is assumed that caller checked for representor matching. */
12161 	MLX5_ASSERT(priv->sh->config.repr_matching);
12162 	if (!priv->dr_ctx) {
12163 		DRV_LOG(DEBUG, "Port %u must be configured for HWS, before creating "
12164 			       "default egress flow rules. Omitting creation.",
12165 			       dev->data->port_id);
12166 		return 0;
12167 	}
12168 	if (!priv->hw_tx_repr_tagging_tbl) {
12169 		DRV_LOG(ERR, "Port %u is configured for HWS, but table for default "
12170 			     "egress flow rules does not exist.",
12171 			     dev->data->port_id);
12172 		rte_errno = EINVAL;
12173 		return -rte_errno;
12174 	}
12175 	/*
12176 	 * If extended metadata mode is enabled, then an additional MODIFY_FIELD action must be
12177 	 * placed before terminating JUMP action.
12178 	 */
12179 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
12180 		actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
12181 		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
12182 	}
12183 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
12184 					items, 0, actions, 0, &flow_info, external);
12185 }
12186 
12187 int
12188 mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
12189 {
12190 	struct mlx5_priv *priv = dev->data->dev_private;
12191 	struct rte_flow_item_eth lacp_item = {
12192 		.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
12193 	};
12194 	struct rte_flow_item eth_lacp[] = {
12195 		[0] = {
12196 			.type = RTE_FLOW_ITEM_TYPE_ETH,
12197 			.spec = &lacp_item,
12198 			.mask = &lacp_item,
12199 		},
12200 		[1] = {
12201 			.type = RTE_FLOW_ITEM_TYPE_END,
12202 		},
12203 	};
12204 	struct rte_flow_action miss_action[] = {
12205 		[0] = {
12206 			.type = (enum rte_flow_action_type)
12207 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
12208 		},
12209 		[1] = {
12210 			.type = RTE_FLOW_ACTION_TYPE_END,
12211 		},
12212 	};
12213 	struct mlx5_hw_ctrl_flow_info flow_info = {
12214 		.type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
12215 	};
12216 
12217 	MLX5_ASSERT(priv->master);
12218 	if (!priv->dr_ctx || !priv->hw_lacp_rx_tbl)
12219 		return 0;
12220 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_lacp_rx_tbl, eth_lacp, 0,
12221 					miss_action, 0, &flow_info, false);
12222 }
12223 
12224 static uint32_t
12225 __calc_pattern_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
12226 {
12227 	switch (eth_pattern_type) {
12228 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
12229 		return MLX5_CTRL_PROMISCUOUS;
12230 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
12231 		return MLX5_CTRL_ALL_MULTICAST;
12232 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
12233 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
12234 		return MLX5_CTRL_BROADCAST;
12235 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
12236 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
12237 		return MLX5_CTRL_IPV4_MULTICAST;
12238 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
12239 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
12240 		return MLX5_CTRL_IPV6_MULTICAST;
12241 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
12242 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
12243 		return MLX5_CTRL_DMAC;
12244 	default:
12245 		/* Should not reach here. */
12246 		MLX5_ASSERT(false);
12247 		return 0;
12248 	}
12249 }
12250 
12251 static uint32_t
12252 __calc_vlan_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
12253 {
12254 	switch (eth_pattern_type) {
12255 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
12256 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
12257 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
12258 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
12259 		return MLX5_CTRL_VLAN_FILTER;
12260 	default:
12261 		return 0;
12262 	}
12263 }
12264 
12265 static bool
12266 eth_pattern_type_is_requested(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
12267 			      uint32_t flags)
12268 {
12269 	uint32_t pattern_flags = __calc_pattern_flags(eth_pattern_type);
12270 	uint32_t vlan_flags = __calc_vlan_flags(eth_pattern_type);
12271 	bool pattern_requested = !!(pattern_flags & flags);
12272 	bool consider_vlan = vlan_flags || (MLX5_CTRL_VLAN_FILTER & flags);
12273 	bool vlan_requested = !!(vlan_flags & flags);
12274 
12275 	if (consider_vlan)
12276 		return pattern_requested && vlan_requested;
12277 	else
12278 		return pattern_requested;
12279 }
12280 
12281 static bool
12282 rss_type_is_requested(struct mlx5_priv *priv,
12283 		      const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12284 {
12285 	struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[rss_type];
12286 	unsigned int i;
12287 
12288 	for (i = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
12289 		if (at->actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
12290 			const struct rte_flow_action_rss *rss = at->actions[i].conf;
12291 			uint64_t rss_types = rss->types;
12292 
12293 			if ((rss_types & priv->rss_conf.rss_hf) != rss_types)
12294 				return false;
12295 		}
12296 	}
12297 	return true;
12298 }
12299 
12300 static const struct rte_flow_item_eth *
12301 __get_eth_spec(const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern)
12302 {
12303 	switch (pattern) {
12304 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
12305 		return &ctrl_rx_eth_promisc_spec;
12306 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
12307 		return &ctrl_rx_eth_mcast_spec;
12308 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
12309 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
12310 		return &ctrl_rx_eth_bcast_spec;
12311 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
12312 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
12313 		return &ctrl_rx_eth_ipv4_mcast_spec;
12314 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
12315 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
12316 		return &ctrl_rx_eth_ipv6_mcast_spec;
12317 	default:
12318 		/* This case should not be reached. */
12319 		MLX5_ASSERT(false);
12320 		return NULL;
12321 	}
12322 }
12323 
12324 static int
12325 __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
12326 			    struct rte_flow_template_table *tbl,
12327 			    const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12328 			    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12329 {
12330 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
12331 	struct rte_flow_item items[5];
12332 	struct rte_flow_action actions[] = {
12333 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
12334 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12335 	};
12336 	struct mlx5_hw_ctrl_flow_info flow_info = {
12337 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
12338 	};
12339 
12340 	if (!eth_spec)
12341 		return -EINVAL;
12342 	memset(items, 0, sizeof(items));
12343 	items[0] = (struct rte_flow_item){
12344 		.type = RTE_FLOW_ITEM_TYPE_ETH,
12345 		.spec = eth_spec,
12346 	};
12347 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
12348 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
12349 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
12350 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
12351 	/* Without VLAN filtering, only a single flow rule must be created. */
12352 	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
12353 }
12354 
12355 static int
12356 __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
12357 				 struct rte_flow_template_table *tbl,
12358 				 const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12359 				 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12360 {
12361 	struct mlx5_priv *priv = dev->data->dev_private;
12362 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
12363 	struct rte_flow_item items[5];
12364 	struct rte_flow_action actions[] = {
12365 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
12366 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12367 	};
12368 	struct mlx5_hw_ctrl_flow_info flow_info = {
12369 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
12370 	};
12371 	unsigned int i;
12372 
12373 	if (!eth_spec)
12374 		return -EINVAL;
12375 	memset(items, 0, sizeof(items));
12376 	items[0] = (struct rte_flow_item){
12377 		.type = RTE_FLOW_ITEM_TYPE_ETH,
12378 		.spec = eth_spec,
12379 	};
12380 	/* Optional VLAN for now will be VOID - will be filled later. */
12381 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
12382 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
12383 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
12384 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
12385 	/* Since VLAN filtering is done, create a single flow rule for each registered vid. */
12386 	for (i = 0; i < priv->vlan_filter_n; ++i) {
12387 		uint16_t vlan = priv->vlan_filter[i];
12388 		struct rte_flow_item_vlan vlan_spec = {
12389 			.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
12390 		};
12391 
12392 		items[1].spec = &vlan_spec;
12393 		if (flow_hw_create_ctrl_flow(dev, dev,
12394 					     tbl, items, 0, actions, 0, &flow_info, false))
12395 			return -rte_errno;
12396 	}
12397 	return 0;
12398 }
12399 
12400 static int
12401 __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
12402 			     struct rte_flow_template_table *tbl,
12403 			     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12404 			     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12405 {
12406 	struct rte_flow_item_eth eth_spec;
12407 	struct rte_flow_item items[5];
12408 	struct rte_flow_action actions[] = {
12409 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
12410 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12411 	};
12412 	struct mlx5_hw_ctrl_flow_info flow_info = {
12413 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
12414 	};
12415 	const struct rte_ether_addr cmp = {
12416 		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
12417 	};
12418 	unsigned int i;
12419 
12420 	RTE_SET_USED(pattern_type);
12421 
12422 	memset(&eth_spec, 0, sizeof(eth_spec));
12423 	memset(items, 0, sizeof(items));
12424 	items[0] = (struct rte_flow_item){
12425 		.type = RTE_FLOW_ITEM_TYPE_ETH,
12426 		.spec = &eth_spec,
12427 	};
12428 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
12429 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
12430 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
12431 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
12432 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
12433 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
12434 
12435 		if (!memcmp(mac, &cmp, sizeof(*mac)))
12436 			continue;
12437 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
12438 		if (flow_hw_create_ctrl_flow(dev, dev,
12439 					     tbl, items, 0, actions, 0, &flow_info, false))
12440 			return -rte_errno;
12441 	}
12442 	return 0;
12443 }
12444 
12445 static int
12446 __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
12447 				  struct rte_flow_template_table *tbl,
12448 				  const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12449 				  const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12450 {
12451 	struct mlx5_priv *priv = dev->data->dev_private;
12452 	struct rte_flow_item_eth eth_spec;
12453 	struct rte_flow_item items[5];
12454 	struct rte_flow_action actions[] = {
12455 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
12456 		{ .type = RTE_FLOW_ACTION_TYPE_END },
12457 	};
12458 	struct mlx5_hw_ctrl_flow_info flow_info = {
12459 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
12460 	};
12461 	const struct rte_ether_addr cmp = {
12462 		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
12463 	};
12464 	unsigned int i;
12465 	unsigned int j;
12466 
12467 	RTE_SET_USED(pattern_type);
12468 
12469 	memset(&eth_spec, 0, sizeof(eth_spec));
12470 	memset(items, 0, sizeof(items));
12471 	items[0] = (struct rte_flow_item){
12472 		.type = RTE_FLOW_ITEM_TYPE_ETH,
12473 		.spec = &eth_spec,
12474 	};
12475 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
12476 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
12477 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
12478 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
12479 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
12480 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
12481 
12482 		if (!memcmp(mac, &cmp, sizeof(*mac)))
12483 			continue;
12484 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
12485 		for (j = 0; j < priv->vlan_filter_n; ++j) {
12486 			uint16_t vlan = priv->vlan_filter[j];
12487 			struct rte_flow_item_vlan vlan_spec = {
12488 				.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
12489 			};
12490 
12491 			items[1].spec = &vlan_spec;
12492 			if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
12493 						     &flow_info, false))
12494 				return -rte_errno;
12495 		}
12496 	}
12497 	return 0;
12498 }
12499 
12500 static int
12501 __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
12502 		     struct rte_flow_template_table *tbl,
12503 		     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
12504 		     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
12505 {
12506 	switch (pattern_type) {
12507 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
12508 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
12509 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
12510 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
12511 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
12512 		return __flow_hw_ctrl_flows_single(dev, tbl, pattern_type, rss_type);
12513 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
12514 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
12515 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
12516 		return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
12517 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
12518 		return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
12519 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
12520 		return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
12521 	default:
12522 		/* Should not reach here. */
12523 		MLX5_ASSERT(false);
12524 		rte_errno = EINVAL;
12525 		return -EINVAL;
12526 	}
12527 }
12528 
12529 
12530 int
12531 mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
12532 {
12533 	struct mlx5_priv *priv = dev->data->dev_private;
12534 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
12535 	unsigned int i;
12536 	unsigned int j;
12537 	int ret = 0;
12538 
12539 	RTE_SET_USED(priv);
12540 	RTE_SET_USED(flags);
12541 	if (!priv->dr_ctx) {
12542 		DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
12543 			       "HWS needs to be configured beforehand.",
12544 			       dev->data->port_id);
12545 		return 0;
12546 	}
12547 	if (!priv->hw_ctrl_rx) {
12548 		DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
12549 			dev->data->port_id);
12550 		rte_errno = EINVAL;
12551 		return -rte_errno;
12552 	}
12553 	hw_ctrl_rx = priv->hw_ctrl_rx;
12554 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
12555 		const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
12556 
12557 		if (!eth_pattern_type_is_requested(eth_pattern_type, flags))
12558 			continue;
12559 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
12560 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
12561 			struct rte_flow_actions_template *at;
12562 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
12563 			const struct mlx5_flow_template_table_cfg cfg = {
12564 				.attr = tmpls->attr,
12565 				.external = 0,
12566 			};
12567 
12568 			if (!hw_ctrl_rx->rss[rss_type]) {
12569 				at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
12570 				if (!at)
12571 					return -rte_errno;
12572 				hw_ctrl_rx->rss[rss_type] = at;
12573 			} else {
12574 				at = hw_ctrl_rx->rss[rss_type];
12575 			}
12576 			if (!rss_type_is_requested(priv, rss_type))
12577 				continue;
12578 			if (!tmpls->tbl) {
12579 				tmpls->tbl = flow_hw_table_create(dev, &cfg,
12580 								  &tmpls->pt, 1, &at, 1, NULL);
12581 				if (!tmpls->tbl) {
12582 					DRV_LOG(ERR, "port %u Failed to create template table "
12583 						     "for control flow rules. Unable to create "
12584 						     "control flow rules.",
12585 						     dev->data->port_id);
12586 					return -rte_errno;
12587 				}
12588 			}
12589 
12590 			ret = __flow_hw_ctrl_flows(dev, tmpls->tbl, eth_pattern_type, rss_type);
12591 			if (ret) {
12592 				DRV_LOG(ERR, "port %u Failed to create control flow rule.",
12593 					dev->data->port_id);
12594 				return ret;
12595 			}
12596 		}
12597 	}
12598 	return 0;
12599 }
12600 
12601 void
12602 mlx5_flow_meter_uninit(struct rte_eth_dev *dev)
12603 {
12604 	struct mlx5_priv *priv = dev->data->dev_private;
12605 
12606 	if (priv->mtr_policy_arr) {
12607 		mlx5_free(priv->mtr_policy_arr);
12608 		priv->mtr_policy_arr = NULL;
12609 	}
12610 	if (priv->mtr_profile_arr) {
12611 		mlx5_free(priv->mtr_profile_arr);
12612 		priv->mtr_profile_arr = NULL;
12613 	}
12614 	if (priv->hws_mpool) {
12615 		mlx5_aso_mtr_queue_uninit(priv->sh, priv->hws_mpool, NULL);
12616 		mlx5_ipool_destroy(priv->hws_mpool->idx_pool);
12617 		mlx5_free(priv->hws_mpool);
12618 		priv->hws_mpool = NULL;
12619 	}
12620 	if (priv->mtr_bulk.aso) {
12621 		mlx5_free(priv->mtr_bulk.aso);
12622 		priv->mtr_bulk.aso = NULL;
12623 		priv->mtr_bulk.size = 0;
12624 		mlx5_aso_queue_uninit(priv->sh, ASO_OPC_MOD_POLICER);
12625 	}
12626 	if (priv->mtr_bulk.action) {
12627 		mlx5dr_action_destroy(priv->mtr_bulk.action);
12628 		priv->mtr_bulk.action = NULL;
12629 	}
12630 	if (priv->mtr_bulk.devx_obj) {
12631 		claim_zero(mlx5_devx_cmd_destroy(priv->mtr_bulk.devx_obj));
12632 		priv->mtr_bulk.devx_obj = NULL;
12633 	}
12634 }
12635 
12636 int
12637 mlx5_flow_meter_init(struct rte_eth_dev *dev,
12638 		     uint32_t nb_meters,
12639 		     uint32_t nb_meter_profiles,
12640 		     uint32_t nb_meter_policies,
12641 		     uint32_t nb_queues)
12642 {
12643 	struct mlx5_priv *priv = dev->data->dev_private;
12644 	struct mlx5_devx_obj *dcs = NULL;
12645 	uint32_t log_obj_size;
12646 	int ret = 0;
12647 	int reg_id;
12648 	struct mlx5_aso_mtr *aso;
12649 	uint32_t i;
12650 	struct rte_flow_error error;
12651 	uint32_t flags;
12652 	uint32_t nb_mtrs = rte_align32pow2(nb_meters);
12653 	struct mlx5_indexed_pool_config cfg = {
12654 		.size = sizeof(struct mlx5_aso_mtr),
12655 		.trunk_size = 1 << 12,
12656 		.per_core_cache = 1 << 13,
12657 		.need_lock = 1,
12658 		.release_mem_en = !!priv->sh->config.reclaim_mode,
12659 		.malloc = mlx5_malloc,
12660 		.max_idx = nb_meters,
12661 		.free = mlx5_free,
12662 		.type = "mlx5_hw_mtr_mark_action",
12663 	};
12664 
12665 	if (!nb_meters) {
12666 		ret = ENOTSUP;
12667 		rte_flow_error_set(&error, ENOMEM,
12668 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12669 				  NULL, "Meter configuration is invalid.");
12670 		goto err;
12671 	}
12672 	if (!priv->mtr_en || !priv->sh->meter_aso_en) {
12673 		ret = ENOTSUP;
12674 		rte_flow_error_set(&error, ENOMEM,
12675 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12676 				  NULL, "Meter ASO is not supported.");
12677 		goto err;
12678 	}
12679 	priv->mtr_config.nb_meters = nb_meters;
12680 	log_obj_size = rte_log2_u32(nb_meters >> 1);
12681 	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj
12682 		(priv->sh->cdev->ctx, priv->sh->cdev->pdn,
12683 			log_obj_size);
12684 	if (!dcs) {
12685 		ret = ENOMEM;
12686 		rte_flow_error_set(&error, ENOMEM,
12687 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12688 				  NULL, "Meter ASO object allocation failed.");
12689 		goto err;
12690 	}
12691 	priv->mtr_bulk.devx_obj = dcs;
12692 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL);
12693 	if (reg_id < 0) {
12694 		ret = ENOTSUP;
12695 		rte_flow_error_set(&error, ENOMEM,
12696 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12697 				  NULL, "Meter register is not available.");
12698 		goto err;
12699 	}
12700 	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
12701 	if (priv->sh->config.dv_esw_en && priv->master)
12702 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
12703 	priv->mtr_bulk.action = mlx5dr_action_create_aso_meter
12704 			(priv->dr_ctx, (struct mlx5dr_devx_obj *)dcs,
12705 				reg_id - REG_C_0, flags);
12706 	if (!priv->mtr_bulk.action) {
12707 		ret = ENOMEM;
12708 		rte_flow_error_set(&error, ENOMEM,
12709 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12710 				  NULL, "Meter action creation failed.");
12711 		goto err;
12712 	}
12713 	priv->mtr_bulk.aso = mlx5_malloc(MLX5_MEM_ZERO,
12714 					 sizeof(struct mlx5_aso_mtr) *
12715 					 nb_meters,
12716 					 RTE_CACHE_LINE_SIZE,
12717 					 SOCKET_ID_ANY);
12718 	if (!priv->mtr_bulk.aso) {
12719 		ret = ENOMEM;
12720 		rte_flow_error_set(&error, ENOMEM,
12721 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12722 				  NULL, "Meter bulk ASO allocation failed.");
12723 		goto err;
12724 	}
12725 	priv->mtr_bulk.size = nb_meters;
12726 	aso = priv->mtr_bulk.aso;
12727 	for (i = 0; i < priv->mtr_bulk.size; i++) {
12728 		aso->type = ASO_METER_DIRECT;
12729 		aso->state = ASO_METER_WAIT;
12730 		aso->offset = i;
12731 		aso++;
12732 	}
12733 	priv->hws_mpool = mlx5_malloc(MLX5_MEM_ZERO,
12734 				sizeof(struct mlx5_aso_mtr_pool),
12735 				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
12736 	if (!priv->hws_mpool) {
12737 		ret = ENOMEM;
12738 		rte_flow_error_set(&error, ENOMEM,
12739 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12740 				  NULL, "Meter ipool allocation failed.");
12741 		goto err;
12742 	}
12743 	priv->hws_mpool->devx_obj = priv->mtr_bulk.devx_obj;
12744 	priv->hws_mpool->action = priv->mtr_bulk.action;
12745 	priv->hws_mpool->nb_sq = nb_queues;
12746 	if (mlx5_aso_mtr_queue_init(priv->sh, priv->hws_mpool,
12747 				    &priv->sh->mtrmng->pools_mng, nb_queues)) {
12748 		ret = ENOMEM;
12749 		rte_flow_error_set(&error, ENOMEM,
12750 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12751 				  NULL, "Meter ASO queue allocation failed.");
12752 		goto err;
12753 	}
12754 	/*
12755 	 * No need for local cache if Meter number is a small number.
12756 	 * Since flow insertion rate will be very limited in that case.
12757 	 * Here let's set the number to less than default trunk size 4K.
12758 	 */
12759 	if (nb_mtrs <= cfg.trunk_size) {
12760 		cfg.per_core_cache = 0;
12761 		cfg.trunk_size = nb_mtrs;
12762 	} else if (nb_mtrs <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
12763 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
12764 	}
12765 	priv->hws_mpool->idx_pool = mlx5_ipool_create(&cfg);
12766 	if (nb_meter_profiles) {
12767 		priv->mtr_config.nb_meter_profiles = nb_meter_profiles;
12768 		priv->mtr_profile_arr =
12769 			mlx5_malloc(MLX5_MEM_ZERO,
12770 				    sizeof(struct mlx5_flow_meter_profile) *
12771 				    nb_meter_profiles,
12772 				    RTE_CACHE_LINE_SIZE,
12773 				    SOCKET_ID_ANY);
12774 		if (!priv->mtr_profile_arr) {
12775 			ret = ENOMEM;
12776 			rte_flow_error_set(&error, ENOMEM,
12777 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12778 					   NULL, "Meter profile allocation failed.");
12779 			goto err;
12780 		}
12781 	}
12782 	if (nb_meter_policies) {
12783 		priv->mtr_config.nb_meter_policies = nb_meter_policies;
12784 		priv->mtr_policy_arr =
12785 			mlx5_malloc(MLX5_MEM_ZERO,
12786 				    sizeof(struct mlx5_flow_meter_policy) *
12787 				    nb_meter_policies,
12788 				    RTE_CACHE_LINE_SIZE,
12789 				    SOCKET_ID_ANY);
12790 		if (!priv->mtr_policy_arr) {
12791 			ret = ENOMEM;
12792 			rte_flow_error_set(&error, ENOMEM,
12793 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12794 					   NULL, "Meter policy allocation failed.");
12795 			goto err;
12796 		}
12797 	}
12798 	return 0;
12799 err:
12800 	mlx5_flow_meter_uninit(dev);
12801 	return ret;
12802 }
12803 
12804 static __rte_always_inline uint32_t
12805 mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
12806 {
12807 	uint32_t tbl_type;
12808 
12809 	if (domain->transfer)
12810 		tbl_type = MLX5DR_ACTION_FLAG_HWS_FDB;
12811 	else if (domain->egress)
12812 		tbl_type = MLX5DR_ACTION_FLAG_HWS_TX;
12813 	else if (domain->ingress)
12814 		tbl_type = MLX5DR_ACTION_FLAG_HWS_RX;
12815 	else
12816 		tbl_type = UINT32_MAX;
12817 	return tbl_type;
12818 }
12819 
12820 static struct mlx5_hw_encap_decap_action *
12821 __mlx5_reformat_create(struct rte_eth_dev *dev,
12822 		       const struct rte_flow_action_raw_encap *encap_conf,
12823 		       const struct rte_flow_indir_action_conf *domain,
12824 		       enum mlx5dr_action_type type)
12825 {
12826 	struct mlx5_priv *priv = dev->data->dev_private;
12827 	struct mlx5_hw_encap_decap_action *handle;
12828 	struct mlx5dr_action_reformat_header hdr;
12829 	uint32_t flags;
12830 
12831 	flags = mlx5_reformat_domain_to_tbl_type(domain);
12832 	flags |= (uint32_t)MLX5DR_ACTION_FLAG_SHARED;
12833 	if (flags == UINT32_MAX) {
12834 		DRV_LOG(ERR, "Reformat: invalid indirect action configuration");
12835 		return NULL;
12836 	}
12837 	/* Allocate new list entry. */
12838 	handle = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*handle), 0, SOCKET_ID_ANY);
12839 	if (!handle) {
12840 		DRV_LOG(ERR, "Reformat: failed to allocate reformat entry");
12841 		return NULL;
12842 	}
12843 	handle->action_type = type;
12844 	hdr.sz = encap_conf ? encap_conf->size : 0;
12845 	hdr.data = encap_conf ? encap_conf->data : NULL;
12846 	handle->action = mlx5dr_action_create_reformat(priv->dr_ctx,
12847 					type, 1, &hdr, 0, flags);
12848 	if (!handle->action) {
12849 		DRV_LOG(ERR, "Reformat: failed to create reformat action");
12850 		mlx5_free(handle);
12851 		return NULL;
12852 	}
12853 	return handle;
12854 }
12855 
12856 /**
12857  * Create mlx5 reformat action.
12858  *
12859  * @param[in] dev
12860  *   Pointer to rte_eth_dev structure.
12861  * @param[in] conf
12862  *   Pointer to the indirect action parameters.
12863  * @param[in] encap_action
12864  *   Pointer to the raw_encap action configuration.
12865  * @param[in] decap_action
12866  *   Pointer to the raw_decap action configuration.
12867  * @param[out] error
12868  *   Pointer to error structure.
12869  *
12870  * @return
12871  *   A valid shared action handle in case of success, NULL otherwise and
12872  *   rte_errno is set.
12873  */
12874 struct mlx5_hw_encap_decap_action*
12875 mlx5_reformat_action_create(struct rte_eth_dev *dev,
12876 			    const struct rte_flow_indir_action_conf *conf,
12877 			    const struct rte_flow_action *encap_action,
12878 			    const struct rte_flow_action *decap_action,
12879 			    struct rte_flow_error *error)
12880 {
12881 	struct mlx5_priv *priv = dev->data->dev_private;
12882 	struct mlx5_hw_encap_decap_action *handle;
12883 	const struct rte_flow_action_raw_encap *encap = NULL;
12884 	const struct rte_flow_action_raw_decap *decap = NULL;
12885 	enum mlx5dr_action_type type = MLX5DR_ACTION_TYP_LAST;
12886 
12887 	MLX5_ASSERT(!encap_action || encap_action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP);
12888 	MLX5_ASSERT(!decap_action || decap_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP);
12889 	if (priv->sh->config.dv_flow_en != 2) {
12890 		rte_flow_error_set(error, ENOTSUP,
12891 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
12892 				   "Reformat: hardware does not support");
12893 		return NULL;
12894 	}
12895 	if (!conf || (conf->transfer + conf->egress + conf->ingress != 1)) {
12896 		rte_flow_error_set(error, EINVAL,
12897 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
12898 				   "Reformat: domain should be specified");
12899 		return NULL;
12900 	}
12901 	if ((encap_action && !encap_action->conf) || (decap_action && !decap_action->conf)) {
12902 		rte_flow_error_set(error, EINVAL,
12903 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
12904 				   "Reformat: missed action configuration");
12905 		return NULL;
12906 	}
12907 	if (encap_action && !decap_action) {
12908 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
12909 		if (!encap->size || encap->size > MLX5_ENCAP_MAX_LEN ||
12910 		    encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
12911 			rte_flow_error_set(error, EINVAL,
12912 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
12913 					   "Reformat: Invalid encap length");
12914 			return NULL;
12915 		}
12916 		type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
12917 	} else if (decap_action && !encap_action) {
12918 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
12919 		if (!decap->size || decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
12920 			rte_flow_error_set(error, EINVAL,
12921 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
12922 					   "Reformat: Invalid decap length");
12923 			return NULL;
12924 		}
12925 		type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
12926 	} else if (encap_action && decap_action) {
12927 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
12928 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
12929 		if (decap->size < MLX5_ENCAPSULATION_DECISION_SIZE &&
12930 		    encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
12931 		    encap->size <= MLX5_ENCAP_MAX_LEN) {
12932 			type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
12933 		} else if (decap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
12934 			   encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
12935 			type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
12936 		} else {
12937 			rte_flow_error_set(error, EINVAL,
12938 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
12939 					   "Reformat: Invalid decap & encap length");
12940 			return NULL;
12941 		}
12942 	} else if (!encap_action && !decap_action) {
12943 		rte_flow_error_set(error, EINVAL,
12944 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
12945 				   "Reformat: Invalid decap & encap configurations");
12946 		return NULL;
12947 	}
12948 	if (!priv->dr_ctx) {
12949 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12950 				   encap_action, "Reformat: HWS not supported");
12951 		return NULL;
12952 	}
12953 	handle = __mlx5_reformat_create(dev, encap, conf, type);
12954 	if (!handle) {
12955 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
12956 				   "Reformat: failed to create indirect action");
12957 		return NULL;
12958 	}
12959 	return handle;
12960 }
12961 
12962 /**
12963  * Destroy the indirect reformat action.
12964  * Release action related resources on the NIC and the memory.
12965  * Lock free, (mutex should be acquired by caller).
12966  *
12967  * @param[in] dev
12968  *   Pointer to the Ethernet device structure.
12969  * @param[in] handle
12970  *   The indirect action list handle to be removed.
12971  * @param[out] error
12972  *   Perform verbose error reporting if not NULL. Initialized in case of
12973  *   error only.
12974  *
12975  * @return
12976  *   0 on success, otherwise negative errno value.
12977  */
12978 int
12979 mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
12980 			     struct rte_flow_action_list_handle *handle,
12981 			     struct rte_flow_error *error)
12982 {
12983 	struct mlx5_priv *priv = dev->data->dev_private;
12984 	struct mlx5_hw_encap_decap_action *action;
12985 
12986 	action = (struct mlx5_hw_encap_decap_action *)handle;
12987 	if (!priv->dr_ctx || !action)
12988 		return rte_flow_error_set(error, ENOTSUP,
12989 					  RTE_FLOW_ERROR_TYPE_ACTION, handle,
12990 					  "Reformat: invalid action handle");
12991 	mlx5dr_action_destroy(action->action);
12992 	mlx5_free(handle);
12993 	return 0;
12994 }
12995 #endif
12996