xref: /dpdk/drivers/net/mlx5/mlx5_flow_hw.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include <rte_flow.h>
6 #include <rte_flow_driver.h>
7 #include <rte_stdatomic.h>
8 
9 #include <mlx5_malloc.h>
10 
11 #include "mlx5.h"
12 #include "mlx5_common.h"
13 #include "mlx5_defs.h"
14 #include "mlx5_flow.h"
15 #include "mlx5_flow_os.h"
16 #include "mlx5_rx.h"
17 
18 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
19 #include "mlx5_hws_cnt.h"
20 
21 /** Fast path async flow API functions. */
22 static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops;
23 
24 /* The maximum number of actions supported in a flow rule. */
25 #define MLX5_HW_MAX_ACTS 16
26 
27 /*
28  * The default ipool size threshold used to decide which per_core_cache
29  * value to set.
30  */
31 #define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
32 /* The default min local cache size. */
33 #define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
34 
35 /* Default push burst threshold. */
36 #define BURST_THR 32u
37 
38 /* Default queue to flush the flows. */
39 #define MLX5_DEFAULT_FLUSH_QUEUE 0
40 
41 /* Maximum number of rules in control flow tables. */
42 #define MLX5_HW_CTRL_FLOW_NB_RULES (4096)
43 
44 /* Lowest flow group usable by an application if group translation is done. */
45 #define MLX5_HW_LOWEST_USABLE_GROUP (1)
46 
47 /* Maximum group index usable by user applications for transfer flows. */
48 #define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)
49 
50 /* Maximum group index usable by user applications for egress flows. */
51 #define MLX5_HW_MAX_EGRESS_GROUP (UINT32_MAX - 1)
52 
53 /* Lowest priority for HW root table. */
54 #define MLX5_HW_LOWEST_PRIO_ROOT 15
55 
56 /* Lowest priority for HW non-root table. */
57 #define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)
58 
59 /* Priorities for Rx control flow rules. */
60 #define MLX5_HW_CTRL_RX_PRIO_L2 (MLX5_HW_LOWEST_PRIO_ROOT)
61 #define MLX5_HW_CTRL_RX_PRIO_L3 (MLX5_HW_LOWEST_PRIO_ROOT - 1)
62 #define MLX5_HW_CTRL_RX_PRIO_L4 (MLX5_HW_LOWEST_PRIO_ROOT - 2)
63 
64 #define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
65 #define MLX5_HW_VLAN_PUSH_VID_IDX 1
66 #define MLX5_HW_VLAN_PUSH_PCP_IDX 2
67 
68 #define MLX5_MIRROR_MAX_CLONES_NUM 3
69 #define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
70 
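/* A port is a "proxy" port when E-Switch mode is enabled and the port is the master port. */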
71 #define MLX5_HW_PORT_IS_PROXY(priv) \
72 	(!!((priv)->sh->esw_mode && (priv)->master))
73 
74 
75 struct mlx5_indlst_legacy {
76 	struct mlx5_indirect_list indirect;
77 	struct rte_flow_action_handle *handle;
78 	enum rte_flow_action_type legacy_type;
79 };
80 
81 #define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
82 (((const struct encap_type *)(ptr))->definition)
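/*
 * Illustrative expansion (assuming a VXLAN encap configuration pointer named "conf"):
 *   MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap, conf)
 * evaluates to
 *   ((const struct rte_flow_action_vxlan_encap *)(conf))->definition
 */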
83 
84 /**
85  * Returns the size of a struct with the following layout:
86  *
87  * @code{.c}
88  * struct rte_flow_hw {
89  *     // rte_flow_hw fields
90  *     uint8_t rule[mlx5dr_rule_get_handle_size()];
91  * };
92  * @endcode
93  *
94  * Such a struct is used as the basic container for an HW Steering flow rule.
95  */
96 static size_t
97 mlx5_flow_hw_entry_size(void)
98 {
99 	return sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
100 }
101 
102 /**
103  * Returns the size of an "auxed" rte_flow_hw structure, which is assumed to be laid out as follows:
104  *
105  * @code{.c}
106  * struct {
107  *     struct rte_flow_hw {
108  *         // rte_flow_hw fields
109  *         uint8_t rule[mlx5dr_rule_get_handle_size()];
110  *     } flow;
111  *     struct rte_flow_hw_aux aux;
112  * };
113  * @endcode
114  *
115  * Such a struct is used whenever rte_flow_hw_aux cannot be allocated separately from the rte_flow_hw,
116  * e.g., when the table is resizable.
117  */
118 static size_t
119 mlx5_flow_hw_auxed_entry_size(void)
120 {
121 	size_t rule_size = mlx5dr_rule_get_handle_size();
122 
123 	return sizeof(struct rte_flow_hw) + rule_size + sizeof(struct rte_flow_hw_aux);
124 }
125 
126 /**
127  * Returns a valid pointer to the rte_flow_hw_aux associated with the given rte_flow_hw,
128  * depending on the template table configuration.
129  */
130 static __rte_always_inline struct rte_flow_hw_aux *
131 mlx5_flow_hw_aux(uint16_t port_id, struct rte_flow_hw *flow)
132 {
133 	struct rte_flow_template_table *table = flow->table;
134 
135 	if (rte_flow_template_table_resizable(port_id, &table->cfg.attr)) {
136 		size_t offset = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
137 
138 		return RTE_PTR_ADD(flow, offset);
139 	} else {
140 		return ((flow->nt_rule) ? flow->nt2hws->flow_aux : &table->flow_aux[flow->idx - 1]);
141 	}
142 }
143 
144 static __rte_always_inline void
145 mlx5_flow_hw_aux_set_age_idx(struct rte_flow_hw *flow,
146 			     struct rte_flow_hw_aux *aux,
147 			     uint32_t age_idx)
148 {
149 	/*
150 	 * On rule update, store the age index in the update-specific field;
151 	 * otherwise (rule creation), store it in the original field.
152 	 */
153 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
154 		aux->upd.age_idx = age_idx;
155 	else
156 		aux->orig.age_idx = age_idx;
157 }
158 
159 static __rte_always_inline uint32_t
160 mlx5_flow_hw_aux_get_age_idx(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
161 {
162 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
163 		return aux->upd.age_idx;
164 	else
165 		return aux->orig.age_idx;
166 }
167 
168 static __rte_always_inline void
169 mlx5_flow_hw_aux_set_mtr_id(struct rte_flow_hw *flow,
170 			    struct rte_flow_hw_aux *aux,
171 			    uint32_t mtr_id)
172 {
173 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
174 		aux->upd.mtr_id = mtr_id;
175 	else
176 		aux->orig.mtr_id = mtr_id;
177 }
178 
179 static __rte_always_inline uint32_t
180 mlx5_flow_hw_aux_get_mtr_id(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
181 {
182 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
183 		return aux->upd.mtr_id;
184 	else
185 		return aux->orig.mtr_id;
186 }
187 
188 static __rte_always_inline struct mlx5_hw_q_job *
189 flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
190 			const struct rte_flow_action_handle *handle,
191 			void *user_data, void *query_data,
192 			enum mlx5_hw_job_type type,
193 			enum mlx5_hw_indirect_type indirect_type,
194 			struct rte_flow_error *error);
195 static void
196 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow,
197 			  struct rte_flow_error *error);
198 
199 static int
200 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
201 			       struct rte_flow_template_table *tbl,
202 			       struct mlx5_multi_pattern_segment *segment,
203 			       uint32_t bulk_size,
204 			       struct rte_flow_error *error);
205 static void
206 mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment);
207 
208 static __rte_always_inline enum mlx5_indirect_list_type
209 flow_hw_inlist_type_get(const struct rte_flow_action *actions);
210 
211 static bool
212 mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error)
213 {
214 	const struct mlx5_priv *priv = dev->data->dev_private;
215 
216 	if (!priv->dr_ctx) {
217 		rte_flow_error_set(error, EINVAL,
218 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
219 				   "non-template flow engine was not configured");
220 		return false;
221 	}
222 	return true;
223 }
224 
225 static int
226 flow_hw_allocate_actions(struct rte_eth_dev *dev,
227 			 uint64_t action_flags,
228 			 struct rte_flow_error *error);
229 
230 static __rte_always_inline int
231 mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
232 {
233 	switch (type) {
234 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
235 		return 0;
236 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
237 		return 1;
238 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
239 		return 2;
240 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
241 		return 3;
242 	default:
243 		break;
244 	}
245 	return -1;
246 }
247 
248 /* Map to a multi-pattern index only the reformat actions supported by the BWC (non-template) API. */
249 static __rte_always_inline int
250 mlx5_bwc_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
251 {
252 	switch (type) {
253 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
254 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
255 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
256 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
257 		return mlx5_multi_pattern_reformat_to_index(type);
258 	default:
259 		break;
260 	}
261 	return -1;
262 }
263 
264 static __rte_always_inline enum mlx5dr_action_type
265 mlx5_multi_pattern_reformat_index_to_type(uint32_t ix)
266 {
267 	switch (ix) {
268 	case 0:
269 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
270 	case 1:
271 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
272 	case 2:
273 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
274 	case 3:
275 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
276 	default:
277 		break;
278 	}
279 	return MLX5DR_ACTION_TYP_MAX;
280 }
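/*
 * Note: mlx5_multi_pattern_reformat_to_index() and
 * mlx5_multi_pattern_reformat_index_to_type() are inverse mappings
 * over the four reformat action types handled above.
 */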
281 
282 static inline enum mlx5dr_table_type
283 get_mlx5dr_table_type(const struct rte_flow_attr *attr)
284 {
285 	enum mlx5dr_table_type type;
286 
287 	if (attr->transfer)
288 		type = MLX5DR_TABLE_TYPE_FDB;
289 	else if (attr->egress)
290 		type = MLX5DR_TABLE_TYPE_NIC_TX;
291 	else
292 		type = MLX5DR_TABLE_TYPE_NIC_RX;
293 	return type;
294 }
295 
296 /* Default size of the internal control queue used by the non-template API. */
297 #define MLX5_NT_DEFAULT_QUEUE_SIZE 32
298 
299 struct mlx5_mirror_clone {
300 	enum rte_flow_action_type type;
301 	void *action_ctx;
302 };
303 
304 struct mlx5_mirror {
305 	struct mlx5_indirect_list indirect;
306 	uint32_t clones_num;
307 	struct mlx5dr_action *mirror_action;
308 	struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
309 };
310 
311 static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
312 static int flow_hw_translate_group(struct rte_eth_dev *dev,
313 				   const struct mlx5_flow_template_table_cfg *cfg,
314 				   uint32_t group,
315 				   uint32_t *table_group,
316 				   struct rte_flow_error *error);
317 static __rte_always_inline int
318 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
319 			       struct mlx5_modification_cmd *mhdr_cmd,
320 			       struct mlx5_action_construct_data *act_data,
321 			       const struct mlx5_hw_actions *hw_acts,
322 			       const struct rte_flow_action *action);
323 static void
324 flow_hw_construct_quota(struct mlx5_priv *priv,
325 			struct mlx5dr_rule_action *rule_act, uint32_t qid);
326 
327 static int
328 mlx5_flow_ct_init(struct rte_eth_dev *dev,
329 		  uint32_t nb_conn_tracks,
330 		  uint16_t nb_queue);
331 
332 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
333 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
334 
335 static int flow_hw_async_create_validate(struct rte_eth_dev *dev,
336 					 const uint32_t queue,
337 					 const struct rte_flow_template_table *table,
338 					 const struct rte_flow_item items[],
339 					 const uint8_t pattern_template_index,
340 					 const struct rte_flow_action actions[],
341 					 const uint8_t action_template_index,
342 					 struct rte_flow_error *error);
343 static int flow_hw_async_create_by_index_validate(struct rte_eth_dev *dev,
344 						  const uint32_t queue,
345 						  const struct rte_flow_template_table *table,
346 						  const uint32_t rule_index,
347 						  const struct rte_flow_action actions[],
348 						  const uint8_t action_template_index,
349 						  struct rte_flow_error *error);
350 static int flow_hw_async_update_validate(struct rte_eth_dev *dev,
351 					 const uint32_t queue,
352 					 const struct rte_flow_hw *flow,
353 					 const struct rte_flow_action actions[],
354 					 const uint8_t action_template_index,
355 					 struct rte_flow_error *error);
356 static int flow_hw_async_destroy_validate(struct rte_eth_dev *dev,
357 					  const uint32_t queue,
358 					  const struct rte_flow_hw *flow,
359 					  struct rte_flow_error *error);
360 
361 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
362 
363 /* DR action flags per table type, for root and non-root tables. */
364 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
365 				[MLX5DR_TABLE_TYPE_MAX] = {
366 	{
367 		MLX5DR_ACTION_FLAG_ROOT_RX,
368 		MLX5DR_ACTION_FLAG_ROOT_TX,
369 		MLX5DR_ACTION_FLAG_ROOT_FDB,
370 	},
371 	{
372 		MLX5DR_ACTION_FLAG_HWS_RX,
373 		MLX5DR_ACTION_FLAG_HWS_TX,
374 		MLX5DR_ACTION_FLAG_HWS_FDB,
375 	},
376 };
377 
378 /* Ethernet item spec for promiscuous mode. */
379 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_spec = {
380 	.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
381 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
382 	.hdr.ether_type = 0,
383 };
384 /* Ethernet item mask for promiscuous mode. */
385 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_mask = {
386 	.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
387 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
388 	.hdr.ether_type = 0,
389 };
390 
391 /* Ethernet item spec for all multicast mode. */
392 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_spec = {
393 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
394 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
395 	.hdr.ether_type = 0,
396 };
397 /* Ethernet item mask for all multicast mode. */
398 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_mask = {
399 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
400 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
401 	.hdr.ether_type = 0,
402 };
403 
404 /* Ethernet item spec for IPv4 multicast traffic. */
405 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_spec = {
406 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 },
407 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
408 	.hdr.ether_type = 0,
409 };
410 /* Ethernet item mask for IPv4 multicast traffic. */
411 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_mask = {
412 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 },
413 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
414 	.hdr.ether_type = 0,
415 };
416 
417 /* Ethernet item spec for IPv6 multicast traffic. */
418 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_spec = {
419 	.hdr.dst_addr.addr_bytes = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 },
420 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
421 	.hdr.ether_type = 0,
422 };
423 /* Ethernet item mask for IPv6 multicast traffic. */
424 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_mask = {
425 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
426 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
427 	.hdr.ether_type = 0,
428 };
429 
430 /* Ethernet item mask for unicast traffic. */
431 static const struct rte_flow_item_eth ctrl_rx_eth_dmac_mask = {
432 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
433 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
434 	.hdr.ether_type = 0,
435 };
436 
437 /* Ethernet item spec for broadcast. */
438 static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
439 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
440 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
441 	.hdr.ether_type = 0,
442 };
443 
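/*
 * Descriptive sketch (assuming q->job_idx counts the free job entries):
 * the number of flow operations still outstanding on the queue is the
 * number of jobs taken from the queue's job pool plus the flow operations
 * already posted but not yet completed.
 */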
444 static inline uint32_t
445 flow_hw_q_pending(struct mlx5_priv *priv, uint32_t queue)
446 {
447 	struct mlx5_hw_q *q = &priv->hw_q[queue];
448 
449 	MLX5_ASSERT(q->size >= q->job_idx);
450 	return (q->size - q->job_idx) + q->ongoing_flow_ops;
451 }
452 
453 static inline void
454 flow_hw_q_inc_flow_ops(struct mlx5_priv *priv, uint32_t queue)
455 {
456 	struct mlx5_hw_q *q = &priv->hw_q[queue];
457 
458 	q->ongoing_flow_ops++;
459 }
460 
461 static inline void
462 flow_hw_q_dec_flow_ops(struct mlx5_priv *priv, uint32_t queue)
463 {
464 	struct mlx5_hw_q *q = &priv->hw_q[queue];
465 
466 	q->ongoing_flow_ops--;
467 }
468 
469 static inline enum mlx5dr_matcher_insert_mode
470 flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)
471 {
472 	if (insert_type == RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
473 		return MLX5DR_MATCHER_INSERT_BY_HASH;
474 	else
475 		return MLX5DR_MATCHER_INSERT_BY_INDEX;
476 }
477 
478 static inline enum mlx5dr_matcher_distribute_mode
479 flow_hw_matcher_distribute_mode_get(enum rte_flow_table_hash_func hash_func)
480 {
481 	if (hash_func == RTE_FLOW_TABLE_HASH_FUNC_LINEAR)
482 		return MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR;
483 	else
484 		return MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
485 }
486 
487 /**
488  * Set the hash fields according to the @p rss_desc information.
489  *
490  * @param[in] rss_desc
491  *   Pointer to the mlx5_flow_rss_desc.
492  * @param[out] hash_fields
493  *   Pointer to the RSS hash fields.
494  */
495 static void
496 flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
497 		       uint64_t *hash_fields)
498 {
499 	uint64_t fields = 0;
500 	int rss_inner = 0;
501 	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
502 
503 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
504 	if (rss_desc->level >= 2)
505 		rss_inner = 1;
506 #endif
507 	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
508 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
509 			fields |= IBV_RX_HASH_SRC_IPV4;
510 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
511 			fields |= IBV_RX_HASH_DST_IPV4;
512 		else
513 			fields |= MLX5_IPV4_IBV_RX_HASH;
514 	} else if (rss_types & MLX5_IPV6_LAYER_TYPES) {
515 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
516 			fields |= IBV_RX_HASH_SRC_IPV6;
517 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
518 			fields |= IBV_RX_HASH_DST_IPV6;
519 		else
520 			fields |= MLX5_IPV6_IBV_RX_HASH;
521 	}
522 	if (rss_types & RTE_ETH_RSS_UDP) {
523 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
524 			fields |= IBV_RX_HASH_SRC_PORT_UDP;
525 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
526 			fields |= IBV_RX_HASH_DST_PORT_UDP;
527 		else
528 			fields |= MLX5_UDP_IBV_RX_HASH;
529 	} else if (rss_types & RTE_ETH_RSS_TCP) {
530 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
531 			fields |= IBV_RX_HASH_SRC_PORT_TCP;
532 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
533 			fields |= IBV_RX_HASH_DST_PORT_TCP;
534 		else
535 			fields |= MLX5_TCP_IBV_RX_HASH;
536 	}
537 	if (rss_types & RTE_ETH_RSS_ESP)
538 		fields |= IBV_RX_HASH_IPSEC_SPI;
539 	if (rss_inner)
540 		fields |= IBV_RX_HASH_INNER;
541 	*hash_fields |= fields;
542 }
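/*
 * Example for flow_hw_hashfields_set() above (illustrative): requesting IPv4
 * and UDP RSS types without any L3/L4 SRC_ONLY/DST_ONLY restriction yields
 * MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH in *hash_fields.
 */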
543 
544 /**
545  * Generate the matching pattern item flags.
546  *
547  * @param[in] items
548  *   Pointer to the list of items.
549  *
550  * @return
551  *   Matching item flags. Flags which are not supported by the RSS hash
552  *   field function are silently ignored.
553  */
554 static uint64_t
555 flow_hw_matching_item_flags_get(const struct rte_flow_item items[])
556 {
557 	uint64_t item_flags = 0;
558 	uint64_t last_item = 0;
559 
560 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
561 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
562 		int item_type = items->type;
563 
564 		switch (item_type) {
565 		case RTE_FLOW_ITEM_TYPE_IPV4:
566 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
567 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
568 			break;
569 		case RTE_FLOW_ITEM_TYPE_IPV6:
570 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
571 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
572 			break;
573 		case RTE_FLOW_ITEM_TYPE_TCP:
574 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
575 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
576 			break;
577 		case RTE_FLOW_ITEM_TYPE_UDP:
578 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
579 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
580 			break;
581 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
582 			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
583 					     MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
584 			break;
585 		case RTE_FLOW_ITEM_TYPE_GRE:
586 			last_item = MLX5_FLOW_LAYER_GRE;
587 			break;
588 		case RTE_FLOW_ITEM_TYPE_NVGRE:
589 			last_item = MLX5_FLOW_LAYER_GRE;
590 			break;
591 		case RTE_FLOW_ITEM_TYPE_VXLAN:
592 			last_item = MLX5_FLOW_LAYER_VXLAN;
593 			break;
594 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
595 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
596 			break;
597 		case RTE_FLOW_ITEM_TYPE_GENEVE:
598 			last_item = MLX5_FLOW_LAYER_GENEVE;
599 			break;
600 		case RTE_FLOW_ITEM_TYPE_MPLS:
601 			last_item = MLX5_FLOW_LAYER_MPLS;
602 			break;
603 		case RTE_FLOW_ITEM_TYPE_GTP:
604 			last_item = MLX5_FLOW_LAYER_GTP;
605 			break;
606 		case RTE_FLOW_ITEM_TYPE_COMPARE:
607 			last_item = MLX5_FLOW_ITEM_COMPARE;
608 			break;
609 		default:
610 			break;
611 		}
612 		item_flags |= last_item;
613 	}
614 	return item_flags;
615 }
616 
617 static uint64_t
618 flow_hw_action_flags_get(const struct rte_flow_action actions[],
619 			 struct rte_flow_error *error)
620 {
621 	uint64_t action_flags = 0;
622 	const struct rte_flow_action *action;
623 
624 	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
625 		int type = (int)action->type;
626 		switch (type) {
627 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
628 			switch (MLX5_INDIRECT_ACTION_TYPE_GET(action->conf)) {
629 			case MLX5_INDIRECT_ACTION_TYPE_RSS:
630 				goto rss;
631 			case MLX5_INDIRECT_ACTION_TYPE_AGE:
632 				goto age;
633 			case MLX5_INDIRECT_ACTION_TYPE_COUNT:
634 				goto count;
635 			case MLX5_INDIRECT_ACTION_TYPE_CT:
636 				goto ct;
637 			case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
638 				goto meter;
639 			default:
640 				goto error;
641 			}
642 			break;
643 		case RTE_FLOW_ACTION_TYPE_DROP:
644 			action_flags |= MLX5_FLOW_ACTION_DROP;
645 			break;
646 		case RTE_FLOW_ACTION_TYPE_MARK:
647 			action_flags |= MLX5_FLOW_ACTION_MARK;
648 			break;
649 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
650 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
651 			break;
652 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
653 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
654 			break;
655 		case RTE_FLOW_ACTION_TYPE_JUMP:
656 			action_flags |= MLX5_FLOW_ACTION_JUMP;
657 			break;
658 		case RTE_FLOW_ACTION_TYPE_QUEUE:
659 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
660 			break;
661 		case RTE_FLOW_ACTION_TYPE_RSS:
662 rss:
663 			action_flags |= MLX5_FLOW_ACTION_RSS;
664 			break;
665 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
666 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
667 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
668 			break;
669 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
670 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
671 			break;
672 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
673 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
674 			action_flags |= MLX5_FLOW_ACTION_DECAP;
675 			break;
676 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
677 			action_flags |= MLX5_FLOW_ACTION_DECAP;
678 			break;
679 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
680 			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
681 			break;
682 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
683 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
684 			break;
685 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
686 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
687 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
688 			break;
689 		case RTE_FLOW_ACTION_TYPE_AGE:
690 age:
691 			action_flags |= MLX5_FLOW_ACTION_AGE;
692 			break;
693 		case RTE_FLOW_ACTION_TYPE_COUNT:
694 count:
695 			action_flags |= MLX5_FLOW_ACTION_COUNT;
696 			break;
697 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
698 ct:
699 			action_flags |= MLX5_FLOW_ACTION_CT;
700 			break;
701 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
702 meter:
703 			action_flags |= MLX5_FLOW_ACTION_METER;
704 			break;
705 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
706 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
707 			break;
708 		case RTE_FLOW_ACTION_TYPE_VOID:
709 		case RTE_FLOW_ACTION_TYPE_END:
710 			break;
711 		default:
712 			goto error;
713 		}
714 	}
715 	return action_flags;
716 error:
717 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
718 			   action, "invalid flow action");
719 	return 0;
720 }
721 
722 /**
723  * Register destination table DR jump action.
724  *
725  * @param[in] dev
726  *   Pointer to the rte_eth_dev structure.
727  * @param[in] cfg
728  *   Pointer to the template table configuration.
729  * @param[in] dest_group
730  *   The destination group ID.
731  * @param[out] error
732  *   Pointer to error structure.
733  *
734  * @return
735  *    Jump action on success, NULL otherwise and rte_errno is set.
736  */
737 static struct mlx5_hw_jump_action *
738 flow_hw_jump_action_register(struct rte_eth_dev *dev,
739 			     const struct mlx5_flow_template_table_cfg *cfg,
740 			     uint32_t dest_group,
741 			     struct rte_flow_error *error)
742 {
743 	struct mlx5_priv *priv = dev->data->dev_private;
744 	struct rte_flow_attr jattr = cfg->attr.flow_attr;
745 	struct mlx5_flow_group *grp;
746 	struct mlx5_flow_cb_ctx ctx = {
747 		.dev = dev,
748 		.error = error,
749 		.data = &jattr,
750 	};
751 	struct mlx5_list_entry *ge;
752 	uint32_t target_group;
753 
754 	target_group = dest_group;
755 	if (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))
756 		return NULL;
757 	jattr.group = target_group;
758 	ge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);
759 	if (!ge)
760 		return NULL;
761 	grp = container_of(ge, struct mlx5_flow_group, entry);
762 	return &grp->jump;
763 }
764 
765 /**
766  * Release jump action.
767  *
768  * @param[in] dev
769  *   Pointer to the rte_eth_dev structure.
770  * @param[in] jump
771  *   Pointer to the jump action.
772  */
774 static void
775 flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
776 {
777 	struct mlx5_priv *priv = dev->data->dev_private;
778 	struct mlx5_flow_group *grp;
779 
780 	grp = container_of(jump, struct mlx5_flow_group, jump);
781 	mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
782 }
783 
784 /**
785  * Register queue/RSS action.
786  *
787  * @param[in] dev
788  *   Pointer to the rte_eth_dev structure.
789  * @param[in] hws_flags
790  *   DR action flags.
791  * @param[in] action
792  *   rte flow action.
793  *
794  * @return
795  *    Rx hash queue (TIR) object on success, NULL otherwise and rte_errno is set.
800  */
801 static inline struct mlx5_hrxq*
802 flow_hw_tir_action_register(struct rte_eth_dev *dev,
803 			    uint32_t hws_flags,
804 			    const struct rte_flow_action *action)
805 {
806 	struct mlx5_flow_rss_desc rss_desc = {
807 		.hws_flags = hws_flags,
808 	};
809 	struct mlx5_hrxq *hrxq;
810 
811 	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
812 		const struct rte_flow_action_queue *queue = action->conf;
813 
814 		rss_desc.const_q = &queue->index;
815 		rss_desc.queue_num = 1;
816 	} else {
817 		const struct rte_flow_action_rss *rss = action->conf;
818 
819 		rss_desc.queue_num = rss->queue_num;
820 		rss_desc.const_q = rss->queue;
821 		memcpy(rss_desc.key,
822 		       !rss->key ? rss_hash_default_key : rss->key,
823 		       MLX5_RSS_HASH_KEY_LEN);
824 		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
825 		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
826 		rss_desc.symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
827 		flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
828 		flow_dv_action_rss_l34_hash_adjust(rss->types,
829 						   &rss_desc.hash_fields);
830 		if (rss->level > 1) {
831 			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
832 			rss_desc.tunnel = 1;
833 		}
834 	}
835 	hrxq = mlx5_hrxq_get(dev, &rss_desc);
836 	return hrxq;
837 }
838 
839 static __rte_always_inline int
840 flow_hw_ct_compile(struct rte_eth_dev *dev,
841 		   uint32_t queue, uint32_t idx,
842 		   struct mlx5dr_rule_action *rule_act)
843 {
844 	struct mlx5_priv *priv = dev->data->dev_private;
845 	struct mlx5_aso_ct_action *ct;
846 
847 	ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
848 	if (!ct || (!priv->shared_host && mlx5_aso_ct_available(priv->sh, queue, ct)))
849 		return -1;
850 	rule_act->action = priv->hws_ctpool->dr_action;
851 	rule_act->aso_ct.offset = ct->offset;
852 	rule_act->aso_ct.direction = ct->is_original ?
853 		MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
854 		MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
855 	return 0;
856 }
857 
858 static void
859 flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
860 {
861 	if (encap_decap->action && !encap_decap->multi_pattern)
862 		mlx5dr_action_destroy(encap_decap->action);
863 }
864 
865 static void
866 flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
867 {
868 	if (mhdr->action && !mhdr->multi_pattern)
869 		mlx5dr_action_destroy(mhdr->action);
870 }
871 
872 /**
873  * Release DR actions created by action template.
874  *
875  * DR actions are created during the table's action translation and
876  * must be released when the table is destroyed.
877  *
878  * @param[in] dev
879  *   Pointer to the rte_eth_dev structure.
880  * @param[in] acts
881  *   Pointer to the template HW steering DR actions.
882  */
883 static void
884 __flow_hw_actions_release(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
885 {
886 	struct mlx5_priv *priv = dev->data->dev_private;
887 
888 	if (acts->mark)
889 		if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
890 				rte_memory_order_relaxed) - 1))
891 			flow_hw_rxq_flag_set(dev, false);
892 
893 	if (acts->jump) {
894 		struct mlx5_flow_group *grp;
895 
896 		grp = container_of
897 			(acts->jump, struct mlx5_flow_group, jump);
898 		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
899 		acts->jump = NULL;
900 	}
901 	if (acts->tir) {
902 		mlx5_hrxq_release(dev, acts->tir->idx);
903 		acts->tir = NULL;
904 	}
905 	if (acts->encap_decap) {
906 		flow_hw_template_destroy_reformat_action(acts->encap_decap);
907 		mlx5_free(acts->encap_decap);
908 		acts->encap_decap = NULL;
909 	}
910 	if (acts->push_remove) {
911 		if (acts->push_remove->action)
912 			mlx5dr_action_destroy(acts->push_remove->action);
913 		mlx5_free(acts->push_remove);
914 		acts->push_remove = NULL;
915 	}
916 	if (acts->mhdr) {
917 		flow_hw_template_destroy_mhdr_action(acts->mhdr);
918 		mlx5_free(acts->mhdr);
919 		acts->mhdr = NULL;
920 	}
921 	if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
922 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
923 		acts->cnt_id = 0;
924 	}
925 	if (acts->mtr_id) {
926 		mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
927 		acts->mtr_id = 0;
928 	}
929 }
930 
931 /**
932  * Destroy DR actions created by action template.
933  *
934  * DR actions are created during the table's action translation and
935  * must be destroyed when the table is destroyed.
936  *
937  * @param[in] dev
938  *   Pointer to the rte_eth_dev structure.
939  * @param[in] acts
940  *   Pointer to the template HW steering DR actions.
941  */
942 static void
943 __flow_hw_action_template_destroy(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
944 {
945 	struct mlx5_priv *priv = dev->data->dev_private;
946 	struct mlx5_action_construct_data *data;
947 
948 	while (!LIST_EMPTY(&acts->act_list)) {
949 		data = LIST_FIRST(&acts->act_list);
950 		LIST_REMOVE(data, next);
951 		mlx5_ipool_free(priv->acts_ipool, data->idx);
952 	}
953 
954 	__flow_hw_actions_release(dev, acts);
955 }
956 
957 /**
958  * Allocate and initialize action construct data for a dynamic action.
959  *
960  * @param[in] priv
961  *   Pointer to the port private data structure.
962  * @param[in] type
963  *   Action type.
964  * @param[in] action_src
965  *   Offset of source rte flow action.
966  * @param[in] action_dst
967  *   Offset of destination DR action.
968  *
969  * @return
970  *    Pointer to the allocated data on success, NULL otherwise and rte_errno is set.
973  */
974 static __rte_always_inline struct mlx5_action_construct_data *
975 __flow_hw_act_data_alloc(struct mlx5_priv *priv,
976 			 enum rte_flow_action_type type,
977 			 uint16_t action_src,
978 			 uint16_t action_dst)
979 {
980 	struct mlx5_action_construct_data *act_data;
981 	uint32_t idx = 0;
982 
983 	act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
984 	if (!act_data)
985 		return NULL;
986 	act_data->idx = idx;
987 	act_data->type = type;
988 	act_data->action_src = action_src;
989 	act_data->action_dst = action_dst;
990 	return act_data;
991 }
992 
993 /**
994  * Append dynamic action to the dynamic action list.
995  *
996  * @param[in] priv
997  *   Pointer to the port private data structure.
998  * @param[in] acts
999  *   Pointer to the template HW steering DR actions.
1000  * @param[in] type
1001  *   Action type.
1002  * @param[in] action_src
1003  *   Offset of source rte flow action.
1004  * @param[in] action_dst
1005  *   Offset of destination DR action.
1006  *
1007  * @return
1008  *    0 on success, negative value otherwise and rte_errno is set.
1009  */
1010 static __rte_always_inline int
1011 __flow_hw_act_data_general_append(struct mlx5_priv *priv,
1012 				  struct mlx5_hw_actions *acts,
1013 				  enum rte_flow_action_type type,
1014 				  uint16_t action_src,
1015 				  uint16_t action_dst)
1016 {
1017 	struct mlx5_action_construct_data *act_data;
1018 
1019 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1020 	if (!act_data)
1021 		return -1;
1022 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1023 	return 0;
1024 }
1025 
1026 static __rte_always_inline int
1027 __flow_hw_act_data_indirect_append(struct mlx5_priv *priv,
1028 				   struct mlx5_hw_actions *acts,
1029 				   enum rte_flow_action_type type,
1030 				   enum rte_flow_action_type mask_type,
1031 				   uint16_t action_src,
1032 				   uint16_t action_dst)
1033 {
1034 	struct mlx5_action_construct_data *act_data;
1035 
1036 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1037 	if (!act_data)
1038 		return -1;
1039 	act_data->indirect.expected_type = mask_type;
1040 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1041 	return 0;
1042 }
1043 
1044 static __rte_always_inline int
1045 flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
1046 				      struct mlx5_hw_actions *acts,
1047 				      enum rte_flow_action_type type,
1048 				      uint16_t action_src, uint16_t action_dst,
1049 				      indirect_list_callback_t cb)
1050 {
1051 	struct mlx5_action_construct_data *act_data;
1052 
1053 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1054 	if (!act_data)
1055 		return -1;
1056 	act_data->indirect_list_cb = cb;
1057 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1058 	return 0;
1059 }

1060 /**
1061  * Append dynamic encap action to the dynamic action list.
1062  *
1063  * @param[in] priv
1064  *   Pointer to the port private data structure.
1065  * @param[in] acts
1066  *   Pointer to the template HW steering DR actions.
1067  * @param[in] type
1068  *   Action type.
1069  * @param[in] action_src
1070  *   Offset of source rte flow action.
1071  * @param[in] action_dst
1072  *   Offset of destination DR action.
1073  * @param[in] len
1074  *   Length of the data to be updated.
1075  *
1076  * @return
1077  *    0 on success, negative value otherwise and rte_errno is set.
1078  */
1079 static __rte_always_inline int
1080 __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
1081 				struct mlx5_hw_actions *acts,
1082 				enum rte_flow_action_type type,
1083 				uint16_t action_src,
1084 				uint16_t action_dst,
1085 				uint16_t len)
1086 {
1087 	struct mlx5_action_construct_data *act_data;
1088 
1089 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1090 	if (!act_data)
1091 		return -1;
1092 	act_data->encap.len = len;
1093 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1094 	return 0;
1095 }
1096 
1097 /**
1098  * Append dynamic push action to the dynamic action list.
1099  *
1100  * @param[in] dev
1101  *   Pointer to the port.
1102  * @param[in] acts
1103  *   Pointer to the template HW steering DR actions.
1104  * @param[in] type
1105  *   Action type.
1106  * @param[in] action_src
1107  *   Offset of source rte flow action.
1108  * @param[in] action_dst
1109  *   Offset of destination DR action.
1110  * @param[in] len
1111  *   Length of the data to be updated.
1112  *
1113  * @return
1114  *    Data pointer on success, NULL otherwise and rte_errno is set.
1115  */
1116 static __rte_always_inline void *
1117 __flow_hw_act_data_push_append(struct rte_eth_dev *dev,
1118 			       struct mlx5_hw_actions *acts,
1119 			       enum rte_flow_action_type type,
1120 			       uint16_t action_src,
1121 			       uint16_t action_dst,
1122 			       uint16_t len)
1123 {
1124 	struct mlx5_action_construct_data *act_data;
1125 	struct mlx5_priv *priv = dev->data->dev_private;
1126 
1127 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1128 	if (!act_data)
1129 		return NULL;
1130 	act_data->ipv6_ext.len = len;
1131 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1132 	return act_data;
1133 }
1134 
1135 static __rte_always_inline int
1136 __flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
1137 				     struct mlx5_hw_actions *acts,
1138 				     enum rte_flow_action_type type,
1139 				     uint16_t action_src,
1140 				     uint16_t action_dst,
1141 				     const struct rte_flow_action_modify_field *mf,
1142 				     uint16_t mhdr_cmds_off,
1143 				     uint16_t mhdr_cmds_end,
1144 				     bool shared,
1145 				     struct field_modify_info *field,
1146 				     struct field_modify_info *dcopy,
1147 				     uint32_t *mask)
1148 {
1149 	struct mlx5_action_construct_data *act_data;
1150 
1151 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1152 	if (!act_data)
1153 		return -1;
1154 	act_data->modify_header.action = *mf;
1155 	act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
1156 	act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
1157 	act_data->modify_header.shared = shared;
1158 	rte_memcpy(act_data->modify_header.field, field,
1159 		   sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
1160 	rte_memcpy(act_data->modify_header.dcopy, dcopy,
1161 		   sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
1162 	rte_memcpy(act_data->modify_header.mask, mask,
1163 		   sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
1164 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1165 	return 0;
1166 }
1167 
1168 /**
1169  * Append shared RSS action to the dynamic action list.
1170  *
1171  * @param[in] priv
1172  *   Pointer to the port private data structure.
1173  * @param[in] acts
1174  *   Pointer to the template HW steering DR actions.
1175  * @param[in] type
1176  *   Action type.
1177  * @param[in] action_src
1178  *   Offset of source rte flow action.
1179  * @param[in] action_dst
1180  *   Offset of destination DR action.
1181  * @param[in] idx
1182  *   Shared RSS index.
1183  * @param[in] rss
1184  *   Pointer to the shared RSS info.
1185  *
1186  * @return
1187  *    0 on success, negative value otherwise and rte_errno is set.
1188  */
1189 static __rte_always_inline int
1190 __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
1191 				     struct mlx5_hw_actions *acts,
1192 				     enum rte_flow_action_type type,
1193 				     uint16_t action_src,
1194 				     uint16_t action_dst,
1195 				     uint32_t idx,
1196 				     struct mlx5_shared_action_rss *rss)
1197 {
1198 	struct mlx5_action_construct_data *act_data;
1199 
1200 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1201 	if (!act_data)
1202 		return -1;
1203 	act_data->shared_rss.level = rss->origin.level;
1204 	act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
1205 				     rss->origin.types;
1206 	act_data->shared_rss.idx = idx;
1207 	act_data->shared_rss.symmetric_hash_function =
1208 		MLX5_RSS_IS_SYMM(rss->origin.func);
1209 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1210 	return 0;
1211 }
1212 
1213 /**
1214  * Append shared counter action to the dynamic action list.
1215  *
1216  * @param[in] priv
1217  *   Pointer to the port private data structure.
1218  * @param[in] acts
1219  *   Pointer to the template HW steering DR actions.
1220  * @param[in] type
1221  *   Action type.
1222  * @param[in] action_src
1223  *   Offset of source rte flow action.
1224  * @param[in] action_dst
1225  *   Offset of destination DR action.
1226  * @param[in] cnt_id
1227  *   Shared counter id.
1228  *
1229  * @return
1230  *    0 on success, negative value otherwise and rte_errno is set.
1231  */
1232 static __rte_always_inline int
1233 __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
1234 				     struct mlx5_hw_actions *acts,
1235 				     enum rte_flow_action_type type,
1236 				     uint16_t action_src,
1237 				     uint16_t action_dst,
1238 				     cnt_id_t cnt_id)
1239 {
1240 	struct mlx5_action_construct_data *act_data;
1241 
1242 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1243 	if (!act_data)
1244 		return -1;
1245 	act_data->type = type;
1246 	act_data->shared_counter.id = cnt_id;
1247 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1248 	return 0;
1249 }
1250 
1251 /**
1252  * Append shared meter_mark action to the dynamic action list.
1253  *
1254  * @param[in] priv
1255  *   Pointer to the port private data structure.
1256  * @param[in] acts
1257  *   Pointer to the template HW steering DR actions.
1258  * @param[in] type
1259  *   Action type.
1260  * @param[in] action_src
1261  *   Offset of source rte flow action.
1262  * @param[in] action_dst
1263  *   Offset of destination DR action.
1264  * @param[in] mtr_id
1265  *   Shared meter id.
1266  *
1267  * @return
1268  *    0 on success, negative value otherwise and rte_errno is set.
1269  */
1270 static __rte_always_inline int
1271 __flow_hw_act_data_shared_mtr_append(struct mlx5_priv *priv,
1272 				     struct mlx5_hw_actions *acts,
1273 				     enum rte_flow_action_type type,
1274 				     uint16_t action_src,
1275 				     uint16_t action_dst,
1276 				     cnt_id_t mtr_id)
1277 {
	struct mlx5_action_construct_data *act_data;
1278 
1279 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1280 	if (!act_data)
1281 		return -1;
1282 	act_data->type = type;
1283 	act_data->shared_meter.id = mtr_id;
1284 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1285 	return 0;
1286 }
1287 
1288 /**
1289  * Translate shared indirect action.
1290  *
1291  * @param[in] dev
1292  *   Pointer to the rte_eth_dev data structure.
1293  * @param[in] action
1294  *   Pointer to the shared indirect rte_flow action.
1295  * @param[in] acts
1296  *   Pointer to the template HW steering DR actions.
1297  * @param[in] action_src
1298  *   Offset of source rte flow action.
1299  * @param[in] action_dst
1300  *   Offset of destination DR action.
1301  *
1302  * @return
1303  *    0 on success, negative value otherwise and rte_errno is set.
1304  */
1305 static __rte_always_inline int
1306 flow_hw_shared_action_translate(struct rte_eth_dev *dev,
1307 				const struct rte_flow_action *action,
1308 				struct mlx5_hw_actions *acts,
1309 				uint16_t action_src,
1310 				uint16_t action_dst)
1311 {
1312 	struct mlx5_priv *priv = dev->data->dev_private;
1313 	struct mlx5_shared_action_rss *shared_rss;
1314 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
1315 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1316 	uint32_t idx = act_idx &
1317 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1318 
1319 	switch (type) {
1320 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
1321 		shared_rss = mlx5_ipool_get
1322 		  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
1323 		if (!shared_rss || __flow_hw_act_data_shared_rss_append
1324 		    (priv, acts,
1325 		    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
1326 		    action_src, action_dst, idx, shared_rss)) {
1327 			DRV_LOG(WARNING, "Indirect RSS action index %d translate failed", act_idx);
1328 			return -1;
1329 		}
1330 		break;
1331 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
1332 		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
1333 			(enum rte_flow_action_type)
1334 			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
1335 			action_src, action_dst, act_idx)) {
1336 			DRV_LOG(WARNING, "Indirect count action translate failed");
1337 			return -1;
1338 		}
1339 		break;
1340 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
1341 		/* Not supported; prevented by the validate function. */
1342 		MLX5_ASSERT(0);
1343 		break;
1344 	case MLX5_INDIRECT_ACTION_TYPE_CT:
1345 		if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
1346 				       idx, &acts->rule_acts[action_dst])) {
1347 			DRV_LOG(WARNING, "Indirect CT action translate failed");
1348 			return -1;
1349 		}
1350 		break;
1351 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1352 		if (__flow_hw_act_data_shared_mtr_append(priv, acts,
1353 			(enum rte_flow_action_type)
1354 			MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
1355 			action_src, action_dst, idx)) {
1356 			DRV_LOG(WARNING, "Indirect meter mark action translate failed");
1357 			return -1;
1358 		}
1359 		break;
1360 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
1361 		flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
1362 		break;
1363 	default:
1364 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
1365 		break;
1366 	}
1367 	return 0;
1368 }
1369 
1370 static __rte_always_inline bool
1371 flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
1372 				      const struct rte_flow_action *mask)
1373 {
1374 	const struct rte_flow_action_modify_field *v = action->conf;
1375 	const struct rte_flow_action_modify_field *m = mask->conf;
1376 
1377 	if (v->src.field == RTE_FLOW_FIELD_VALUE) {
1378 		uint32_t j;
1379 
1380 		for (j = 0; j < RTE_DIM(m->src.value); ++j) {
1381 			/*
1382 			 * Immediate value is considered to be masked
1383 			 * (and thus shared by all flow rules), if mask
1384 			 * is non-zero. Partial mask over immediate value
1385 			 * is not allowed.
1386 			 */
1387 			if (m->src.value[j])
1388 				return true;
1389 		}
1390 		return false;
1391 	}
1392 	if (v->src.field == RTE_FLOW_FIELD_POINTER)
1393 		return m->src.pvalue != NULL;
1394 	/*
1395 	 * Source field types other than VALUE and
1396 	 * POINTER are always shared.
1397 	 */
1398 	return true;
1399 }
1400 
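/*
 * Descriptive note: the helper below checks whether a new modify-header
 * command touches the same field as the last appended one. If it does,
 * the caller inserts a NOP command between them, e.g.
 * SET(field A), SET(field A) becomes SET(field A), NOP, SET(field A).
 */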
1401 static __rte_always_inline bool
1402 flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
1403 			  const struct mlx5_modification_cmd *cmd)
1404 {
1405 	struct mlx5_modification_cmd last_cmd = { { 0 } };
1406 	struct mlx5_modification_cmd new_cmd = { { 0 } };
1407 	const uint32_t cmds_num = mhdr->mhdr_cmds_num;
1408 	unsigned int last_type;
1409 	bool should_insert = false;
1410 
1411 	if (cmds_num == 0)
1412 		return false;
1413 	last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
1414 	last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
1415 	last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
1416 	last_type = last_cmd.action_type;
1417 	new_cmd = *cmd;
1418 	new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
1419 	new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
1420 	switch (new_cmd.action_type) {
1421 	case MLX5_MODIFICATION_TYPE_SET:
1422 	case MLX5_MODIFICATION_TYPE_ADD:
1423 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1424 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1425 			should_insert = new_cmd.field == last_cmd.field;
1426 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1427 			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1428 			should_insert = new_cmd.field == last_cmd.dst_field;
1429 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1430 			should_insert = false;
1431 		else
1432 			MLX5_ASSERT(false); /* Other types are not supported. */
1433 		break;
1434 	case MLX5_MODIFICATION_TYPE_COPY:
1435 	case MLX5_MODIFICATION_TYPE_ADD_FIELD:
1436 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1437 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1438 			should_insert = (new_cmd.field == last_cmd.field ||
1439 					 new_cmd.dst_field == last_cmd.field);
1440 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1441 			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1442 			should_insert = (new_cmd.field == last_cmd.dst_field ||
1443 					 new_cmd.dst_field == last_cmd.dst_field);
1444 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1445 			should_insert = false;
1446 		else
1447 			MLX5_ASSERT(false); /* Other types are not supported. */
1448 		break;
1449 	default:
1450 		/* Other action types should be rejected on AT validation. */
1451 		MLX5_ASSERT(false);
1452 		break;
1453 	}
1454 	return should_insert;
1455 }
1456 
1457 static __rte_always_inline int
1458 flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
1459 {
1460 	struct mlx5_modification_cmd *nop;
1461 	uint32_t num = mhdr->mhdr_cmds_num;
1462 
1463 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1464 		return -ENOMEM;
1465 	nop = mhdr->mhdr_cmds + num;
1466 	nop->data0 = 0;
1467 	nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
1468 	nop->data0 = rte_cpu_to_be_32(nop->data0);
1469 	nop->data1 = 0;
1470 	mhdr->mhdr_cmds_num = num + 1;
1471 	return 0;
1472 }
1473 
1474 static __rte_always_inline int
1475 flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
1476 			struct mlx5_modification_cmd *cmd)
1477 {
1478 	uint32_t num = mhdr->mhdr_cmds_num;
1479 
1480 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1481 		return -ENOMEM;
1482 	mhdr->mhdr_cmds[num] = *cmd;
1483 	mhdr->mhdr_cmds_num = num + 1;
1484 	return 0;
1485 }
1486 
1487 static __rte_always_inline int
1488 flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
1489 				   struct mlx5_flow_dv_modify_hdr_resource *resource)
1490 {
1491 	uint32_t idx;
1492 	int ret;
1493 
1494 	for (idx = 0; idx < resource->actions_num; ++idx) {
1495 		struct mlx5_modification_cmd *src = &resource->actions[idx];
1496 
1497 		if (flow_hw_should_insert_nop(mhdr, src)) {
1498 			ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1499 			if (ret)
1500 				return ret;
1501 		}
1502 		ret = flow_hw_mhdr_cmd_append(mhdr, src);
1503 		if (ret)
1504 			return ret;
1505 	}
1506 	return 0;
1507 }
1508 
1509 static __rte_always_inline void
1510 flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
1511 			  struct rte_flow_actions_template *at)
1512 {
1513 	memset(mhdr, 0, sizeof(*mhdr));
1514 	/* Modify header action without any commands is shared by default. */
1515 	mhdr->shared = true;
1516 	mhdr->pos = at->mhdr_off;
1517 }
1518 
1519 static __rte_always_inline int
1520 flow_hw_modify_field_compile(struct rte_eth_dev *dev,
1521 			     const struct rte_flow_attr *attr,
1522 			     const struct rte_flow_action *action, /* Current action from AT. */
1523 			     const struct rte_flow_action *action_mask, /* Current mask from AT. */
1524 			     struct mlx5_hw_actions *acts,
1525 			     struct mlx5_hw_modify_header_action *mhdr,
1526 			     uint16_t src_pos,
1527 			     struct rte_flow_error *error)
1528 {
1529 	struct mlx5_priv *priv = dev->data->dev_private;
1530 	const struct rte_flow_action_modify_field *conf = action->conf;
1531 	union {
1532 		struct mlx5_flow_dv_modify_hdr_resource resource;
1533 		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
1534 			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
1535 	} dummy;
1536 	struct mlx5_flow_dv_modify_hdr_resource *resource;
1537 	struct rte_flow_item item = {
1538 		.spec = NULL,
1539 		.mask = NULL
1540 	};
1541 	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1542 						{0, 0, MLX5_MODI_OUT_NONE} };
1543 	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1544 						{0, 0, MLX5_MODI_OUT_NONE} };
1545 	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
1546 	uint32_t type, value = 0;
1547 	uint16_t cmds_start, cmds_end;
1548 	bool shared;
1549 	int ret;
1550 
1551 	/*
1552 	 * Modify header action is shared if previous modify_field actions
1553 	 * are shared and currently compiled action is shared.
1554 	 */
1555 	shared = flow_hw_action_modify_field_is_shared(action, action_mask);
1556 	mhdr->shared &= shared;
1557 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1558 	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
1559 		type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
1560 								MLX5_MODIFICATION_TYPE_ADD;
1561 		/* For SET/ADD fill the destination field (field) first. */
1562 		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1563 						  conf->width, dev,
1564 						  attr, error);
1565 		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1566 				(void *)(uintptr_t)conf->src.pvalue :
1567 				(void *)(uintptr_t)&conf->src.value;
1568 		if (conf->dst.field == RTE_FLOW_FIELD_META ||
1569 		    conf->dst.field == RTE_FLOW_FIELD_TAG ||
1570 		    conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
1571 		    conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
1572 			uint8_t tag_index = flow_tag_index_get(&conf->dst);
1573 
1574 			value = *(const unaligned_uint32_t *)item.spec;
1575 			if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
1576 			    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1577 				value = rte_cpu_to_be_32(value << 16);
1578 			else
1579 				value = rte_cpu_to_be_32(value);
1580 			item.spec = &value;
1581 		} else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
1582 			   conf->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
1583 			/*
1584 			 * Both QFI and the GENEVE option type are passed as uint8_t values,
1585 			 * but they are accessed through the second least significant byte of
1586 			 * a 32-bit field in the modify header command.
1587 			 */
1588 			value = *(const uint8_t *)item.spec;
1589 			value = rte_cpu_to_be_32(value << 8);
1590 			item.spec = &value;
1591 		} else if (conf->dst.field == RTE_FLOW_FIELD_VXLAN_LAST_RSVD) {
1592 			value = *(const uint8_t *)item.spec << 24;
1593 			value = rte_cpu_to_be_32(value);
1594 			item.spec = &value;
1595 		}
1596 	} else {
1597 		type = conf->operation == RTE_FLOW_MODIFY_SET ?
1598 		       MLX5_MODIFICATION_TYPE_COPY : MLX5_MODIFICATION_TYPE_ADD_FIELD;
1599 		/* For COPY fill the destination field (dcopy) without mask. */
1600 		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1601 						  conf->width, dev,
1602 						  attr, error);
1603 		/* Then construct the source field (field) with mask. */
1604 		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1605 						  conf->width, dev,
1606 						  attr, error);
1607 	}
1608 	item.mask = &mask;
1609 	memset(&dummy, 0, sizeof(dummy));
1610 	resource = &dummy.resource;
1611 	ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
1612 	if (ret)
1613 		return ret;
1614 	MLX5_ASSERT(resource->actions_num > 0);
1615 	/*
1616 	 * If the previous modify field action collides with this one, insert a NOP command.
1617 	 * This NOP command will not be a part of the action's command range used to update
1618 	 * commands on rule creation.
1619 	 */
1620 	if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
1621 		ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1622 		if (ret)
1623 			return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1624 						  NULL, "too many modify field operations specified");
1625 	}
1626 	cmds_start = mhdr->mhdr_cmds_num;
1627 	ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
1628 	if (ret)
1629 		return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1630 					  NULL, "too many modify field operations specified");
1631 
1632 	cmds_end = mhdr->mhdr_cmds_num;
1633 	if (shared)
1634 		return 0;
1635 	ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
1636 						   src_pos, mhdr->pos, conf,
1637 						   cmds_start, cmds_end, shared,
1638 						   field, dcopy, mask);
1639 	if (ret)
1640 		return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1641 					  NULL, "not enough memory to store modify field metadata");
1642 	return 0;
1643 }
1644 
1645 static uint32_t
1646 flow_hw_count_nop_modify_field(struct mlx5_hw_modify_header_action *mhdr)
1647 {
1648 	uint32_t i;
1649 	uint32_t nops = 0;
1650 
1651 	for (i = 0; i < mhdr->mhdr_cmds_num; ++i) {
1652 		struct mlx5_modification_cmd cmd = mhdr->mhdr_cmds[i];
1653 
1654 		cmd.data0 = rte_be_to_cpu_32(cmd.data0);
1655 		if (cmd.action_type == MLX5_MODIFICATION_TYPE_NOP)
1656 			++nops;
1657 	}
1658 	return nops;
1659 }
1660 
1661 static int
1662 flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
1663 				       const struct mlx5_flow_template_table_cfg *cfg,
1664 				       struct mlx5_hw_modify_header_action *mhdr,
1665 				       struct rte_flow_error *error)
1666 {
1667 	struct mlx5_priv *priv = dev->data->dev_private;
1668 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
1669 
1670 	/*
1671 	 * Header modify pattern length limitation is only valid for HWS groups, i.e. groups > 0.
1672 	 * In group 0, MODIFY_FIELD actions are handled with header modify actions
1673 	 * managed by rdma-core.
1674 	 */
1675 	if (cfg->attr.flow_attr.group != 0 &&
1676 	    mhdr->mhdr_cmds_num > hca_attr->max_header_modify_pattern_length) {
1677 		uint32_t nops = flow_hw_count_nop_modify_field(mhdr);
1678 
1679 		DRV_LOG(ERR, "Too many modify header commands generated from "
1680 			     "MODIFY_FIELD actions. "
1681 			     "Generated HW commands = %u (amount of NOP commands = %u). "
1682 			     "Maximum supported = %u.",
1683 			     mhdr->mhdr_cmds_num, nops,
1684 			     hca_attr->max_header_modify_pattern_length);
1685 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1686 					  "Number of MODIFY_FIELD actions exceeds maximum "
1687 					  "supported limit of actions");
1688 	}
1689 	return 0;
1690 }
1691 
1692 static int
1693 flow_hw_represented_port_compile(struct rte_eth_dev *dev,
1694 				 const struct rte_flow_attr *attr,
1695 				 const struct rte_flow_action *action,
1696 				 const struct rte_flow_action *action_mask,
1697 				 struct mlx5_hw_actions *acts,
1698 				 uint16_t action_src, uint16_t action_dst,
1699 				 struct rte_flow_error *error)
1700 {
1701 	struct mlx5_priv *priv = dev->data->dev_private;
1702 	const struct rte_flow_action_ethdev *v = action->conf;
1703 	const struct rte_flow_action_ethdev *m = action_mask->conf;
1704 	int ret;
1705 
1706 	if (!attr->group)
1707 		return rte_flow_error_set(error, EINVAL,
1708 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1709 					  "represented_port action cannot"
1710 					  " be used on group 0");
1711 	if (!attr->transfer)
1712 		return rte_flow_error_set(error, EINVAL,
1713 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1714 					  NULL,
1715 					  "represented_port action requires"
1716 					  " transfer attribute");
1717 	if (attr->ingress || attr->egress)
1718 		return rte_flow_error_set(error, EINVAL,
1719 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1720 					  "represented_port action cannot"
1721 					  " be used with direction attributes");
1722 	if (!priv->master)
1723 		return rte_flow_error_set(error, EINVAL,
1724 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1725 					  "represented_port action must"
1726 					  " be used on proxy port");
1727 	if (m && !!m->port_id) {
1728 		struct mlx5_priv *port_priv;
1729 
1730 		if (!v)
1731 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1732 						  action, "port index was not provided");
1733 		port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
1734 		if (port_priv == NULL)
1735 			return rte_flow_error_set
1736 					(error, EINVAL,
1737 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1738 					 "port does not exist or unable to"
1739 					 " obtain E-Switch info for port");
1740 		MLX5_ASSERT(priv->hw_vport != NULL);
1741 		if (priv->hw_vport[v->port_id]) {
1742 			acts->rule_acts[action_dst].action =
1743 					priv->hw_vport[v->port_id];
1744 		} else {
1745 			return rte_flow_error_set
1746 					(error, EINVAL,
1747 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1748 					 "cannot use represented_port action"
1749 					 " with this port");
1750 		}
1751 	} else {
1752 		ret = __flow_hw_act_data_general_append
1753 				(priv, acts, action->type,
1754 				 action_src, action_dst);
1755 		if (ret)
1756 			return rte_flow_error_set
1757 					(error, ENOMEM,
1758 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1759 					 "not enough memory to store"
1760 					 " vport action");
1761 	}
1762 	return 0;
1763 }
1764 
1765 static __rte_always_inline int
1766 flow_hw_meter_compile(struct rte_eth_dev *dev,
1767 		      const struct mlx5_flow_template_table_cfg *cfg,
1768 		      uint16_t aso_mtr_pos,
1769 		      uint16_t jump_pos,
1770 		      const struct rte_flow_action *action,
1771 		      struct mlx5_hw_actions *acts,
1772 		      struct rte_flow_error *error)
1773 {
1774 	struct mlx5_priv *priv = dev->data->dev_private;
1775 	struct mlx5_aso_mtr *aso_mtr;
1776 	const struct rte_flow_action_meter *meter = action->conf;
1777 	uint32_t group = cfg->attr.flow_attr.group;
1778 
1779 	aso_mtr = mlx5_aso_meter_by_idx(priv, meter->mtr_id);
1780 	acts->rule_acts[aso_mtr_pos].action = priv->mtr_bulk.action;
1781 	acts->rule_acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1782 	acts->jump = flow_hw_jump_action_register
1783 		(dev, cfg, aso_mtr->fm.group, error);
1784 	if (!acts->jump)
1785 		return -ENOMEM;
1786 	acts->rule_acts[jump_pos].action = (!!group) ?
1787 				    acts->jump->hws_action :
1788 				    acts->jump->root_action;
1789 	if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
1790 		return -ENOMEM;
1791 	return 0;
1792 }
1793 
1794 static __rte_always_inline int
1795 flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t  start_pos,
1796 		      struct mlx5_hw_actions *acts)
1797 {
1798 	struct mlx5_priv *priv = dev->data->dev_private;
1799 	uint32_t pos = start_pos;
1800 	cnt_id_t cnt_id;
1801 	int ret;
1802 
1803 	ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
1804 	if (ret != 0)
1805 		return ret;
1806 	ret = mlx5_hws_cnt_pool_get_action_offset
1807 				(priv->hws_cpool,
1808 				 cnt_id,
1809 				 &acts->rule_acts[pos].action,
1810 				 &acts->rule_acts[pos].counter.offset);
1811 	if (ret != 0)
1812 		return ret;
1813 	acts->cnt_id = cnt_id;
1814 	return 0;
1815 }
1816 
1817 static __rte_always_inline bool
1818 is_of_vlan_pcp_present(const struct rte_flow_action *actions)
1819 {
1820 	/*
1821 	 * Order of RTE VLAN push actions is
1822 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
1823 	 */
1824 	return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
1825 		RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
1826 }
1827 
1828 static __rte_always_inline bool
1829 is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
1830 {
1831 	/*
1832 	 * In a masked push VLAN template, all RTE push VLAN actions are masked.
1833 	 */
1834 	return mask && mask->ethertype != 0;
1835 }
1836 
1837 static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
1838 {
1839 	/*
1840 	 * The OpenFlow Switch Specification defines the 802.1q VID as 12+1 bits.
1841 	 */
1842 	rte_be32_t type, vid, pcp;
1843 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1844 	rte_be32_t vid_lo, vid_hi;
1845 #endif
1846 
1847 	type = ((const struct rte_flow_action_of_push_vlan *)
1848 		actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
1849 	vid = ((const struct rte_flow_action_of_set_vlan_vid *)
1850 		actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
1851 	pcp = is_of_vlan_pcp_present(actions) ?
1852 	      ((const struct rte_flow_action_of_set_vlan_pcp *)
1853 		      actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
1854 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1855 	vid_hi = vid & 0xff;
1856 	vid_lo = vid >> 8;
1857 	return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type;
1858 #else
1859 	return (type << 16) | (pcp << 13) | vid;
1860 #endif
1861 }
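
/*
 * A minimal usage sketch (hypothetical values) of the action layout expected
 * by the helpers above and the resulting packed VLAN header word:
 *
 * @code{.c}
 * const struct rte_flow_action_of_push_vlan push = {
 *     .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 * };
 * const struct rte_flow_action_of_set_vlan_vid vid = { .vlan_vid = RTE_BE16(100) };
 * const struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 * const struct rte_flow_action vlan_push[] = {
 *     [MLX5_HW_VLAN_PUSH_TYPE_IDX] = {
 *         .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push,
 *     },
 *     [MLX5_HW_VLAN_PUSH_VID_IDX] = {
 *         .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid,
 *     },
 *     [MLX5_HW_VLAN_PUSH_PCP_IDX] = {
 *         .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp,
 *     },
 * };
 * // vlan_hdr_to_be32(vlan_push) yields the bytes 81 00 60 64 in network
 * // order, i.e. TPID 0x8100 followed by a TCI with PCP 3 and VID 100.
 * @endcode
 */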
1862 
1863 static __rte_always_inline struct mlx5_aso_mtr *
1864 flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
1865 			 const struct rte_flow_action *action,
1866 			 struct mlx5_hw_q_job *job, bool push,
1867 			 struct rte_flow_error *error)
1868 {
1869 	struct mlx5_priv *priv = dev->data->dev_private;
1870 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1871 	const struct rte_flow_action_meter_mark *meter_mark = action->conf;
1872 	struct mlx5_aso_mtr *aso_mtr;
1873 	struct mlx5_flow_meter_info *fm;
1874 	uint32_t mtr_id;
1875 	uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
1876 					MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1877 
1878 	if (priv->shared_host) {
1879 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1880 				   "Meter mark actions can only be created on the host port");
1881 		return NULL;
1882 	}
1883 	if (meter_mark->profile == NULL)
1884 		return NULL;
1885 	aso_mtr = mlx5_ipool_malloc(pool->idx_pool, &mtr_id);
1886 	if (!aso_mtr)
1887 		return NULL;
1888 	/* Fill the flow meter parameters. */
1889 	aso_mtr->type = ASO_METER_INDIRECT;
1890 	fm = &aso_mtr->fm;
1891 	fm->meter_id = mtr_id;
1892 	fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
1893 	fm->is_enable = meter_mark->state;
1894 	fm->color_aware = meter_mark->color_mode;
1895 	aso_mtr->pool = pool;
1896 	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
1897 			  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
1898 	aso_mtr->offset = mtr_id - 1;
1899 	aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;
1900 	job->action = (void *)(handle | mtr_id);
1901 	/* Update ASO flow meter by wqe. */
1902 	if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
1903 					 &priv->mtr_bulk, job, push)) {
1904 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1905 		return NULL;
1906 	}
1907 	/* Wait for ASO object completion. */
1908 	if (queue == MLX5_HW_INV_QUEUE &&
1909 	    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
1910 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1911 		return NULL;
1912 	}
1913 	return aso_mtr;
1914 }
1915 
1916 static __rte_always_inline int
1917 flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
1918 			   uint16_t aso_mtr_pos,
1919 			   const struct rte_flow_action *action,
1920 			   struct mlx5dr_rule_action *acts,
1921 			   uint32_t *index,
1922 			   uint32_t queue,
1923 			   struct rte_flow_error *error)
1924 {
1925 	struct mlx5_priv *priv = dev->data->dev_private;
1926 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1927 	struct mlx5_aso_mtr *aso_mtr;
1928 	struct mlx5_hw_q_job *job =
1929 		flow_hw_action_job_init(priv, queue, NULL, NULL, NULL,
1930 					MLX5_HW_Q_JOB_TYPE_CREATE,
1931 					MLX5_HW_INDIRECT_TYPE_LEGACY, NULL);
1932 
1933 	if (!job)
1934 		return -1;
1935 	aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job,
1936 					   true, error);
1937 	if (!aso_mtr) {
1938 		flow_hw_job_put(priv, job, queue);
1939 		return -1;
1940 	}
1941 
1942 	/* Compile METER_MARK action */
1943 	acts[aso_mtr_pos].action = pool->action;
1944 	acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1945 	*index = aso_mtr->fm.meter_id;
1946 	return 0;
1947 }
1948 
1949 static int
1950 flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
1951 				  __rte_unused const struct mlx5_action_construct_data *act_data,
1952 				  const struct rte_flow_action *action,
1953 				  struct mlx5dr_rule_action *dr_rule)
1954 {
1955 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
1956 	const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
1957 
1958 	dr_rule->action = mirror->mirror_action;
1959 	return 0;
1960 }
1961 
1962 /**
1963  * HWS mirror is implemented as a FW island.
1964  * The action does not support per-flow indirect list configuration.
1965  * If the template handle was masked, use that mirror action in all flow rules.
1966  * Otherwise, let each flow rule specify the mirror handle.
1967  */
1968 static int
1969 hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
1970 					 const struct rte_flow_action *action,
1971 					 const struct rte_flow_action *mask,
1972 					 struct mlx5_hw_actions *acts,
1973 					 uint16_t action_src, uint16_t action_dst)
1974 {
1975 	int ret = 0;
1976 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1977 
1978 	if (mask_conf && mask_conf->handle) {
1979 		/*
1980 		 * If the mirror handle was masked, assign the fixed DR mirror action.
1981 		 */
1982 		flow_hw_translate_indirect_mirror(dev, NULL, action,
1983 						  &acts->rule_acts[action_dst]);
1984 	} else {
1985 		struct mlx5_priv *priv = dev->data->dev_private;
1986 		ret = flow_hw_act_data_indirect_list_append
1987 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1988 			 action_src, action_dst,
1989 			 flow_hw_translate_indirect_mirror);
1990 	}
1991 	return ret;
1992 }
1993 
1994 static int
1995 flow_hw_reformat_action(__rte_unused struct rte_eth_dev *dev,
1996 			__rte_unused const struct mlx5_action_construct_data *data,
1997 			const struct rte_flow_action *action,
1998 			struct mlx5dr_rule_action *dr_rule)
1999 {
2000 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
2001 
2002 	dr_rule->action = ((struct mlx5_hw_encap_decap_action *)
2003 			   (indlst_conf->handle))->action;
2004 	if (!dr_rule->action)
2005 		return -EINVAL;
2006 	return 0;
2007 }
2008 
2009 /**
2010  * The template conf must not be masked. If the handle is masked, use the one from
2011  * the template; otherwise update it per flow rule.
2012  */
2013 static int
2014 hws_table_tmpl_translate_indirect_reformat(struct rte_eth_dev *dev,
2015 					   const struct rte_flow_action *action,
2016 					   const struct rte_flow_action *mask,
2017 					   struct mlx5_hw_actions *acts,
2018 					   uint16_t action_src, uint16_t action_dst)
2019 {
2020 	int ret = -1;
2021 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2022 	struct mlx5_priv *priv = dev->data->dev_private;
2023 
2024 	if (mask_conf && mask_conf->handle && !mask_conf->conf)
2025 		/*
2026 		 * If the handle was masked, assign the fixed DR action.
2027 		 */
2028 		ret = flow_hw_reformat_action(dev, NULL, action,
2029 					      &acts->rule_acts[action_dst]);
2030 	else if (mask_conf && !mask_conf->handle && !mask_conf->conf)
2031 		ret = flow_hw_act_data_indirect_list_append
2032 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2033 			 action_src, action_dst, flow_hw_reformat_action);
2034 	return ret;
2035 }
2036 
2037 static int
2038 flow_dr_set_meter(struct mlx5_priv *priv,
2039 		  struct mlx5dr_rule_action *dr_rule,
2040 		  const struct rte_flow_action_indirect_list *action_conf)
2041 {
2042 	const struct mlx5_indlst_legacy *legacy_obj =
2043 		(typeof(legacy_obj))action_conf->handle;
2044 	struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
2045 	uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
2046 	uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
2047 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
2048 
2049 	if (!aso_mtr)
2050 		return -EINVAL;
2051 	dr_rule->action = mtr_pool->action;
2052 	dr_rule->aso_meter.offset = aso_mtr->offset;
2053 	return 0;
2054 }
2055 
2056 __rte_always_inline static void
2057 flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
2058 {
2059 	dr_rule->aso_meter.init_color =
2060 		(enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
2061 }
2062 
2063 static int
2064 flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
2065 				 const struct mlx5_action_construct_data *act_data,
2066 				 const struct rte_flow_action *action,
2067 				 struct mlx5dr_rule_action *dr_rule)
2068 {
2069 	int ret;
2070 	struct mlx5_priv *priv = dev->data->dev_private;
2071 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
2072 	const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
2073 		(typeof(flow_conf))action_conf->conf;
2074 
2075 	ret = flow_dr_set_meter(priv, dr_rule, action_conf);
2076 	if (ret)
2077 		return ret;
2078 	if (!act_data->shared_meter.conf_masked) {
2079 		if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
2080 			flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
2081 	}
2082 	return 0;
2083 }
2084 
2085 static int
2086 hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
2087 					const struct rte_flow_action *action,
2088 					const struct rte_flow_action *mask,
2089 					struct mlx5_hw_actions *acts,
2090 					uint16_t action_src, uint16_t action_dst)
2091 {
2092 	int ret;
2093 	struct mlx5_priv *priv = dev->data->dev_private;
2094 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
2095 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2096 	bool is_handle_masked = mask_conf && mask_conf->handle;
2097 	bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
2098 	struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
2099 
2100 	if (is_handle_masked) {
2101 		ret = flow_dr_set_meter(priv, dr_rule, action->conf);
2102 		if (ret)
2103 			return ret;
2104 	}
2105 	if (is_conf_masked) {
2106 		const struct
2107 			rte_flow_indirect_update_flow_meter_mark **flow_conf =
2108 			(typeof(flow_conf))action_conf->conf;
2109 		flow_dr_mtr_flow_color(dr_rule,
2110 				       flow_conf[0]->init_color);
2111 	}
2112 	if (!is_handle_masked || !is_conf_masked) {
2113 		struct mlx5_action_construct_data *act_data;
2114 
2115 		ret = flow_hw_act_data_indirect_list_append
2116 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2117 			 action_src, action_dst, flow_hw_translate_indirect_meter);
2118 		if (ret)
2119 			return ret;
2120 		act_data = LIST_FIRST(&acts->act_list);
2121 		act_data->shared_meter.conf_masked = is_conf_masked;
2122 	}
2123 	return 0;
2124 }
2125 
2126 static int
2127 hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
2128 					 const struct rte_flow_action *action,
2129 					 const struct rte_flow_action *mask,
2130 					 struct mlx5_hw_actions *acts,
2131 					 uint16_t action_src, uint16_t action_dst)
2132 {
2133 	int ret;
2134 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
2135 	struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
2136 	uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
2137 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
2138 
2139 	switch (type) {
2140 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
2141 		ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
2142 							      acts, action_src,
2143 							      action_dst);
2144 		break;
2145 	default:
2146 		ret = -EINVAL;
2147 		break;
2148 	}
2149 	return ret;
2150 }
2151 
2152 /*
2153  * template .. indirect_list handle Ht conf Ct ..
2154  * mask     .. indirect_list handle Hm conf Cm ..
2155  *
2156  * The PMD requires Ht != 0 to resolve the handle type.
2157  * If Ht was masked (Hm != 0), the DR action is set according to Ht and will
2158  * not change. Otherwise, the DR action is resolved during flow rule build.
2159  * If Ct was masked (Cm != 0), table template processing updates the base
2160  * indirect action configuration with the Ct parameters.
2161  */
2162 static int
2163 table_template_translate_indirect_list(struct rte_eth_dev *dev,
2164 				       const struct rte_flow_action *action,
2165 				       const struct rte_flow_action *mask,
2166 				       struct mlx5_hw_actions *acts,
2167 				       uint16_t action_src, uint16_t action_dst)
2168 {
2169 	int ret = 0;
2170 	enum mlx5_indirect_list_type type;
2171 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
2172 
2173 	if (!list_conf || !list_conf->handle)
2174 		return -EINVAL;
2175 	type = mlx5_get_indirect_list_type(list_conf->handle);
2176 	switch (type) {
2177 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
2178 		ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
2179 							       acts, action_src,
2180 							       action_dst);
2181 		break;
2182 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
2183 		ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
2184 							       acts, action_src,
2185 							       action_dst);
2186 		break;
2187 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
2188 		if (list_conf->conf)
2189 			return -EINVAL;
2190 		ret = hws_table_tmpl_translate_indirect_reformat(dev, action, mask,
2191 								 acts, action_src,
2192 								 action_dst);
2193 		break;
2194 	default:
2195 		return -EINVAL;
2196 	}
2197 	return ret;
2198 }
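
/*
 * A minimal sketch (hypothetical handle and template variables) of the masked
 * case described above, i.e. Hm != 0, which fixes the DR action at template
 * translation time:
 *
 * @code{.c}
 * struct rte_flow_action_indirect_list conf = { .handle = handle, .conf = NULL };
 * const struct rte_flow_action tmpl_action = {
 *     .type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST, .conf = &conf,
 * };
 * const struct rte_flow_action tmpl_mask = {
 *     .type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST, .conf = &conf,
 * };
 * // Passing a non-NULL handle in the mask entry of
 * // rte_flow_actions_template_create() marks the handle as masked (Hm != 0).
 * @endcode
 */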
2199 
2200 static void
2201 mlx5_set_reformat_header(struct mlx5dr_action_reformat_header *hdr,
2202 			 uint8_t *encap_data,
2203 			 size_t data_size)
2204 {
2205 	hdr->sz = data_size;
2206 	hdr->data = encap_data;
2207 }
2208 
2209 static int
2210 mlx5_tbl_translate_reformat(struct mlx5_priv *priv,
2211 			    struct mlx5_hw_actions *acts,
2212 			    struct rte_flow_actions_template *at,
2213 			    const struct rte_flow_item *enc_item,
2214 			    const struct rte_flow_item *enc_item_m,
2215 			    uint8_t *encap_data, uint8_t *encap_data_m,
2216 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2217 			    size_t data_size, uint16_t reformat_src,
2218 			    enum mlx5dr_action_type refmt_type,
2219 			    struct rte_flow_error *error)
2220 {
2221 	int mp_reformat_ix = mlx5_multi_pattern_reformat_to_index(refmt_type);
2222 	struct mlx5dr_action_reformat_header hdr;
2223 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
2224 	bool shared_rfmt = false;
2225 	int ret;
2226 
2227 	MLX5_ASSERT(at->reformat_off != UINT16_MAX);
2228 	if (enc_item) {
2229 		MLX5_ASSERT(!encap_data);
2230 		ret = flow_dv_convert_encap_data(enc_item, buf, &data_size, error);
2231 		if (ret)
2232 			return ret;
2233 		encap_data = buf;
2234 		if (enc_item_m)
2235 			shared_rfmt = true;
2236 	} else if (encap_data && encap_data_m) {
2237 		shared_rfmt = true;
2238 	}
2239 	acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
2240 					sizeof(*acts->encap_decap) + data_size,
2241 					0, SOCKET_ID_ANY);
2242 	if (!acts->encap_decap)
2243 		return rte_flow_error_set(error, ENOMEM,
2244 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2245 					  NULL, "no memory for reformat context");
2246 	acts->encap_decap_pos = at->reformat_off;
2247 	acts->encap_decap->data_size = data_size;
2248 	acts->encap_decap->action_type = refmt_type;
2249 	if (shared_rfmt || mp_reformat_ix < 0) {
2250 		uint16_t reformat_ix = at->reformat_off;
2251 		/*
2252 		 * This copy is only needed in non-template mode,
2253 		 * in order to create the action later.
2254 		 */
2255 		memcpy(acts->encap_decap->data, encap_data, data_size);
2256 		acts->rule_acts[reformat_ix].reformat.data = acts->encap_decap->data;
2257 		acts->rule_acts[reformat_ix].reformat.offset = 0;
2258 		acts->encap_decap->shared = true;
2259 	} else {
2260 		uint32_t ix;
2261 		typeof(mp_ctx->reformat[0]) *reformat = mp_ctx->reformat +
2262 							mp_reformat_ix;
2263 		mlx5_set_reformat_header(&hdr, encap_data, data_size);
2264 		ix = reformat->elements_num++;
2265 		reformat->reformat_hdr[ix] = hdr;
2266 		acts->rule_acts[at->reformat_off].reformat.hdr_idx = ix;
2267 		acts->encap_decap->multi_pattern = 1;
2268 		ret = __flow_hw_act_data_encap_append
2269 			(priv, acts, (at->actions + reformat_src)->type,
2270 			 reformat_src, at->reformat_off, data_size);
2271 		if (ret)
2272 			return -rte_errno;
2273 		mlx5_multi_pattern_activate(mp_ctx);
2274 	}
2275 	return 0;
2276 }
2277 
2278 static int
2279 mlx5_tbl_create_reformat_action(struct mlx5_priv *priv,
2280 				const struct rte_flow_template_table_attr *table_attr,
2281 				struct mlx5_hw_actions *acts,
2282 				struct rte_flow_actions_template *at,
2283 				uint8_t *encap_data,
2284 				size_t data_size,
2285 				enum mlx5dr_action_type refmt_type)
2286 {
2287 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2288 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
2289 	struct mlx5dr_action_reformat_header hdr;
2290 
2291 	mlx5_set_reformat_header(&hdr, encap_data, data_size);
2292 	uint16_t reformat_ix = at->reformat_off;
2293 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
2294 				MLX5DR_ACTION_FLAG_SHARED;
2295 
2296 	acts->encap_decap->action = mlx5dr_action_create_reformat(priv->dr_ctx, refmt_type,
2297 							   1, &hdr, 0, flags);
2298 	if (!acts->encap_decap->action)
2299 		return -rte_errno;
2300 	acts->rule_acts[reformat_ix].action = acts->encap_decap->action;
2301 	return 0;
2302 }
2303 
2304 static int
2305 mlx5_tbl_translate_modify_header(struct rte_eth_dev *dev,
2306 				 const struct mlx5_flow_template_table_cfg *cfg,
2307 				 struct mlx5_hw_actions *acts,
2308 				 struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2309 				 struct mlx5_hw_modify_header_action *mhdr,
2310 				 struct rte_flow_error *error)
2311 {
2312 	uint16_t mhdr_ix = mhdr->pos;
2313 	struct mlx5dr_action_mh_pattern pattern = {
2314 		.sz = sizeof(struct mlx5_modification_cmd) * mhdr->mhdr_cmds_num
2315 	};
2316 
2317 	if (flow_hw_validate_compiled_modify_field(dev, cfg, mhdr, error)) {
2318 		__flow_hw_action_template_destroy(dev, acts);
2319 		return -rte_errno;
2320 	}
2321 	acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
2322 				 0, SOCKET_ID_ANY);
2323 	if (!acts->mhdr)
2324 		return rte_flow_error_set(error, ENOMEM,
2325 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2326 					  NULL, "translate modify_header: no memory for modify header context");
2327 	rte_memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
2328 	if (!mhdr->shared) {
2329 		pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
2330 		typeof(mp_ctx->mh) *mh = &mp_ctx->mh;
2331 		uint32_t idx = mh->elements_num;
2332 		mh->pattern[mh->elements_num++] = pattern;
2333 		acts->mhdr->multi_pattern = 1;
2334 		acts->rule_acts[mhdr_ix].modify_header.pattern_idx = idx;
2335 		mlx5_multi_pattern_activate(mp_ctx);
2336 	}
2337 	return 0;
2338 }
2339 
2340 static int
2341 mlx5_tbl_ensure_shared_modify_header(struct rte_eth_dev *dev,
2342 				     const struct mlx5_flow_template_table_cfg *cfg,
2343 				     struct mlx5_hw_actions *acts,
2344 				     struct rte_flow_error *error)
2345 {
2346 	struct mlx5_priv *priv = dev->data->dev_private;
2347 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2348 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2349 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
2350 	struct mlx5dr_action_mh_pattern pattern = {
2351 		.sz = sizeof(struct mlx5_modification_cmd) * acts->mhdr->mhdr_cmds_num
2352 	};
2353 	uint16_t mhdr_ix = acts->mhdr->pos;
2354 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] | MLX5DR_ACTION_FLAG_SHARED;
2355 
2356 	pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
2357 	acts->mhdr->action = mlx5dr_action_create_modify_header(priv->dr_ctx, 1,
2358 								&pattern, 0, flags);
2359 	if (!acts->mhdr->action)
2360 		return rte_flow_error_set(error, rte_errno,
2361 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2362 					  "translate modify_header: failed to create DR action");
2363 	acts->rule_acts[mhdr_ix].action = acts->mhdr->action;
2364 	return 0;
2365 }
2366 
2367 static int
2368 mlx5_create_ipv6_ext_reformat(struct rte_eth_dev *dev,
2369 			      const struct mlx5_flow_template_table_cfg *cfg,
2370 			      struct mlx5_hw_actions *acts,
2371 			      struct rte_flow_actions_template *at,
2372 			      uint8_t *push_data, uint8_t *push_data_m,
2373 			      size_t push_size, uint16_t recom_src,
2374 			      enum mlx5dr_action_type recom_type)
2375 {
2376 	struct mlx5_priv *priv = dev->data->dev_private;
2377 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2378 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2379 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
2380 	struct mlx5_action_construct_data *act_data;
2381 	struct mlx5dr_action_reformat_header hdr = {0};
2382 	uint32_t flag, bulk = 0;
2383 
2384 	flag = mlx5_hw_act_flag[!!attr->group][type];
2385 	acts->push_remove = mlx5_malloc(MLX5_MEM_ZERO,
2386 					sizeof(*acts->push_remove) + push_size,
2387 					0, SOCKET_ID_ANY);
2388 	if (!acts->push_remove)
2389 		return -ENOMEM;
2390 
2391 	switch (recom_type) {
2392 	case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
2393 		if (!push_data || !push_size)
2394 			goto err1;
2395 		if (!push_data_m) {
2396 			bulk = rte_log2_u32(table_attr->nb_flows);
2397 		} else {
2398 			flag |= MLX5DR_ACTION_FLAG_SHARED;
2399 			acts->push_remove->shared = 1;
2400 		}
2401 		acts->push_remove->data_size = push_size;
2402 		memcpy(acts->push_remove->data, push_data, push_size);
2403 		hdr.data = push_data;
2404 		hdr.sz = push_size;
2405 		break;
2406 	case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
2407 		flag |= MLX5DR_ACTION_FLAG_SHARED;
2408 		acts->push_remove->shared = 1;
2409 		break;
2410 	default:
2411 		break;
2412 	}
2413 
2414 	acts->push_remove->action =
2415 		mlx5dr_action_create_reformat_ipv6_ext(priv->dr_ctx,
2416 				recom_type, &hdr, bulk, flag);
2417 	if (!acts->push_remove->action)
2418 		goto err1;
2419 	acts->rule_acts[at->recom_off].action = acts->push_remove->action;
2420 	acts->rule_acts[at->recom_off].ipv6_ext.header = acts->push_remove->data;
2421 	acts->rule_acts[at->recom_off].ipv6_ext.offset = 0;
2422 	acts->push_remove_pos = at->recom_off;
2423 	if (!acts->push_remove->shared) {
2424 		act_data = __flow_hw_act_data_push_append(dev, acts,
2425 				RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH,
2426 				recom_src, at->recom_off, push_size);
2427 		if (!act_data)
2428 			goto err;
2429 	}
2430 	return 0;
2431 err:
2432 	if (acts->push_remove->action)
2433 		mlx5dr_action_destroy(acts->push_remove->action);
2434 err1:
2435 	if (acts->push_remove) {
2436 		mlx5_free(acts->push_remove);
2437 		acts->push_remove = NULL;
2438 	}
2439 	return -EINVAL;
2440 }
2441 
2442 /**
2443  * Translate rte_flow actions to DR actions.
2444  *
2445  * As the action template has already indicated the actions, translate
2446  * the rte_flow actions to DR actions where possible, so that the flow
2447  * creation stage saves the cycles of organizing the actions again.
2448  * Actions with limited information need to be added to a list and are
2449  * resolved at flow creation time.
2450  *
2451  * @param[in] dev
2452  *   Pointer to the rte_eth_dev structure.
2453  * @param[in] cfg
2454  *   Pointer to the table configuration.
2455  * @param[in/out] acts
2456  *   Pointer to the template HW steering DR actions.
2457  * @param[in] at
2458  *   Action template.
2459  * @param[in] nt_mode
2460  *   Non template rule translate.
2461  * @param[out] error
2462  *   Pointer to error structure.
2463  *
2464  * @return
2465  *   0 on success, a negative errno otherwise and rte_errno is set.
2466  */
2467 static int
2468 __flow_hw_translate_actions_template(struct rte_eth_dev *dev,
2469 				     const struct mlx5_flow_template_table_cfg *cfg,
2470 				     struct mlx5_hw_actions *acts,
2471 				     struct rte_flow_actions_template *at,
2472 				     struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2473 				     bool nt_mode,
2474 				     struct rte_flow_error *error)
2475 {
2476 	struct mlx5_priv *priv = dev->data->dev_private;
2477 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2478 	struct mlx5_hca_flex_attr *hca_attr = &priv->sh->cdev->config.hca_attr.flex;
2479 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2480 	struct rte_flow_action *actions = at->actions;
2481 	struct rte_flow_action *masks = at->masks;
2482 	enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
2483 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
2484 	const struct rte_flow_action_raw_encap *raw_encap_data;
2485 	const struct rte_flow_action_ipv6_ext_push *ipv6_ext_data;
2486 	const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
2487 	uint16_t reformat_src = 0, recom_src = 0;
2488 	uint8_t *encap_data = NULL, *encap_data_m = NULL;
2489 	uint8_t *push_data = NULL, *push_data_m = NULL;
2490 	size_t data_size = 0, push_size = 0;
2491 	struct mlx5_hw_modify_header_action mhdr = { 0 };
2492 	bool actions_end = false;
2493 	uint32_t type;
2494 	bool reformat_used = false;
2495 	bool recom_used = false;
2496 	unsigned int of_vlan_offset;
2497 	uint16_t jump_pos;
2498 	uint32_t ct_idx;
2499 	int ret, err;
2500 	uint32_t target_grp = 0;
2501 	int table_type;
2502 
2503 	flow_hw_modify_field_init(&mhdr, at);
2504 	if (attr->transfer)
2505 		type = MLX5DR_TABLE_TYPE_FDB;
2506 	else if (attr->egress)
2507 		type = MLX5DR_TABLE_TYPE_NIC_TX;
2508 	else
2509 		type = MLX5DR_TABLE_TYPE_NIC_RX;
2510 	for (; !actions_end; actions++, masks++) {
2511 		uint64_t pos = actions - at->actions;
2512 		uint16_t src_pos = pos - at->src_off[pos];
2513 		uint16_t dr_pos = at->dr_off[pos];
2514 
2515 		switch ((int)actions->type) {
2516 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2517 			if (!attr->group) {
2518 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2519 				goto err;
2520 			}
2521 			ret = table_template_translate_indirect_list
2522 				(dev, actions, masks, acts, src_pos, dr_pos);
2523 			if (ret)
2524 				goto err;
2525 			break;
2526 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
2527 			if (!attr->group) {
2528 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2529 				goto err;
2530 			}
2531 			if (actions->conf && masks->conf) {
2532 				if (flow_hw_shared_action_translate
2533 				(dev, actions, acts, src_pos, dr_pos))
2534 					goto err;
2535 			} else if (__flow_hw_act_data_indirect_append
2536 					(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
2537 					 masks->type, src_pos, dr_pos)){
2538 				goto err;
2539 			}
2540 			break;
2541 		case RTE_FLOW_ACTION_TYPE_VOID:
2542 			break;
2543 		case RTE_FLOW_ACTION_TYPE_DROP:
2544 			acts->rule_acts[dr_pos].action =
2545 				priv->hw_drop[!!attr->group];
2546 			break;
2547 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
2548 			if (!attr->group) {
2549 				DRV_LOG(ERR, "Port representor is not supported in root table.");
2550 				goto err;
2551 			}
2552 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2553 			break;
2554 		case RTE_FLOW_ACTION_TYPE_FLAG:
2555 			acts->mark = true;
2556 			acts->rule_acts[dr_pos].tag.value =
2557 				mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
2558 			acts->rule_acts[dr_pos].action =
2559 				priv->hw_tag[!!attr->group];
2560 			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
2561 					rte_memory_order_relaxed);
2562 			flow_hw_rxq_flag_set(dev, true);
2563 			break;
2564 		case RTE_FLOW_ACTION_TYPE_MARK:
2565 			acts->mark = true;
2566 			if (masks->conf &&
2567 			    ((const struct rte_flow_action_mark *)
2568 			     masks->conf)->id)
2569 				acts->rule_acts[dr_pos].tag.value =
2570 					mlx5_flow_mark_set
2571 					(((const struct rte_flow_action_mark *)
2572 					(actions->conf))->id);
2573 			else if (__flow_hw_act_data_general_append(priv, acts,
2574 								   actions->type,
2575 								   src_pos, dr_pos))
2576 				goto err;
2577 			acts->rule_acts[dr_pos].action =
2578 				priv->hw_tag[!!attr->group];
2579 			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
2580 					rte_memory_order_relaxed);
2581 			flow_hw_rxq_flag_set(dev, true);
2582 			break;
2583 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2584 			acts->rule_acts[dr_pos].action =
2585 				priv->hw_push_vlan[type];
2586 			if (is_template_masked_push_vlan(masks->conf))
2587 				acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
2588 					vlan_hdr_to_be32(actions);
2589 			else if (__flow_hw_act_data_general_append
2590 					(priv, acts, actions->type,
2591 					 src_pos, dr_pos))
2592 				goto err;
2593 			of_vlan_offset = is_of_vlan_pcp_present(actions) ?
2594 					MLX5_HW_VLAN_PUSH_PCP_IDX :
2595 					MLX5_HW_VLAN_PUSH_VID_IDX;
2596 			actions += of_vlan_offset;
2597 			masks += of_vlan_offset;
2598 			break;
2599 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2600 			acts->rule_acts[dr_pos].action =
2601 				priv->hw_pop_vlan[type];
2602 			break;
2603 		case RTE_FLOW_ACTION_TYPE_JUMP:
2604 			if (masks->conf &&
2605 			    ((const struct rte_flow_action_jump *)
2606 			     masks->conf)->group) {
2607 				uint32_t jump_group =
2608 					((const struct rte_flow_action_jump *)
2609 					actions->conf)->group;
2610 				acts->jump = flow_hw_jump_action_register
2611 						(dev, cfg, jump_group, error);
2612 				if (!acts->jump)
2613 					goto err;
2614 				acts->rule_acts[dr_pos].action = (!!attr->group) ?
2615 								 acts->jump->hws_action :
2616 								 acts->jump->root_action;
2617 			} else if (__flow_hw_act_data_general_append
2618 					(priv, acts, actions->type,
2619 					 src_pos, dr_pos)){
2620 				goto err;
2621 			}
2622 			break;
2623 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2624 			if (masks->conf &&
2625 			    ((const struct rte_flow_action_queue *)
2626 			     masks->conf)->index) {
2627 				acts->tir = flow_hw_tir_action_register
2628 				(dev, mlx5_hw_act_flag[!!attr->group][type],
2629 				 actions);
2630 				if (!acts->tir)
2631 					goto err;
2632 				acts->rule_acts[dr_pos].action =
2633 					acts->tir->action;
2634 			} else if (__flow_hw_act_data_general_append
2635 					(priv, acts, actions->type,
2636 					 src_pos, dr_pos)) {
2637 				goto err;
2638 			}
2639 			break;
2640 		case RTE_FLOW_ACTION_TYPE_RSS:
2641 			if (actions->conf && masks->conf) {
2642 				acts->tir = flow_hw_tir_action_register
2643 				(dev, mlx5_hw_act_flag[!!attr->group][type],
2644 				 actions);
2645 				if (!acts->tir)
2646 					goto err;
2647 				acts->rule_acts[dr_pos].action =
2648 					acts->tir->action;
2649 			} else if (__flow_hw_act_data_general_append
2650 					(priv, acts, actions->type,
2651 					 src_pos, dr_pos)) {
2652 				goto err;
2653 			}
2654 			break;
2655 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2656 			MLX5_ASSERT(!reformat_used);
2657 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2658 							 actions->conf);
2659 			if (masks->conf)
2660 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2661 								   masks->conf);
2662 			reformat_used = true;
2663 			reformat_src = src_pos;
2664 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2665 			break;
2666 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2667 			MLX5_ASSERT(!reformat_used);
2668 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2669 							 actions->conf);
2670 			if (masks->conf)
2671 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2672 								   masks->conf);
2673 			reformat_used = true;
2674 			reformat_src = src_pos;
2675 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2676 			break;
2677 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2678 			raw_encap_data =
2679 				(const struct rte_flow_action_raw_encap *)
2680 				 masks->conf;
2681 			if (raw_encap_data)
2682 				encap_data_m = raw_encap_data->data;
2683 			raw_encap_data =
2684 				(const struct rte_flow_action_raw_encap *)
2685 				 actions->conf;
2686 			encap_data = raw_encap_data->data;
2687 			data_size = raw_encap_data->size;
2688 			if (reformat_used) {
2689 				refmt_type = data_size <
2690 				MLX5_ENCAPSULATION_DECISION_SIZE ?
2691 				MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
2692 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
2693 			} else {
2694 				reformat_used = true;
2695 				refmt_type =
2696 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2697 			}
2698 			reformat_src = src_pos;
2699 			break;
2700 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2701 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2702 			MLX5_ASSERT(!reformat_used);
2703 			reformat_used = true;
2704 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2705 			break;
2706 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2707 			reformat_used = true;
2708 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2709 			break;
2710 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
2711 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2712 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2713 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2714 				goto err;
2715 			}
2716 			MLX5_ASSERT(!recom_used && !recom_type);
2717 			recom_used = true;
2718 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
2719 			ipv6_ext_data =
2720 				(const struct rte_flow_action_ipv6_ext_push *)masks->conf;
2721 			if (ipv6_ext_data)
2722 				push_data_m = ipv6_ext_data->data;
2723 			ipv6_ext_data =
2724 				(const struct rte_flow_action_ipv6_ext_push *)actions->conf;
2725 			if (ipv6_ext_data) {
2726 				push_data = ipv6_ext_data->data;
2727 				push_size = ipv6_ext_data->size;
2728 			}
2729 			recom_src = src_pos;
2730 			break;
2731 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
2732 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2733 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2734 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2735 				goto err;
2736 			}
2737 			recom_used = true;
2738 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
2739 			break;
2740 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
2741 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2742 						&target_grp, error);
2743 			if (ret)
2744 				return ret;
2745 			if (target_grp == 0) {
2746 				__flow_hw_action_template_destroy(dev, acts);
2747 				return rte_flow_error_set(error, ENOTSUP,
2748 						RTE_FLOW_ERROR_TYPE_ACTION,
2749 						NULL,
2750 						"Send to kernel action on root table is not supported in HW steering mode");
2751 			}
2752 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
2753 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
2754 				      MLX5DR_TABLE_TYPE_FDB);
2755 			acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
2756 			break;
2757 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
2758 			err = flow_hw_modify_field_compile(dev, attr, actions,
2759 							   masks, acts, &mhdr,
2760 							   src_pos, error);
2761 			if (err)
2762 				goto err;
2763 			break;
2764 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2765 			if (flow_hw_represented_port_compile
2766 					(dev, attr, actions,
2767 					 masks, acts, src_pos, dr_pos, error))
2768 				goto err;
2769 			break;
2770 		case RTE_FLOW_ACTION_TYPE_METER:
2771 			/*
2772 			 * The METER action is compiled to 2 DR actions - ASO_METER and FT.
2773 			 * The calculated DR offset is stored only for ASO_METER; FT
2774 			 * is assumed to be the next action.
2775 			 */
2776 			jump_pos = dr_pos + 1;
2777 			if (actions->conf && masks->conf &&
2778 			    ((const struct rte_flow_action_meter *)
2779 			     masks->conf)->mtr_id) {
2780 				err = flow_hw_meter_compile(dev, cfg,
2781 							    dr_pos, jump_pos, actions, acts, error);
2782 				if (err)
2783 					goto err;
2784 			} else if (__flow_hw_act_data_general_append(priv, acts,
2785 								     actions->type,
2786 								     src_pos,
2787 								     dr_pos))
2788 				goto err;
2789 			break;
2790 		case RTE_FLOW_ACTION_TYPE_AGE:
2791 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2792 						&target_grp, error);
2793 			if (ret)
2794 				return ret;
2795 			if (target_grp == 0) {
2796 				__flow_hw_action_template_destroy(dev, acts);
2797 				return rte_flow_error_set(error, ENOTSUP,
2798 						RTE_FLOW_ERROR_TYPE_ACTION,
2799 						NULL,
2800 						"Age action on root table is not supported in HW steering mode");
2801 			}
2802 			if (__flow_hw_act_data_general_append(priv, acts,
2803 							      actions->type,
2804 							      src_pos,
2805 							      dr_pos))
2806 				goto err;
2807 			break;
2808 		case RTE_FLOW_ACTION_TYPE_COUNT:
2809 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2810 						&target_grp, error);
2811 			if (ret)
2812 				return ret;
2813 			if (target_grp == 0) {
2814 				__flow_hw_action_template_destroy(dev, acts);
2815 				return rte_flow_error_set(error, ENOTSUP,
2816 						RTE_FLOW_ERROR_TYPE_ACTION,
2817 						NULL,
2818 						"Counter action on root table is not supported in HW steering mode");
2819 			}
2820 			if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
2821 			    (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
2822 				/*
2823 				 * When both COUNT and AGE are requested, it is
2824 				 * saved as an AGE action, which also creates the
2825 				 * counter.
2826 				 */
2827 				break;
2828 			if (masks->conf &&
2829 			    ((const struct rte_flow_action_count *)
2830 			     masks->conf)->id) {
2831 				err = flow_hw_cnt_compile(dev, dr_pos, acts);
2832 				if (err)
2833 					goto err;
2834 			} else if (__flow_hw_act_data_general_append
2835 					(priv, acts, actions->type,
2836 					 src_pos, dr_pos)) {
2837 				goto err;
2838 			}
2839 			break;
2840 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2841 			if (masks->conf) {
2842 				ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(actions->conf);
2843 				if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
2844 						       &acts->rule_acts[dr_pos]))
2845 					goto err;
2846 			} else if (__flow_hw_act_data_general_append
2847 					(priv, acts, actions->type,
2848 					 src_pos, dr_pos)) {
2849 				goto err;
2850 			}
2851 			break;
2852 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
2853 			if (actions->conf && masks->conf &&
2854 			    ((const struct rte_flow_action_meter_mark *)
2855 			     masks->conf)->profile) {
2856 				err = flow_hw_meter_mark_compile(dev,
2857 								 dr_pos, actions,
2858 								 acts->rule_acts,
2859 								 &acts->mtr_id,
2860 								 MLX5_HW_INV_QUEUE,
2861 								 error);
2862 				if (err)
2863 					goto err;
2864 			} else if (__flow_hw_act_data_general_append(priv, acts,
2865 								     actions->type,
2866 								     src_pos,
2867 								     dr_pos))
2868 				goto err;
2869 			break;
2870 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
2871 			/* Internal, can be skipped. */
2872 			if (!!attr->group) {
2873 				DRV_LOG(ERR, "DEFAULT MISS action is only"
2874 					" supported in root table.");
2875 				goto err;
2876 			}
2877 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2878 			break;
2879 		case RTE_FLOW_ACTION_TYPE_NAT64:
2880 			if (masks->conf &&
2881 			    ((const struct rte_flow_action_nat64 *)masks->conf)->type) {
2882 				const struct rte_flow_action_nat64 *nat64_c =
2883 					(const struct rte_flow_action_nat64 *)actions->conf;
2884 
2885 				acts->rule_acts[dr_pos].action =
2886 					priv->action_nat64[type][nat64_c->type];
2887 			} else if (__flow_hw_act_data_general_append(priv, acts,
2888 								     actions->type,
2889 								     src_pos, dr_pos))
2890 				goto err;
2891 			break;
2892 		case RTE_FLOW_ACTION_TYPE_END:
2893 			actions_end = true;
2894 			break;
2895 		default:
2896 			break;
2897 		}
2898 	}
2899 	if (mhdr.pos != UINT16_MAX) {
2900 		ret = mlx5_tbl_translate_modify_header(dev, cfg, acts, mp_ctx, &mhdr, error);
2901 		if (ret)
2902 			goto err;
2903 		if (!nt_mode && mhdr.shared) {
2904 			ret = mlx5_tbl_ensure_shared_modify_header(dev, cfg, acts, error);
2905 			if (ret)
2906 				goto err;
2907 		}
2908 	}
2909 	if (reformat_used) {
2910 		ret = mlx5_tbl_translate_reformat(priv, acts, at,
2911 						  enc_item, enc_item_m,
2912 						  encap_data, encap_data_m,
2913 						  mp_ctx, data_size,
2914 						  reformat_src,
2915 						  refmt_type, error);
2916 		if (ret)
2917 			goto err;
2918 		if (!nt_mode && acts->encap_decap->shared) {
2919 			ret = mlx5_tbl_create_reformat_action(priv, table_attr, acts, at,
2920 							      encap_data, data_size,
2921 							      refmt_type);
2922 			if (ret)
2923 				goto err;
2924 		}
2925 	}
2926 	if (recom_used) {
2927 		MLX5_ASSERT(at->recom_off != UINT16_MAX);
2928 		ret = mlx5_create_ipv6_ext_reformat(dev, cfg, acts, at, push_data,
2929 						    push_data_m, push_size, recom_src,
2930 						    recom_type);
2931 		if (ret)
2932 			goto err;
2933 	}
2934 	return 0;
2935 err:
2936 	/* Set a default error if rte_errno was not set before reaching the error state. */
2937 	if (!rte_errno)
2938 		rte_errno = EINVAL;
2939 	err = rte_errno;
2940 	__flow_hw_action_template_destroy(dev, acts);
2941 	return rte_flow_error_set(error, err,
2942 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2943 				  "failed to create rte table");
2944 }
2945 
2946 /**
2947  * Translate rte_flow actions to DR actions.
2948  *
2949  * As the action template has already indicated the actions, translate
2950  * the rte_flow actions to DR actions where possible, so that the flow
2951  * creation stage saves the cycles of organizing the actions again.
2952  * Actions with limited information need to be added to a list and are
2953  * resolved at flow creation time.
2954  *
2955  * @param[in] dev
2956  *   Pointer to the rte_eth_dev structure.
2957  * @param[in] cfg
2958  *   Pointer to the table configuration.
2959  * @param[in/out] acts
2960  *   Pointer to the template HW steering DR actions.
2961  * @param[in] at
2962  *   Action template.
2963  * @param[out] error
2964  *   Pointer to error structure.
2965  *
2966  * @return
2967  *   0 on success, a negative errno otherwise and rte_errno is set.
2968  */
2969 static int
2970 flow_hw_translate_actions_template(struct rte_eth_dev *dev,
2971 			    const struct mlx5_flow_template_table_cfg *cfg,
2972 			    struct mlx5_hw_actions *acts,
2973 			    struct rte_flow_actions_template *at,
2974 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2975 			    struct rte_flow_error *error)
2976 {
2977 	return __flow_hw_translate_actions_template(dev, cfg, acts, at, mp_ctx, false, error);
2978 }
2979 
2980 static __rte_always_inline struct mlx5dr_rule_action *
2981 flow_hw_get_dr_action_buffer(struct mlx5_priv *priv,
2982 			     struct rte_flow_template_table *table,
2983 			     uint8_t action_template_index,
2984 			     uint32_t queue)
2985 {
2986 	uint32_t offset = action_template_index * priv->nb_queue + queue;
2987 
2988 	return &table->rule_acts[offset].acts[0];
2989 }
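
/*
 * Illustrative example of the indexing above (hypothetical numbers): with
 * priv->nb_queue == 4, the per-queue DR action buffer of action template 2
 * for queue 1 starts at table->rule_acts[2 * 4 + 1], i.e. offset 9.
 */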
2990 
2991 static void
2992 flow_hw_populate_rule_acts_caches(struct rte_eth_dev *dev,
2993 				  struct rte_flow_template_table *table,
2994 				  uint8_t at_idx)
2995 {
2996 	struct mlx5_priv *priv = dev->data->dev_private;
2997 	uint32_t q;
2998 
2999 	for (q = 0; q < priv->nb_queue; ++q) {
3000 		struct mlx5dr_rule_action *rule_acts =
3001 				flow_hw_get_dr_action_buffer(priv, table, at_idx, q);
3002 
3003 		rte_memcpy(rule_acts, table->ats[at_idx].acts.rule_acts,
3004 			   sizeof(table->ats[at_idx].acts.rule_acts));
3005 	}
3006 }
3007 
3008 /**
3009  * Translate rte_flow actions to DR action.
3010  *
3011  * @param[in] dev
3012  *   Pointer to the rte_eth_dev structure.
3013  * @param[in] tbl
3014  *   Pointer to the flow template table.
3015  * @param[out] error
3016  *   Pointer to error structure.
3017  *
3018  * @return
3019  *    0 on success, negative value otherwise and rte_errno is set.
3020  */
3021 static int
3022 flow_hw_translate_all_actions_templates(struct rte_eth_dev *dev,
3023 			  struct rte_flow_template_table *tbl,
3024 			  struct rte_flow_error *error)
3025 {
3026 	int ret;
3027 	uint32_t i;
3028 
3029 	for (i = 0; i < tbl->nb_action_templates; i++) {
3030 		if (flow_hw_translate_actions_template(dev, &tbl->cfg,
3031 						&tbl->ats[i].acts,
3032 						tbl->ats[i].action_template,
3033 						&tbl->mpctx, error))
3034 			goto err;
3035 		flow_hw_populate_rule_acts_caches(dev, tbl, i);
3036 	}
3037 	ret = mlx5_tbl_multi_pattern_process(dev, tbl, &tbl->mpctx.segments[0],
3038 					     rte_log2_u32(tbl->cfg.attr.nb_flows),
3039 					     error);
3040 	if (ret)
3041 		goto err;
3042 	return 0;
3043 err:
3044 	while (i--)
3045 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
3046 	return -1;
3047 }
3048 
3049 /**
3050  * Get shared indirect action.
3051  *
3052  * @param[in] dev
3053  *   Pointer to the rte_eth_dev data structure.
3054  * @param[in] act_data
3055  *   Pointer to the recorded action construct data.
3056  * @param[in] item_flags
3057  *   The matcher item_flags used for RSS lookup.
3058  * @param[in] rule_act
3059  *   Pointer to the shared action's destination rule DR action.
3060  *
3061  * @return
3062  *    0 on success, negative value otherwise and rte_errno is set.
3063  */
3064 static __rte_always_inline int
3065 flow_hw_shared_action_get(struct rte_eth_dev *dev,
3066 			  struct mlx5_action_construct_data *act_data,
3067 			  const uint64_t item_flags,
3068 			  struct mlx5dr_rule_action *rule_act)
3069 {
3070 	struct mlx5_priv *priv = dev->data->dev_private;
3071 	struct mlx5_flow_rss_desc rss_desc = { 0 };
3072 	uint64_t hash_fields = 0;
3073 	uint32_t hrxq_idx = 0;
3074 	struct mlx5_hrxq *hrxq = NULL;
3075 	int act_type = act_data->type;
3076 
3077 	switch (act_type) {
3078 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3079 		rss_desc.level = act_data->shared_rss.level;
3080 		rss_desc.types = act_data->shared_rss.types;
3081 		rss_desc.symmetric_hash_function = act_data->shared_rss.symmetric_hash_function;
3082 		flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
3083 		hrxq_idx = flow_dv_action_rss_hrxq_lookup
3084 			(dev, act_data->shared_rss.idx, hash_fields);
3085 		if (hrxq_idx)
3086 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
3087 					      hrxq_idx);
3088 		if (hrxq) {
3089 			rule_act->action = hrxq->action;
3090 			return 0;
3091 		}
3092 		break;
3093 	default:
3094 		DRV_LOG(WARNING, "Unsupported shared action type:%d",
3095 			act_data->type);
3096 		break;
3097 	}
3098 	return -1;
3099 }
3100 
3101 static void
3102 flow_hw_construct_quota(struct mlx5_priv *priv,
3103 			struct mlx5dr_rule_action *rule_act, uint32_t qid)
3104 {
3105 	rule_act->action = priv->quota_ctx.dr_action;
3106 	rule_act->aso_meter.offset = qid - 1;
3107 	rule_act->aso_meter.init_color =
3108 		MLX5DR_ACTION_ASO_METER_COLOR_GREEN;
3109 }
3110 
3111 /**
3112  * Construct shared indirect action.
3113  *
3114  * @param[in] dev
3115  *   Pointer to the rte_eth_dev data structure.
3116  * @param[in] queue
3117  *   The flow creation queue index.
3118  * @param[in] action
3119  *   Pointer to the shared indirect rte_flow action.
3120  * @param[in] table
3121  *   Pointer to the flow table.
3122  * @param[in] item_flags
3123  *   Item flags.
3124  * @param[in] action_flags
3125  *   Actions bit-map detected in this template.
3126  * @param[in, out] flow
3127  *   Pointer to the flow containing the counter.
3128  * @param[in] rule_act
3129  *   Pointer to the shared action's destination rule DR action.
3130  *
3131  * @return
3132  *    0 on success, negative value otherwise and rte_errno is set.
3133  */
3134 static __rte_always_inline int
3135 flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
3136 				const struct rte_flow_action *action,
3137 				struct rte_flow_template_table *table __rte_unused,
3138 				const uint64_t item_flags, uint64_t action_flags,
3139 				struct rte_flow_hw *flow,
3140 				struct mlx5dr_rule_action *rule_act)
3141 {
3142 	struct mlx5_priv *priv = dev->data->dev_private;
3143 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3144 	struct mlx5_action_construct_data act_data;
3145 	struct mlx5_shared_action_rss *shared_rss;
3146 	struct mlx5_aso_mtr *aso_mtr;
3147 	struct mlx5_age_info *age_info;
3148 	struct mlx5_hws_age_param *param;
3149 	struct rte_flow_hw_aux *aux;
3150 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
3151 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
3152 	uint32_t idx = act_idx &
3153 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3154 	cnt_id_t age_cnt;
3155 
3156 	memset(&act_data, 0, sizeof(act_data));
3157 	switch (type) {
3158 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
3159 		act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
3160 		shared_rss = mlx5_ipool_get
3161 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
3162 		if (!shared_rss)
3163 			return -1;
3164 		act_data.shared_rss.idx = idx;
3165 		act_data.shared_rss.level = shared_rss->origin.level;
3166 		act_data.shared_rss.types = !shared_rss->origin.types ?
3167 					    RTE_ETH_RSS_IP :
3168 					    shared_rss->origin.types;
3169 		act_data.shared_rss.symmetric_hash_function =
3170 			MLX5_RSS_IS_SYMM(shared_rss->origin.func);
3171 
3172 		if (flow_hw_shared_action_get
3173 				(dev, &act_data, item_flags, rule_act))
3174 			return -1;
3175 		break;
3176 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
3177 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
3178 				act_idx,
3179 				&rule_act->action,
3180 				&rule_act->counter.offset))
3181 			return -1;
3182 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3183 		flow->cnt_id = act_idx;
3184 		break;
3185 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
3186 		aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3187 		/*
3188 		 * Save the index with the indirect type, to recognize
3189 		 * it in flow destroy.
3190 		 */
3191 		mlx5_flow_hw_aux_set_age_idx(flow, aux, act_idx);
3192 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
3193 		if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
3194 			/*
3195 			 * The mutual update for indirect AGE & COUNT will be
3196 			 * performed later, after we have IDs for both of them.
3197 			 */
3198 			break;
3199 		age_info = GET_PORT_AGE_INFO(priv);
3200 		param = mlx5_ipool_get(age_info->ages_ipool, idx);
3201 		if (param == NULL)
3202 			return -1;
3203 		if (action_flags & MLX5_FLOW_ACTION_COUNT) {
3204 			if (mlx5_hws_cnt_pool_get(priv->hws_cpool,
3205 						  &param->queue_id, &age_cnt,
3206 						  idx) < 0)
3207 				return -1;
3208 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3209 			flow->cnt_id = age_cnt;
3210 			param->nb_cnts++;
3211 		} else {
3212 			/*
3213 			 * Get the counter of this indirect AGE or create one
3214 			 * if it doesn't exist.
3215 			 */
3216 			age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
3217 			if (age_cnt == 0)
3218 				return -1;
3219 		}
3220 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
3221 						     age_cnt, &rule_act->action,
3222 						     &rule_act->counter.offset))
3223 			return -1;
3224 		break;
3225 	case MLX5_INDIRECT_ACTION_TYPE_CT:
3226 		if (flow_hw_ct_compile(dev, queue, idx, rule_act))
3227 			return -1;
3228 		break;
3229 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
3230 		/* Find ASO object. */
3231 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
3232 		if (!aso_mtr)
3233 			return -1;
3234 		rule_act->action = pool->action;
3235 		rule_act->aso_meter.offset = aso_mtr->offset;
3236 		break;
3237 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
3238 		flow_hw_construct_quota(priv, rule_act, idx);
3239 		break;
3240 	default:
3241 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
3242 		break;
3243 	}
3244 	return 0;
3245 }
3246 
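/*
 * Illustrative sketch of the indirect action handle layout decoded above:
 * the handle received in action->conf packs the action type and the pool
 * index into one 32-bit value.
 *
 * @code{.c}
 * uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
 * uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 * uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 * @endcode
 */
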
3247 static __rte_always_inline int
3248 flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
3249 {
3250 	struct mlx5_modification_cmd cmd_he = {
3251 		.data0 = rte_be_to_cpu_32(cmd->data0),
3252 		.data1 = 0,
3253 	};
3254 
3255 	return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
3256 }
3257 
3258 /**
3259  * Construct the modify header commands for a MODIFY_FIELD action.
3260  *
3261  * Fills in the data portion of the pre-translated modify header commands
3262  * according to the immediate value or pointer carried by the rte_flow
3263  * MODIFY_FIELD action provided at flow creation time. Shared modify
3264  * header actions are skipped because their commands were already
3265  * completed during template translation; NOP commands are skipped
3266  * as well.
3267  *
3268  * @param[in, out] mhdr_cmd
3269  *   Array of modify header commands to be filled.
3270  * @param[in] act_data
3271  *   Action construction data for this MODIFY_FIELD action,
3272  *   including the command offsets and the translation mask.
3273  * @param[in] hw_acts
3274  *   Pointer to translated actions from template.
3275  * @param[in] action
3276  *   Pointer to the rte_flow MODIFY_FIELD action carrying the
3277  *   value or pointer to be written.
3278  *
3279  * @return
3280  *    0 on success, negative value otherwise.
3281  */
3282 static __rte_always_inline int
3283 flow_hw_modify_field_construct(struct mlx5_modification_cmd *mhdr_cmd,
3284 			       struct mlx5_action_construct_data *act_data,
3285 			       const struct mlx5_hw_actions *hw_acts,
3286 			       const struct rte_flow_action *action)
3287 {
3288 	const struct rte_flow_action_modify_field *mhdr_action = action->conf;
3289 	uint8_t values[16] = { 0 };
3290 	unaligned_uint32_t *value_p;
3291 	uint32_t i;
3292 	struct field_modify_info *field;
3293 
3294 	if (!hw_acts->mhdr)
3295 		return -1;
3296 	if (hw_acts->mhdr->shared || act_data->modify_header.shared)
3297 		return 0;
3298 	MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
3299 		    mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
3300 	if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
3301 	    mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
3302 		return 0;
3303 	if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
3304 		rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
3305 	else
3306 		rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
3307 	if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
3308 	    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
3309 	    mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
3310 	    mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
3311 		uint8_t tag_index = flow_tag_index_get(&mhdr_action->dst);
3312 
3313 		value_p = (unaligned_uint32_t *)values;
3314 		if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
3315 		    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
3316 			*value_p = rte_cpu_to_be_32(*value_p << 16);
3317 		else
3318 			*value_p = rte_cpu_to_be_32(*value_p);
3319 	} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
3320 		   mhdr_action->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
3321 		uint32_t tmp;
3322 
3323 		/*
3324 		 * Both QFI and the Geneve option type are passed as a uint8_t integer,
3325 		 * but they are accessed through the second least significant byte of a
3326 		 * 32-bit field in the modify header command.
3327 		 */
3328 		tmp = values[0];
3329 		value_p = (unaligned_uint32_t *)values;
3330 		*value_p = rte_cpu_to_be_32(tmp << 8);
3331 	}
3332 	i = act_data->modify_header.mhdr_cmds_off;
3333 	field = act_data->modify_header.field;
3334 	do {
3335 		uint32_t off_b;
3336 		uint32_t mask;
3337 		uint32_t data;
3338 		const uint8_t *mask_src;
3339 
3340 		if (i >= act_data->modify_header.mhdr_cmds_end)
3341 			return -1;
3342 		if (flow_hw_mhdr_cmd_is_nop(&mhdr_cmd[i])) {
3343 			++i;
3344 			continue;
3345 		}
3346 		mask_src = (const uint8_t *)act_data->modify_header.mask;
3347 		mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
3348 		if (!mask) {
3349 			++field;
3350 			continue;
3351 		}
3352 		off_b = rte_bsf32(mask);
3353 		data = flow_dv_fetch_field(values + field->offset, field->size);
3354 		/*
3355 		 * IPv6 DSCP uses OUT_IPV6_TRAFFIC_CLASS as the field ID, but DSCP
3356 		 * occupies its upper 6 bits. Shift the data left for IPv6 DSCP.
3357 		 */
3358 		if (field->id == MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS &&
3359 		    mhdr_action->dst.field == RTE_FLOW_FIELD_IPV6_DSCP)
3360 			data <<= MLX5_IPV6_HDR_DSCP_SHIFT;
3361 		data = (data & mask) >> off_b;
3362 		mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
3363 		++field;
3364 	} while (field->size);
3365 	return 0;
3366 }
3367 
3368 /**
3369  * Release any actions allocated for the flow rule during actions construction.
3370  *
3371  * @param[in] flow
3372  *   Pointer to flow structure.
3373  */
3374 static void
3375 flow_hw_release_actions(struct rte_eth_dev *dev,
3376 			uint32_t queue,
3377 			struct rte_flow_hw *flow)
3378 {
3379 	struct mlx5_priv *priv = dev->data->dev_private;
3380 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3381 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3382 
3383 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
3384 		flow_hw_jump_release(dev, flow->jump);
3385 	else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
3386 		mlx5_hrxq_obj_release(dev, flow->hrxq);
3387 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
3388 		flow_hw_age_count_release(priv, queue, flow, NULL);
3389 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
3390 		mlx5_ipool_free(pool->idx_pool, mlx5_flow_hw_aux_get_mtr_id(flow, aux));
3391 }
3392 
3393 /**
3394  * Construct flow action array.
3395  *
3396  * If the action template contains dynamic actions, these actions need to
3397  * be updated according to the rte_flow actions during flow creation.
3398  *
3399  * @param[in] dev
3400  *   Pointer to the rte_eth_dev structure.
3401  * @param[in] flow
3402  *   Pointer to flow structure.
3403  * @param[in] ap
3404  *   Pointer to container for temporarily constructed actions' parameters.
3405  * @param[in] hw_at
3406  *   Pointer to the action template and its translated actions.
3407  * @param[in] item_flags
3408  *   Item flags.
3409  * @param[in] table
3410  *   Pointer to the template table.
3411  * @param[in] actions
3412  *   Array of rte_flow actions to be checked.
3413  * @param[in] rule_acts
3414  *   Array of DR rule actions to be used during flow creation.
3415  * @param[in] queue
3416  *   The flow creation queue index.
3417  *
3418  * @return
3419  *    0 on success, negative value otherwise and rte_errno is set.
3420  */
3421 static __rte_always_inline int
3422 flow_hw_actions_construct(struct rte_eth_dev *dev,
3423 			  struct rte_flow_hw *flow,
3424 			  struct mlx5_flow_hw_action_params *ap,
3425 			  const struct mlx5_hw_action_template *hw_at,
3426 			  uint64_t item_flags,
3427 			  struct rte_flow_template_table *table,
3428 			  const struct rte_flow_action actions[],
3429 			  struct mlx5dr_rule_action *rule_acts,
3430 			  uint32_t queue,
3431 			  struct rte_flow_error *error)
3432 {
3433 	struct mlx5_priv *priv = dev->data->dev_private;
3434 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3435 	struct mlx5_action_construct_data *act_data;
3436 	const struct rte_flow_actions_template *at = hw_at->action_template;
3437 	const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
3438 	const struct rte_flow_action *action;
3439 	const struct rte_flow_action_raw_encap *raw_encap_data;
3440 	const struct rte_flow_action_ipv6_ext_push *ipv6_push;
3441 	const struct rte_flow_item *enc_item = NULL;
3442 	const struct rte_flow_action_ethdev *port_action = NULL;
3443 	const struct rte_flow_action_meter *meter = NULL;
3444 	const struct rte_flow_action_age *age = NULL;
3445 	const struct rte_flow_action_nat64 *nat64_c = NULL;
3446 	struct rte_flow_attr attr = {
3447 		.ingress = 1,
3448 	};
3449 	uint32_t ft_flag;
3450 	int ret;
3451 	size_t encap_len = 0;
3452 	uint32_t age_idx = 0;
3453 	uint32_t mtr_idx = 0;
3454 	struct mlx5_aso_mtr *aso_mtr;
3455 	struct mlx5_multi_pattern_segment *mp_segment = NULL;
3456 	struct rte_flow_hw_aux *aux;
3457 
3458 	attr.group = table->grp->group_id;
3459 	ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
3460 	if (table->type == MLX5DR_TABLE_TYPE_FDB) {
3461 		attr.transfer = 1;
3462 		attr.ingress = 1;
3463 	} else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
3464 		attr.egress = 1;
3465 		attr.ingress = 0;
3466 	} else {
3467 		attr.ingress = 1;
3468 	}
3469 	if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0 && !hw_acts->mhdr->shared) {
3470 		uint16_t pos = hw_acts->mhdr->pos;
3471 
3472 		mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3473 		if (!mp_segment || !mp_segment->mhdr_action)
3474 			return -1;
3475 		rule_acts[pos].action = mp_segment->mhdr_action;
3476 		/* offset is relative to DR action */
3477 		rule_acts[pos].modify_header.offset =
3478 					flow->res_idx - mp_segment->head_index;
3479 		rule_acts[pos].modify_header.data =
3480 					(uint8_t *)ap->mhdr_cmd;
3481 		MLX5_ASSERT(hw_acts->mhdr->mhdr_cmds_num <= MLX5_MHDR_MAX_CMD);
3482 		rte_memcpy(ap->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
3483 			   sizeof(*ap->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
3484 	}
3485 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
3486 		uint32_t jump_group;
3487 		uint32_t tag;
3488 		struct mlx5_hw_jump_action *jump;
3489 		struct mlx5_hrxq *hrxq;
3490 		uint32_t ct_idx;
3491 		cnt_id_t cnt_id;
3492 		uint32_t *cnt_queue;
3493 		uint32_t mtr_id;
3494 
3495 		action = &actions[act_data->action_src];
3496 		/*
3497 		 * action template construction replaces
3498 		 * OF_SET_VLAN_VID with MODIFY_FIELD
3499 		 */
3500 		if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3501 			MLX5_ASSERT(act_data->type ==
3502 				    RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
3503 		else
3504 			MLX5_ASSERT(action->type ==
3505 				    RTE_FLOW_ACTION_TYPE_INDIRECT ||
3506 				    (int)action->type == act_data->type);
3507 		switch ((int)act_data->type) {
3508 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
3509 			act_data->indirect_list_cb(dev, act_data, actions,
3510 						   &rule_acts[act_data->action_dst]);
3511 			break;
3512 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
3513 			if (flow_hw_shared_action_construct
3514 					(dev, queue, action, table,
3515 					 item_flags, at->action_flags, flow,
3516 					 &rule_acts[act_data->action_dst]))
3517 				goto error;
3518 			break;
3519 		case RTE_FLOW_ACTION_TYPE_VOID:
3520 			break;
3521 		case RTE_FLOW_ACTION_TYPE_MARK:
3522 			tag = mlx5_flow_mark_set
3523 			      (((const struct rte_flow_action_mark *)
3524 			      (action->conf))->id);
3525 			rule_acts[act_data->action_dst].tag.value = tag;
3526 			break;
3527 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3528 			rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
3529 				vlan_hdr_to_be32(action);
3530 			break;
3531 		case RTE_FLOW_ACTION_TYPE_JUMP:
3532 			jump_group = ((const struct rte_flow_action_jump *)
3533 						action->conf)->group;
3534 			jump = flow_hw_jump_action_register
3535 				(dev, &table->cfg, jump_group, NULL);
3536 			if (!jump)
3537 				goto error;
3538 			rule_acts[act_data->action_dst].action =
3539 			(!!attr.group) ? jump->hws_action : jump->root_action;
3540 			flow->jump = jump;
3541 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3542 			break;
3543 		case RTE_FLOW_ACTION_TYPE_RSS:
3544 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3545 			hrxq = flow_hw_tir_action_register(dev, ft_flag, action);
3546 			if (!hrxq)
3547 				goto error;
3548 			rule_acts[act_data->action_dst].action = hrxq->action;
3549 			flow->hrxq = hrxq;
3550 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ;
3551 			break;
3552 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3553 			if (flow_hw_shared_action_get
3554 				(dev, act_data, item_flags,
3555 				 &rule_acts[act_data->action_dst]))
3556 				goto error;
3557 			break;
3558 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3559 			enc_item = ((const struct rte_flow_action_vxlan_encap *)
3560 				   action->conf)->definition;
3561 			if (flow_dv_convert_encap_data(enc_item, ap->encap_data, &encap_len, NULL))
3562 				goto error;
3563 			break;
3564 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3565 			enc_item = ((const struct rte_flow_action_nvgre_encap *)
3566 				   action->conf)->definition;
3567 			if (flow_dv_convert_encap_data(enc_item, ap->encap_data, &encap_len, NULL))
3568 				goto error;
3569 			break;
3570 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3571 			raw_encap_data =
3572 				(const struct rte_flow_action_raw_encap *)
3573 				 action->conf;
3574 			MLX5_ASSERT(raw_encap_data->size == act_data->encap.len);
3575 			if (unlikely(act_data->encap.len > MLX5_ENCAP_MAX_LEN))
3576 				return -1;
3577 			rte_memcpy(ap->encap_data, raw_encap_data->data, act_data->encap.len);
3578 			break;
3579 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
3580 			ipv6_push =
3581 				(const struct rte_flow_action_ipv6_ext_push *)action->conf;
3582 			MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
3583 			if (unlikely(act_data->ipv6_ext.len > MLX5_PUSH_MAX_LEN))
3584 				return -1;
3585 			rte_memcpy(ap->ipv6_push_data, ipv6_push->data,
3586 				   act_data->ipv6_ext.len);
3587 			break;
3588 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3589 			if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3590 				ret = flow_hw_set_vlan_vid_construct(dev, ap->mhdr_cmd,
3591 								     act_data,
3592 								     hw_acts,
3593 								     action);
3594 			else
3595 				ret = flow_hw_modify_field_construct(ap->mhdr_cmd,
3596 								     act_data,
3597 								     hw_acts,
3598 								     action);
3599 			if (ret)
3600 				goto error;
3601 			break;
3602 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3603 			port_action = action->conf;
3604 			if (!priv->hw_vport[port_action->port_id])
3605 				goto error;
3606 			rule_acts[act_data->action_dst].action =
3607 					priv->hw_vport[port_action->port_id];
3608 			break;
3609 		case RTE_FLOW_ACTION_TYPE_QUOTA:
3610 			flow_hw_construct_quota(priv,
3611 						rule_acts + act_data->action_dst,
3612 						act_data->shared_meter.id);
3613 			break;
3614 		case RTE_FLOW_ACTION_TYPE_METER:
3615 			meter = action->conf;
3616 			mtr_id = meter->mtr_id;
3617 			aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
3618 			rule_acts[act_data->action_dst].action =
3619 				priv->mtr_bulk.action;
3620 			rule_acts[act_data->action_dst].aso_meter.offset =
3621 								aso_mtr->offset;
3622 			jump = flow_hw_jump_action_register
3623 				(dev, &table->cfg, aso_mtr->fm.group, NULL);
3624 			if (!jump)
3625 				goto error;
3626 			MLX5_ASSERT
3627 				(!rule_acts[act_data->action_dst + 1].action);
3628 			rule_acts[act_data->action_dst + 1].action =
3629 					(!!attr.group) ? jump->hws_action :
3630 							 jump->root_action;
3631 			flow->jump = jump;
3632 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3633 			if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
3634 				goto error;
3635 			break;
3636 		case RTE_FLOW_ACTION_TYPE_AGE:
3637 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3638 			age = action->conf;
3639 			/*
3640 			 * First, create the AGE parameter, then create its
3641 			 * counter later:
3642 			 * Regular counter - in next case.
3643 			 * Indirect counter - update it after the loop.
3644 			 */
3645 			age_idx = mlx5_hws_age_action_create(priv, queue, 0,
3646 							     age,
3647 							     flow->res_idx,
3648 							     error);
3649 			if (age_idx == 0)
3650 				goto error;
3651 			mlx5_flow_hw_aux_set_age_idx(flow, aux, age_idx);
3652 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
3653 			if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
3654 				/*
3655 				 * When AGE uses an indirect counter, there is no need
3656 				 * to create a counter here; it will be updated with
3657 				 * the AGE parameter after the loop.
3658 				 */
3659 				break;
3660 			/* Fall-through. */
3661 		case RTE_FLOW_ACTION_TYPE_COUNT:
3662 			cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
3663 			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);
3664 			if (ret != 0)
3665 				goto error;
3666 			ret = mlx5_hws_cnt_pool_get_action_offset
3667 				(priv->hws_cpool,
3668 				 cnt_id,
3669 				 &rule_acts[act_data->action_dst].action,
3670 				 &rule_acts[act_data->action_dst].counter.offset
3671 				 );
3672 			if (ret != 0)
3673 				goto error;
3674 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3675 			flow->cnt_id = cnt_id;
3676 			break;
3677 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
3678 			ret = mlx5_hws_cnt_pool_get_action_offset
3679 				(priv->hws_cpool,
3680 				 act_data->shared_counter.id,
3681 				 &rule_acts[act_data->action_dst].action,
3682 				 &rule_acts[act_data->action_dst].counter.offset
3683 				 );
3684 			if (ret != 0)
3685 				goto error;
3686 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3687 			flow->cnt_id = act_data->shared_counter.id;
3688 			break;
3689 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3690 			ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(action->conf);
3691 			if (flow_hw_ct_compile(dev, queue, ct_idx,
3692 					       &rule_acts[act_data->action_dst]))
3693 				goto error;
3694 			break;
3695 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
3696 			mtr_id = act_data->shared_meter.id &
3697 				((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3698 			/* Find ASO object. */
3699 			aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
3700 			if (!aso_mtr)
3701 				goto error;
3702 			rule_acts[act_data->action_dst].action =
3703 							pool->action;
3704 			rule_acts[act_data->action_dst].aso_meter.offset =
3705 							aso_mtr->offset;
3706 			break;
3707 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
3708 			/*
3709 			 * Allocating the meter directly would slow down the
3710 			 * flow insertion rate.
3711 			 */
3712 			ret = flow_hw_meter_mark_compile(dev,
3713 				act_data->action_dst, action,
3714 				rule_acts, &mtr_idx, MLX5_HW_INV_QUEUE, error);
3715 			if (ret != 0)
3716 				goto error;
3717 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3718 			mlx5_flow_hw_aux_set_mtr_id(flow, aux, mtr_idx);
3719 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MTR_ID;
3720 			break;
3721 		case RTE_FLOW_ACTION_TYPE_NAT64:
3722 			nat64_c = action->conf;
3723 			rule_acts[act_data->action_dst].action =
3724 				priv->action_nat64[table->type][nat64_c->type];
3725 			break;
3726 		default:
3727 			break;
3728 		}
3729 	}
3730 	if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
3731 		/* If indirect count is used, then CNT_ID flag should be set. */
3732 		MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
3733 		if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
3734 			/* If indirect AGE is used, then AGE_IDX flag should be set. */
3735 			MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX);
3736 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3737 			age_idx = mlx5_flow_hw_aux_get_age_idx(flow, aux) &
3738 				  MLX5_HWS_AGE_IDX_MASK;
3739 			if (mlx5_hws_cnt_age_get(priv->hws_cpool, flow->cnt_id) != age_idx)
3740 				/*
3741 				 * This is the first use of this indirect counter
3742 				 * for this indirect AGE; the number of counters
3743 				 * needs to be increased.
3744 				 */
3745 				mlx5_hws_age_nb_cnt_increase(priv, age_idx);
3746 		}
3747 		/*
3748 		 * Update this indirect counter with the indirect/direct AGE
3749 		 * that is using it.
3750 		 */
3751 		mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, age_idx);
3752 	}
3753 	if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
3754 		int ix = mlx5_multi_pattern_reformat_to_index(hw_acts->encap_decap->action_type);
3755 		struct mlx5dr_rule_action *ra = &rule_acts[hw_acts->encap_decap_pos];
3756 
3757 		if (ix < 0)
3758 			goto error;
3759 		if (!mp_segment)
3760 			mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3761 		if (!mp_segment || !mp_segment->reformat_action[ix])
3762 			goto error;
3763 		ra->action = mp_segment->reformat_action[ix];
3764 		/* reformat offset is relative to selected DR action */
3765 		ra->reformat.offset = flow->res_idx - mp_segment->head_index;
3766 		ra->reformat.data = ap->encap_data;
3767 	}
3768 	if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
3769 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.offset =
3770 				flow->res_idx - 1;
3771 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.header = ap->ipv6_push_data;
3772 	}
3773 	if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) {
3774 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3775 		flow->cnt_id = hw_acts->cnt_id;
3776 	}
3777 	return 0;
3778 
3779 error:
3780 	flow_hw_release_actions(dev, queue, flow);
3781 	rte_errno = EINVAL;
3782 	return -rte_errno;
3783 }
3784 
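/*
 * Illustrative sketch only (values are hypothetical): an rte_flow actions
 * array which an application could pass to the asynchronous flow creation
 * path; the constructor above fills the DR rule actions from such an array
 * according to the chosen actions template.
 *
 * @code{.c}
 * static const struct rte_flow_action_mark mark = { .id = 0x1234 };
 * static const struct rte_flow_action_queue rxq = { .index = 0 };
 * const struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &rxq },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */
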
3785 static const struct rte_flow_item *
3786 flow_hw_get_rule_items(struct rte_eth_dev *dev,
3787 		       const struct rte_flow_template_table *table,
3788 		       const struct rte_flow_item items[],
3789 		       uint8_t pattern_template_index,
3790 		       struct mlx5_flow_hw_pattern_params *pp)
3791 {
3792 	struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
3793 
3794 	/* Only one implicit item can be added to flow rule pattern. */
3795 	MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
3796 	/* At least one item was allocated in pattern params for items. */
3797 	MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
3798 	if (pt->implicit_port) {
3799 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3800 			rte_errno = ENOMEM;
3801 			return NULL;
3802 		}
3803 		/* Set up represented port item in pattern params. */
3804 		pp->port_spec = (struct rte_flow_item_ethdev){
3805 			.port_id = dev->data->port_id,
3806 		};
3807 		pp->items[0] = (struct rte_flow_item){
3808 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
3809 			.spec = &pp->port_spec,
3810 		};
3811 		rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
3812 		return pp->items;
3813 	} else if (pt->implicit_tag) {
3814 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3815 			rte_errno = ENOMEM;
3816 			return NULL;
3817 		}
3818 		/* Set up tag item in pattern params. */
3819 		pp->tag_spec = (struct rte_flow_item_tag){
3820 			.data = flow_hw_tx_tag_regc_value(dev),
3821 		};
3822 		pp->items[0] = (struct rte_flow_item){
3823 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3824 			.spec = &pp->tag_spec,
3825 		};
3826 		rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
3827 		return pp->items;
3828 	} else {
3829 		return items;
3830 	}
3831 }
3832 
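/*
 * Illustrative sketch only: when the pattern template carries an implicit
 * port match, the rule pattern built above is equivalent to the user's
 * pattern with a REPRESENTED_PORT item prepended.
 *
 * @code{.c}
 * struct rte_flow_item_ethdev port_spec = { .port_id = dev->data->port_id };
 * struct rte_flow_item rule_items[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT, .spec = &port_spec },
 *	// followed by the pt->orig_item_nb items supplied by the user
 * };
 * @endcode
 */
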
3833 /**
3834  * Enqueue HW steering flow creation.
3835  *
3836  * The flow will be applied to the HW only if the postpone bit is not set or
3837  * the extra push function is called.
3838  * The flow creation status should be checked from dequeue result.
3839  *
3840  * @param[in] dev
3841  *   Pointer to the rte_eth_dev structure.
3842  * @param[in] queue
3843  *   The queue to create the flow.
3844  * @param[in] attr
3845  *   Pointer to the flow operation attributes.
3846  * @param[in] items
3847  *   Items with flow spec value.
3848  * @param[in] pattern_template_index
3849  *   The item pattern flow follows from the table.
3850  * @param[in] actions
3851  *   Action with flow spec value.
3852  * @param[in] action_template_index
3853  *   The action pattern flow follows from the table.
3854  * @param[in] user_data
3855  *   Pointer to the user_data.
3856  * @param[out] error
3857  *   Pointer to error structure.
3858  *
3859  * @return
3860  *    Flow pointer on success, NULL otherwise and rte_errno is set.
3861  */
3862 static struct rte_flow *
3863 flow_hw_async_flow_create(struct rte_eth_dev *dev,
3864 			  uint32_t queue,
3865 			  const struct rte_flow_op_attr *attr,
3866 			  struct rte_flow_template_table *table,
3867 			  const struct rte_flow_item items[],
3868 			  uint8_t pattern_template_index,
3869 			  const struct rte_flow_action actions[],
3870 			  uint8_t action_template_index,
3871 			  void *user_data,
3872 			  struct rte_flow_error *error)
3873 {
3874 	struct mlx5_priv *priv = dev->data->dev_private;
3875 	struct mlx5dr_rule_attr rule_attr = {
3876 		.queue_id = queue,
3877 		.user_data = user_data,
3878 		.burst = attr->postpone,
3879 	};
3880 	struct mlx5dr_rule_action *rule_acts;
3881 	struct mlx5_flow_hw_action_params ap;
3882 	struct mlx5_flow_hw_pattern_params pp;
3883 	struct rte_flow_hw *flow = NULL;
3884 	const struct rte_flow_item *rule_items;
3885 	uint32_t flow_idx = 0;
3886 	uint32_t res_idx = 0;
3887 	int ret;
3888 
3889 	if (mlx5_fp_debug_enabled()) {
3890 		if (flow_hw_async_create_validate(dev, queue, table, items, pattern_template_index,
3891 						  actions, action_template_index, error))
3892 			return NULL;
3893 	}
3894 	flow = mlx5_ipool_malloc(table->flow, &flow_idx);
3895 	if (!flow)
3896 		goto error;
3897 	rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
3898 	/*
3899 	 * Set the table here in order to know the destination table
3900 	 * when freeing the flow afterwards.
3901 	 */
3902 	flow->table = table;
3903 	flow->mt_idx = pattern_template_index;
3904 	flow->idx = flow_idx;
3905 	if (table->resource) {
3906 		mlx5_ipool_malloc(table->resource, &res_idx);
3907 		if (!res_idx)
3908 			goto error;
3909 		flow->res_idx = res_idx;
3910 	} else {
3911 		flow->res_idx = flow_idx;
3912 	}
3913 	flow->flags = 0;
3914 	/*
3915 	 * Set the flow operation type here in order to know if the flow memory
3916 	 * should be freed or not when the result is dequeued.
3917 	 */
3918 	flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
3919 	flow->user_data = user_data;
3920 	rule_attr.user_data = flow;
3921 	/*
3922 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
3923 	 * for rule insertion hints.
3924 	 */
3925 	flow->rule_idx = flow->res_idx - 1;
3926 	rule_attr.rule_idx = flow->rule_idx;
3927 	/*
3928 	 * Construct the flow actions based on the input actions.
3929 	 * The implicitly appended action is always fixed, like metadata
3930 	 * copy action from FDB to NIC Rx.
3931 	 * No need to copy and construct a new "actions" list based on the
3932 	 * user's input, in order to save the cost.
3933 	 */
3934 	if (flow_hw_actions_construct(dev, flow, &ap,
3935 				      &table->ats[action_template_index],
3936 				      table->its[pattern_template_index]->item_flags,
3937 				      flow->table, actions,
3938 				      rule_acts, queue, error))
3939 		goto error;
3940 	rule_items = flow_hw_get_rule_items(dev, table, items,
3941 					    pattern_template_index, &pp);
3942 	if (!rule_items)
3943 		goto error;
3944 	if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
3945 		ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
3946 					 pattern_template_index, rule_items,
3947 					 action_template_index, rule_acts,
3948 					 &rule_attr,
3949 					 (struct mlx5dr_rule *)flow->rule);
3950 	} else {
3951 		struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3952 		uint32_t selector;
3953 
3954 		flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;
3955 		rte_rwlock_read_lock(&table->matcher_replace_rwlk);
3956 		selector = table->matcher_selector;
3957 		ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
3958 					 pattern_template_index, rule_items,
3959 					 action_template_index, rule_acts,
3960 					 &rule_attr,
3961 					 (struct mlx5dr_rule *)flow->rule);
3962 		rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
3963 		aux->matcher_selector = selector;
3964 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR;
3965 	}
3966 	if (likely(!ret)) {
3967 		flow_hw_q_inc_flow_ops(priv, queue);
3968 		return (struct rte_flow *)flow;
3969 	}
3970 error:
3971 	if (table->resource && res_idx)
3972 		mlx5_ipool_free(table->resource, res_idx);
3973 	if (flow_idx)
3974 		mlx5_ipool_free(table->flow, flow_idx);
3975 	rte_flow_error_set(error, rte_errno,
3976 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3977 			   "fail to create rte flow");
3978 	return NULL;
3979 }
3980 
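/*
 * Illustrative sketch only (port, queue and template indexes are
 * hypothetical): the driver callback above is normally reached through the
 * generic rte_flow_async_create() API.
 *
 * @code{.c}
 * const struct rte_flow_op_attr op_attr = { .postpone = 1 };
 * struct rte_flow_error error;
 * struct rte_flow *f;
 *
 * f = rte_flow_async_create(port_id, queue_id, &op_attr, table,
 *			     pattern, 0, actions, 0, user_data, &error);
 * // The creation result must later be retrieved with rte_flow_pull().
 * @endcode
 */
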
3981 /**
3982  * Enqueue HW steering flow creation by index.
3983  *
3984  * The flow will be applied to the HW only if the postpone bit is not set or
3985  * the extra push function is called.
3986  * The flow creation status should be checked from dequeue result.
3987  *
3988  * @param[in] dev
3989  *   Pointer to the rte_eth_dev structure.
3990  * @param[in] queue
3991  *   The queue to create the flow.
3992  * @param[in] attr
3993  *   Pointer to the flow operation attributes.
3994  * @param[in] rule_index
3995  *   The index of the rule to be inserted into the table.
3996  * @param[in] actions
3997  *   Action with flow spec value.
3998  * @param[in] action_template_index
3999  *   The action pattern flow follows from the table.
4000  * @param[in] user_data
4001  *   Pointer to the user_data.
4002  * @param[out] error
4003  *   Pointer to error structure.
4004  *
4005  * @return
4006  *    Flow pointer on success, NULL otherwise and rte_errno is set.
4007  */
4008 static struct rte_flow *
4009 flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
4010 			  uint32_t queue,
4011 			  const struct rte_flow_op_attr *attr,
4012 			  struct rte_flow_template_table *table,
4013 			  uint32_t rule_index,
4014 			  const struct rte_flow_action actions[],
4015 			  uint8_t action_template_index,
4016 			  void *user_data,
4017 			  struct rte_flow_error *error)
4018 {
4019 	struct rte_flow_item items[] = {{.type = RTE_FLOW_ITEM_TYPE_END,}};
4020 	struct mlx5_priv *priv = dev->data->dev_private;
4021 	struct mlx5dr_rule_attr rule_attr = {
4022 		.queue_id = queue,
4023 		.user_data = user_data,
4024 		.burst = attr->postpone,
4025 	};
4026 	struct mlx5dr_rule_action *rule_acts;
4027 	struct mlx5_flow_hw_action_params ap;
4028 	struct rte_flow_hw *flow = NULL;
4029 	uint32_t flow_idx = 0;
4030 	uint32_t res_idx = 0;
4031 	int ret;
4032 
4033 	if (mlx5_fp_debug_enabled()) {
4034 		if (flow_hw_async_create_by_index_validate(dev, queue, table, rule_index,
4035 							   actions, action_template_index, error))
4036 			return NULL;
4037 	}
4038 	flow = mlx5_ipool_malloc(table->flow, &flow_idx);
4039 	if (!flow)
4040 		goto error;
4041 	rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
4042 	/*
4043 	 * Set the table here in order to know the destination table
4044 	 * when freeing the flow afterwards.
4045 	 */
4046 	flow->table = table;
4047 	flow->mt_idx = 0;
4048 	flow->idx = flow_idx;
4049 	if (table->resource) {
4050 		mlx5_ipool_malloc(table->resource, &res_idx);
4051 		if (!res_idx)
4052 			goto error;
4053 		flow->res_idx = res_idx;
4054 	} else {
4055 		flow->res_idx = flow_idx;
4056 	}
4057 	flow->flags = 0;
4058 	/*
4059 	 * Set the flow operation type here in order to know if the flow memory
4060 	 * should be freed or not when the result is dequeued.
4061 	 */
4062 	flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4063 	flow->user_data = user_data;
4064 	rule_attr.user_data = flow;
4065 	/* Set the rule index. */
4066 	flow->rule_idx = rule_index;
4067 	rule_attr.rule_idx = flow->rule_idx;
4068 	/*
4069 	 * Construct the flow actions based on the input actions.
4070 	 * The implicitly appended action is always fixed, like metadata
4071 	 * copy action from FDB to NIC Rx.
4072 	 * No need to copy and construct a new "actions" list based on the
4073 	 * user's input, in order to save the cost.
4074 	 */
4075 	if (flow_hw_actions_construct(dev, flow, &ap,
4076 				      &table->ats[action_template_index],
4077 				      table->its[0]->item_flags, table,
4078 				      actions, rule_acts, queue, error)) {
4079 		rte_errno = EINVAL;
4080 		goto error;
4081 	}
4082 	if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
4083 		ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
4084 					 0, items, action_template_index,
4085 					 rule_acts, &rule_attr,
4086 					 (struct mlx5dr_rule *)flow->rule);
4087 	} else {
4088 		struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4089 		uint32_t selector;
4090 
4091 		flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;
4092 		rte_rwlock_read_lock(&table->matcher_replace_rwlk);
4093 		selector = table->matcher_selector;
4094 		ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
4095 					 0, items, action_template_index,
4096 					 rule_acts, &rule_attr,
4097 					 (struct mlx5dr_rule *)flow->rule);
4098 		rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
4099 		aux->matcher_selector = selector;
4100 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR;
4101 	}
4102 	if (likely(!ret)) {
4103 		flow_hw_q_inc_flow_ops(priv, queue);
4104 		return (struct rte_flow *)flow;
4105 	}
4106 error:
4107 	if (table->resource && res_idx)
4108 		mlx5_ipool_free(table->resource, res_idx);
4109 	if (flow_idx)
4110 		mlx5_ipool_free(table->flow, flow_idx);
4111 	rte_flow_error_set(error, rte_errno,
4112 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4113 			   "fail to create rte flow");
4114 	return NULL;
4115 }
4116 
4117 /**
4118  * Enqueue HW steering flow update.
4119  *
4120  * The flow will be applied to the HW only if the postpone bit is not set or
4121  * the extra push function is called.
4122  * The flow update status should be checked from the dequeue result.
4123  *
4124  * @param[in] dev
4125  *   Pointer to the rte_eth_dev structure.
4126  * @param[in] queue
4127  *   The queue to update the flow.
4128  * @param[in] attr
4129  *   Pointer to the flow operation attributes.
4130  * @param[in] flow
4131  *   Pointer to the flow to be updated.
4132  * @param[in] actions
4133  *   Action with flow spec value.
4134  * @param[in] action_template_index
4135  *   The action pattern flow follows from the table.
4136  * @param[in] user_data
4137  *   Pointer to the user_data.
4138  * @param[out] error
4139  *   Pointer to error structure.
4140  *
4141  * @return
4142  *    0 on success, negative value otherwise and rte_errno is set.
4143  */
4144 static int
4145 flow_hw_async_flow_update(struct rte_eth_dev *dev,
4146 			   uint32_t queue,
4147 			   const struct rte_flow_op_attr *attr,
4148 			   struct rte_flow *flow,
4149 			   const struct rte_flow_action actions[],
4150 			   uint8_t action_template_index,
4151 			   void *user_data,
4152 			   struct rte_flow_error *error)
4153 {
4154 	struct mlx5_priv *priv = dev->data->dev_private;
4155 	struct mlx5dr_rule_attr rule_attr = {
4156 		.queue_id = queue,
4157 		.user_data = user_data,
4158 		.burst = attr->postpone,
4159 	};
4160 	struct mlx5dr_rule_action *rule_acts;
4161 	struct mlx5_flow_hw_action_params ap;
4162 	struct rte_flow_hw *of = (struct rte_flow_hw *)flow;
4163 	struct rte_flow_hw *nf;
4164 	struct rte_flow_hw_aux *aux;
4165 	struct rte_flow_template_table *table = of->table;
4166 	uint32_t res_idx = 0;
4167 	int ret;
4168 
4169 	if (mlx5_fp_debug_enabled()) {
4170 		if (flow_hw_async_update_validate(dev, queue, of, actions, action_template_index,
4171 						  error))
4172 			return -rte_errno;
4173 	}
4174 	aux = mlx5_flow_hw_aux(dev->data->port_id, of);
4175 	nf = &aux->upd_flow;
4176 	memset(nf, 0, sizeof(struct rte_flow_hw));
4177 	rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
4178 	/*
4179 	 * Set the table here in order to know the destination table
4180 	 * when freeing the flow afterwards.
4181 	 */
4182 	nf->table = table;
4183 	nf->mt_idx = of->mt_idx;
4184 	nf->idx = of->idx;
4185 	if (table->resource) {
4186 		mlx5_ipool_malloc(table->resource, &res_idx);
4187 		if (!res_idx)
4188 			goto error;
4189 		nf->res_idx = res_idx;
4190 	} else {
4191 		nf->res_idx = of->res_idx;
4192 	}
4193 	nf->flags = 0;
4194 	/* Indicate the construction function to set the proper fields. */
4195 	nf->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
4196 	/*
4197 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
4198 	 * for rule insertion hints.
4199 	 * If there is only one STE, the update will be atomic by nature.
4200 	 */
4201 	nf->rule_idx = nf->res_idx - 1;
4202 	rule_attr.rule_idx = nf->rule_idx;
4203 	/*
4204 	 * Construct the flow actions based on the input actions.
4205 	 * The implicitly appended action is always fixed, like metadata
4206 	 * copy action from FDB to NIC Rx.
4207 	 * No need to copy and construct a new "actions" list based on the
4208 	 * user's input, in order to save the cost.
4209 	 */
4210 	if (flow_hw_actions_construct(dev, nf, &ap,
4211 				      &table->ats[action_template_index],
4212 				      table->its[nf->mt_idx]->item_flags,
4213 				      table, actions,
4214 				      rule_acts, queue, error)) {
4215 		rte_errno = EINVAL;
4216 		goto error;
4217 	}
4218 	/*
4219 	 * Set the flow operation type here in order to know if the flow memory
4220 	 * should be freed or not when the result is dequeued.
4221 	 */
4222 	of->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
4223 	of->user_data = user_data;
4224 	of->flags |= MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW;
4225 	rule_attr.user_data = of;
4226 	ret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,
4227 					action_template_index, rule_acts, &rule_attr);
4228 	if (likely(!ret)) {
4229 		flow_hw_q_inc_flow_ops(priv, queue);
4230 		return 0;
4231 	}
4232 error:
4233 	if (table->resource && res_idx)
4234 		mlx5_ipool_free(table->resource, res_idx);
4235 	return rte_flow_error_set(error, rte_errno,
4236 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4237 				  "fail to update rte flow");
4238 }
4239 
4240 /**
4241  * Enqueue HW steering flow destruction.
4242  *
4243  * The flow will be applied to the HW only if the postpone bit is not set or
4244  * the extra push function is called.
4245  * The flow destruction status should be checked from dequeue result.
4246  *
4247  * @param[in] dev
4248  *   Pointer to the rte_eth_dev structure.
4249  * @param[in] queue
4250  *   The queue to destroy the flow.
4251  * @param[in] attr
4252  *   Pointer to the flow operation attributes.
4253  * @param[in] flow
4254  *   Pointer to the flow to be destroyed.
4255  * @param[in] user_data
4256  *   Pointer to the user_data.
4257  * @param[out] error
4258  *   Pointer to error structure.
4259  *
4260  * @return
4261  *    0 on success, negative value otherwise and rte_errno is set.
4262  */
4263 static int
4264 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
4265 			   uint32_t queue,
4266 			   const struct rte_flow_op_attr *attr,
4267 			   struct rte_flow *flow,
4268 			   void *user_data,
4269 			   struct rte_flow_error *error)
4270 {
4271 	struct mlx5_priv *priv = dev->data->dev_private;
4272 	struct mlx5dr_rule_attr rule_attr = {
4273 		.queue_id = queue,
4274 		.user_data = user_data,
4275 		.burst = attr->postpone,
4276 	};
4277 	struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
4278 	bool resizable = rte_flow_template_table_resizable(dev->data->port_id,
4279 							   &fh->table->cfg.attr);
4280 	int ret;
4281 
4282 	if (mlx5_fp_debug_enabled()) {
4283 		if (flow_hw_async_destroy_validate(dev, queue, fh, error))
4284 			return -rte_errno;
4285 	}
4286 	fh->operation_type = !resizable ?
4287 			     MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY :
4288 			     MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY;
4289 	fh->user_data = user_data;
4290 	rule_attr.user_data = fh;
4291 	rule_attr.rule_idx = fh->rule_idx;
4292 	ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
4293 	if (ret) {
4294 		return rte_flow_error_set(error, rte_errno,
4295 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4296 					  "fail to destroy rte flow");
4297 	}
4298 	flow_hw_q_inc_flow_ops(priv, queue);
4299 	return 0;
4300 }
4301 
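/*
 * Illustrative sketch only (port and queue values are hypothetical): flow
 * destruction is normally enqueued through the generic
 * rte_flow_async_destroy() API and completes asynchronously.
 *
 * @code{.c}
 * const struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * struct rte_flow_error error;
 * int ret;
 *
 * ret = rte_flow_async_destroy(port_id, queue_id, &op_attr, flow,
 *				user_data, &error);
 * // On success, the destruction result is retrieved later with rte_flow_pull().
 * @endcode
 */
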
4302 /**
4303  * Release the AGE and counter for given flow.
4304  *
4305  * @param[in] priv
4306  *   Pointer to the port private data structure.
4307  * @param[in] queue
4308  *   The queue to release the counter.
4309  * @param[in, out] flow
4310  *   Pointer to the flow containing the counter.
4311  * @param[out] error
4312  *   Pointer to error structure.
4313  */
4314 static void
4315 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
4316 			  struct rte_flow_hw *flow,
4317 			  struct rte_flow_error *error)
4318 {
4319 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(priv->dev_data->port_id, flow);
4320 	uint32_t *cnt_queue;
4321 	uint32_t age_idx = aux->orig.age_idx;
4322 
4323 	MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
4324 	if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
4325 		if ((flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) &&
4326 		    !mlx5_hws_age_is_indirect(age_idx)) {
4327 			/* Remove this AGE parameter from indirect counter. */
4328 			mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
4329 			/* Release the AGE parameter. */
4330 			mlx5_hws_age_action_destroy(priv, age_idx, error);
4331 		}
4332 		return;
4333 	}
4334 	cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
4335 	/* Put the counter first to reduce the race risk in BG thread. */
4336 	mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);
4337 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) {
4338 		if (mlx5_hws_age_is_indirect(age_idx)) {
4339 			uint32_t idx = age_idx & MLX5_HWS_AGE_IDX_MASK;
4340 
4341 			mlx5_hws_age_nb_cnt_decrease(priv, idx);
4342 		} else {
4343 			/* Release the AGE parameter. */
4344 			mlx5_hws_age_action_destroy(priv, age_idx, error);
4345 		}
4346 	}
4347 }
4348 
4349 static __rte_always_inline void
4350 flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job *job,
4351 				  uint32_t queue)
4352 {
4353 	struct mlx5_priv *priv = dev->data->dev_private;
4354 	struct mlx5_aso_ct_action *aso_ct;
4355 	struct mlx5_aso_mtr *aso_mtr;
4356 	uint32_t type, idx;
4357 
4358 	if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
4359 	    MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
4360 		mlx5_quota_async_completion(dev, queue, job);
4361 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
4362 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4363 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4364 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4365 			mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
4366 		}
4367 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
4368 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4369 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4370 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4371 			aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
4372 			aso_mtr->state = ASO_METER_READY;
4373 		} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4374 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4375 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4376 			aso_ct->state = ASO_CONNTRACK_READY;
4377 		}
4378 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
4379 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4380 		if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4381 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4382 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4383 			mlx5_aso_ct_obj_analyze(job->query.user,
4384 						job->query.hw);
4385 			aso_ct->state = ASO_CONNTRACK_READY;
4386 		}
4387 	}
4388 }
4389 
4390 static __rte_always_inline int
4391 mlx5_hw_pull_flow_transfer_comp(struct rte_eth_dev *dev,
4392 				uint32_t queue, struct rte_flow_op_result res[],
4393 				uint16_t n_res)
4394 {
4395 	uint32_t size, i;
4396 	struct rte_flow_hw *flow = NULL;
4397 	struct mlx5_priv *priv = dev->data->dev_private;
4398 	struct rte_ring *ring = priv->hw_q[queue].flow_transfer_completed;
4399 
4400 	size = RTE_MIN(rte_ring_count(ring), n_res);
4401 	for (i = 0; i < size; i++) {
4402 		res[i].status = RTE_FLOW_OP_SUCCESS;
4403 		rte_ring_dequeue(ring, (void **)&flow);
4404 		res[i].user_data = flow->user_data;
4405 		flow_hw_q_dec_flow_ops(priv, queue);
4406 	}
4407 	return (int)size;
4408 }
4409 
4410 static inline int
4411 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
4412 				 uint32_t queue,
4413 				 struct rte_flow_op_result res[],
4414 				 uint16_t n_res)
4415 
4416 {
4417 	struct mlx5_priv *priv = dev->data->dev_private;
4418 	struct rte_ring *r = priv->hw_q[queue].indir_cq;
4419 	void *user_data = NULL;
4420 	int ret_comp, i;
4421 
4422 	ret_comp = (int)rte_ring_count(r);
4423 	if (ret_comp > n_res)
4424 		ret_comp = n_res;
4425 	for (i = 0; i < ret_comp; i++) {
4426 		rte_ring_dequeue(r, &user_data);
4427 		res[i].user_data = user_data;
4428 		res[i].status = RTE_FLOW_OP_SUCCESS;
4429 	}
4430 	if (!priv->shared_host) {
4431 		if (ret_comp < n_res && priv->hws_mpool)
4432 			ret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],
4433 					&res[ret_comp], n_res - ret_comp);
4434 		if (ret_comp < n_res && priv->hws_ctpool)
4435 			ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
4436 					&res[ret_comp], n_res - ret_comp);
4437 	}
4438 	if (ret_comp < n_res && priv->quota_ctx.sq)
4439 		ret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],
4440 						     &res[ret_comp],
4441 						     n_res - ret_comp);
4442 	for (i = 0; i < ret_comp; i++) {
4443 		struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)res[i].user_data;
4444 
4445 		/* Restore user data. */
4446 		res[i].user_data = job->user_data;
4447 		if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
4448 			flow_hw_pull_legacy_indirect_comp(dev, job, queue);
4449 		/*
4450 		 * Current PMD supports 2 indirect action list types - MIRROR and REFORMAT.
4451 		 * These indirect list types do not post WQE to create action.
4452 		 * Future indirect list types that do post WQE will add
4453 		 * completion handlers here.
4454 		 */
4455 		flow_hw_job_put(priv, job, queue);
4456 	}
4457 	return ret_comp;
4458 }
4459 
4460 static __rte_always_inline void
4461 hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
4462 			       struct rte_flow_hw *flow,
4463 			       uint32_t queue, struct rte_flow_error *error)
4464 {
4465 	struct mlx5_priv *priv = dev->data->dev_private;
4466 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
4467 	struct rte_flow_template_table *table = flow->table;
4468 	/* Release the original resource index in case of update. */
4469 	uint32_t res_idx = flow->res_idx;
4470 
4471 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAGS_ALL) {
4472 		struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4473 
4474 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
4475 			flow_hw_jump_release(dev, flow->jump);
4476 		else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
4477 			mlx5_hrxq_obj_release(dev, flow->hrxq);
4478 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
4479 			flow_hw_age_count_release(priv, queue, flow, error);
4480 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
4481 			mlx5_ipool_free(pool->idx_pool, aux->orig.mtr_id);
4482 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW) {
4483 			struct rte_flow_hw *upd_flow = &aux->upd_flow;
4484 
4485 			rte_memcpy(flow, upd_flow, offsetof(struct rte_flow_hw, rule));
4486 			aux->orig = aux->upd;
4487 			flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4488 			if (!flow->nt_rule && table->resource)
4489 				mlx5_ipool_free(table->resource, res_idx);
4490 		}
4491 	}
4492 	if (flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY ||
4493 	    flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY) {
4494 		if (!flow->nt_rule) {
4495 			if (table->resource)
4496 				mlx5_ipool_free(table->resource, res_idx);
4497 			mlx5_ipool_free(table->flow, flow->idx);
4498 		}
4499 	}
4500 }
4501 
4502 static __rte_always_inline void
4503 hw_cmpl_resizable_tbl(struct rte_eth_dev *dev,
4504 		      struct rte_flow_hw *flow,
4505 		      uint32_t queue, enum rte_flow_op_status status,
4506 		      struct rte_flow_error *error)
4507 {
4508 	struct rte_flow_template_table *table = flow->table;
4509 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4510 	uint32_t selector = aux->matcher_selector;
4511 	uint32_t other_selector = (selector + 1) & 1;
4512 
4513 	MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
4514 	switch (flow->operation_type) {
4515 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4516 		rte_atomic_fetch_add_explicit
4517 			(&table->matcher_info[selector].refcnt, 1,
4518 			 rte_memory_order_relaxed);
4519 		break;
4520 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4521 		rte_atomic_fetch_sub_explicit
4522 			(&table->matcher_info[selector].refcnt, 1,
4523 			 rte_memory_order_relaxed);
4524 		hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4525 		break;
4526 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4527 		if (status == RTE_FLOW_OP_SUCCESS) {
4528 			rte_atomic_fetch_sub_explicit
4529 				(&table->matcher_info[selector].refcnt, 1,
4530 				 rte_memory_order_relaxed);
4531 			rte_atomic_fetch_add_explicit
4532 				(&table->matcher_info[other_selector].refcnt, 1,
4533 				 rte_memory_order_relaxed);
4534 			aux->matcher_selector = other_selector;
4535 		}
4536 		break;
4537 	default:
4538 		break;
4539 	}
4540 }
4541 
4542 /**
4543  * Pull the enqueued flows.
4544  *
4545  * For flows enqueued from creation/destruction, the status should be
4546  * checked from the dequeue result.
4547  *
4548  * @param[in] dev
4549  *   Pointer to the rte_eth_dev structure.
4550  * @param[in] queue
4551  *   The queue to pull the result.
4552  * @param[in, out] res
4553  *   Array to save the results.
4554  * @param[in] n_res
4555  *   Maximum number of results that can be returned in the array.
4556  * @param[out] error
4557  *   Pointer to error structure.
4558  *
4559  * @return
4560  *    Result number on success, negative value otherwise and rte_errno is set.
4561  */
4562 static int
4563 flow_hw_pull(struct rte_eth_dev *dev,
4564 	     uint32_t queue,
4565 	     struct rte_flow_op_result res[],
4566 	     uint16_t n_res,
4567 	     struct rte_flow_error *error)
4568 {
4569 	struct mlx5_priv *priv = dev->data->dev_private;
4570 	int ret, i;
4571 
4572 	/* 1. Pull the flow completion. */
4573 	ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
4574 	if (ret < 0)
4575 		return rte_flow_error_set(error, rte_errno,
4576 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4577 				"fail to query flow queue");
4578 	for (i = 0; i < ret; i++) {
4579 		struct rte_flow_hw *flow = res[i].user_data;
4580 
4581 		/* Restore user data. */
4582 		res[i].user_data = flow->user_data;
4583 		switch (flow->operation_type) {
4584 		case MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY:
4585 		case MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE:
4586 			hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4587 			break;
4588 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4589 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4590 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4591 			hw_cmpl_resizable_tbl(dev, flow, queue, res[i].status, error);
4592 			break;
4593 		default:
4594 			break;
4595 		}
4596 		flow_hw_q_dec_flow_ops(priv, queue);
4597 	}
4598 	/* 2. Pull indirect action comp. */
4599 	if (ret < n_res)
4600 		ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
4601 							n_res - ret);
4602 	if (ret < n_res)
4603 		ret += mlx5_hw_pull_flow_transfer_comp(dev, queue, &res[ret],
4604 						       n_res - ret);
4605 
4606 	return ret;
4607 }
4608 
4609 static uint32_t
4610 mlx5_hw_push_queue(struct rte_ring *pending_q, struct rte_ring *cmpl_q)
4611 {
4612 	void *job = NULL;
4613 	uint32_t i, size = rte_ring_count(pending_q);
4614 
4615 	for (i = 0; i < size; i++) {
4616 		rte_ring_dequeue(pending_q, &job);
4617 		rte_ring_enqueue(cmpl_q, job);
4618 	}
4619 	return size;
4620 }
4621 
4622 static inline uint32_t
4623 __flow_hw_push_action(struct rte_eth_dev *dev,
4624 		    uint32_t queue)
4625 {
4626 	struct mlx5_priv *priv = dev->data->dev_private;
4627 	struct mlx5_hw_q *hw_q = &priv->hw_q[queue];
4628 
4629 	mlx5_hw_push_queue(hw_q->indir_iq, hw_q->indir_cq);
4630 	mlx5_hw_push_queue(hw_q->flow_transfer_pending,
4631 			   hw_q->flow_transfer_completed);
4632 	if (!priv->shared_host) {
4633 		if (priv->hws_ctpool)
4634 			mlx5_aso_push_wqe(priv->sh,
4635 					  &priv->ct_mng->aso_sqs[queue]);
4636 		if (priv->hws_mpool)
4637 			mlx5_aso_push_wqe(priv->sh,
4638 					  &priv->hws_mpool->sq[queue]);
4639 	}
4640 	return flow_hw_q_pending(priv, queue);
4641 }
4642 
4643 static int
4644 __flow_hw_push(struct rte_eth_dev *dev,
4645 	       uint32_t queue,
4646 	       struct rte_flow_error *error)
4647 {
4648 	struct mlx5_priv *priv = dev->data->dev_private;
4649 	int ret, num;
4650 
4651 	num = __flow_hw_push_action(dev, queue);
4652 	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
4653 				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
4654 	if (ret) {
4655 		rte_flow_error_set(error, rte_errno,
4656 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4657 				   "fail to push flows");
4658 		return ret;
4659 	}
4660 	return num;
4661 }
4662 
4663 /**
4664  * Push the enqueued flows to HW.
4665  *
4666  * Force apply all the enqueued flows to the HW.
4667  *
4668  * @param[in] dev
4669  *   Pointer to the rte_eth_dev structure.
4670  * @param[in] queue
4671  *   The queue to push the flow.
4672  * @param[out] error
4673  *   Pointer to error structure.
4674  *
4675  * @return
4676  *    0 on success, negative value otherwise and rte_errno is set.
4677  */
4678 static int
4679 flow_hw_push(struct rte_eth_dev *dev,
4680 	     uint32_t queue, struct rte_flow_error *error)
4681 {
4682 	int ret = __flow_hw_push(dev, queue, error);
4683 
4684 	return ret >= 0 ? 0 : ret;
4685 }
4686 
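/*
 * Illustrative sketch only (port and queue values are hypothetical): a
 * typical application drain loop built on the generic wrappers of the
 * push/pull callbacks above.
 *
 * @code{.c}
 * struct rte_flow_op_result res[32];
 * struct rte_flow_error error;
 * int i, n;
 *
 * rte_flow_push(port_id, queue_id, &error);
 * do {
 *	n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res), &error);
 *	for (i = 0; i < n; i++)
 *		if (res[i].status != RTE_FLOW_OP_SUCCESS)
 *			handle_failure(res[i].user_data); // hypothetical handler
 * } while (n > 0);
 * @endcode
 */
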
4687 /**
4688  * Drain the enqueued flows' completion.
4689  *
4690  * @param[in] dev
4691  *   Pointer to the rte_eth_dev structure.
4692  * @param[in] queue
4693  *   The queue to pull the flow.
4694  * @param[out] error
4695  *   Pointer to error structure.
4696  *
4697  * @return
4698  *    0 on success, negative value otherwise and rte_errno is set.
4699  */
4700 static int
4701 __flow_hw_pull_comp(struct rte_eth_dev *dev,
4702 		    uint32_t queue, struct rte_flow_error *error)
4703 {
4704 	struct rte_flow_op_result comp[BURST_THR];
4705 	int ret, i, empty_loop = 0;
4706 	uint32_t pending_rules;
4707 
4708 	ret = __flow_hw_push(dev, queue, error);
4709 	if (ret < 0)
4710 		return ret;
4711 	pending_rules = ret;
4712 	while (pending_rules) {
4713 		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
4714 		if (ret < 0)
4715 			return -1;
4716 		if (!ret) {
4717 			rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
4718 			if (++empty_loop > 5) {
4719 				DRV_LOG(WARNING, "No available dequeue %u, quit.", pending_rules);
4720 				break;
4721 			}
4722 			continue;
4723 		}
4724 		for (i = 0; i < ret; i++) {
4725 			if (comp[i].status == RTE_FLOW_OP_ERROR)
4726 				DRV_LOG(WARNING, "Flow flush get error CQE.");
4727 		}
4728 		/*
4729 		 * Indirect **SYNC** METER_MARK and CT actions do not
4730 		 * remove their completion right after the WQE post.
4731 		 * That implementation avoids HW timeouts.
4732 		 * The completion is removed before the following WQE post.
4733 		 * However, HWS queue updates do not reflect that behaviour.
4734 		 * Therefore, during port destruction the sync queue may still
4735 		 * have pending completions.
4736 		 */
4737 		pending_rules -= RTE_MIN(pending_rules, (uint32_t)ret);
4738 		empty_loop = 0;
4739 	}
4740 	return 0;
4741 }
4742 
4743 /**
4744  * Flush created flows.
4745  *
4746  * @param[in] dev
4747  *   Pointer to the rte_eth_dev structure.
4748  * @param[out] error
4749  *   Pointer to error structure.
4750  *
4751  * @return
4752  *    0 on success, negative value otherwise and rte_errno is set.
4753  */
4754 int
4755 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
4756 		     struct rte_flow_error *error)
4757 {
4758 	struct mlx5_priv *priv = dev->data->dev_private;
4759 	struct mlx5_hw_q *hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
4760 	struct rte_flow_template_table *tbl;
4761 	struct rte_flow_hw *flow;
4762 	struct rte_flow_op_attr attr = {
4763 		.postpone = 0,
4764 	};
4765 	uint32_t pending_rules = 0;
4766 	uint32_t queue;
4767 	uint32_t fidx;
4768 
4769 	/*
4770 	 * Push and dequeue all the enqueued flow creation/destruction
4771 	 * jobs in case the user forgot to dequeue them; otherwise the
4772 	 * enqueued created flows would be leaked. The forgotten
4773 	 * dequeues would also make the flow flush below receive more
4774 	 * CQEs than expected and drive pending_rules to a negative
4775 	 * value.
4776 	 */
4777 	for (queue = 0; queue < priv->nb_queue; queue++) {
4778 		if (__flow_hw_pull_comp(dev, queue, error))
4779 			return -1;
4780 	}
4781 	/* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
4782 	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
4783 		if (!tbl->cfg.external)
4784 			continue;
4785 		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
4786 			if (flow_hw_async_flow_destroy(dev,
4787 						MLX5_DEFAULT_FLUSH_QUEUE,
4788 						&attr,
4789 						(struct rte_flow *)flow,
4790 						NULL,
4791 						error))
4792 				return -1;
4793 			pending_rules++;
4794 			/* Drain completion with queue size. */
4795 			if (pending_rules >= hw_q->size) {
4796 				if (__flow_hw_pull_comp(dev,
4797 							MLX5_DEFAULT_FLUSH_QUEUE,
4798 							error))
4799 					return -1;
4800 				pending_rules = 0;
4801 			}
4802 		}
4803 	}
4804 	/* Drain left completion. */
4805 	if (pending_rules &&
4806 	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, error))
4807 		return -1;
4808 	return 0;
4809 }
4810 
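/**
 * Create the bulk DR actions shared by a multi-pattern table segment.
 *
 * For every used encapsulation context, an insert-header or reformat action is
 * created in bulk of @p bulk_size (log2 of the table flow number), followed by
 * a bulk modify-header action if any action template modifies headers. The
 * created actions are stored in @p segment; on failure the partially built
 * segment is destroyed.
 */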
4811 static int
4812 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
4813 			       struct rte_flow_template_table *tbl,
4814 			       struct mlx5_multi_pattern_segment *segment,
4815 			       uint32_t bulk_size,
4816 			       struct rte_flow_error *error)
4817 {
4818 	int ret = 0;
4819 	uint32_t i;
4820 	struct mlx5_priv *priv = dev->data->dev_private;
4821 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &tbl->mpctx;
4822 	const struct rte_flow_template_table_attr *table_attr = &tbl->cfg.attr;
4823 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
4824 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
4825 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][type];
4826 	struct mlx5dr_action *dr_action = NULL;
4827 
4828 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
4829 		typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + i;
4830 		enum mlx5dr_action_type reformat_type =
4831 			mlx5_multi_pattern_reformat_index_to_type(i);
4832 
4833 		if (!reformat->elements_num)
4834 			continue;
4835 		dr_action = reformat_type == MLX5DR_ACTION_TYP_INSERT_HEADER ?
4836 			mlx5dr_action_create_insert_header
4837 			(priv->dr_ctx, reformat->elements_num,
4838 			 reformat->insert_hdr, bulk_size, flags) :
4839 			mlx5dr_action_create_reformat
4840 			(priv->dr_ctx, reformat_type, reformat->elements_num,
4841 			 reformat->reformat_hdr, bulk_size, flags);
4842 		if (!dr_action) {
4843 			ret = rte_flow_error_set(error, rte_errno,
4844 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4845 						 NULL,
4846 						 "failed to create multi-pattern encap action");
4847 			goto error;
4848 		}
4849 		segment->reformat_action[i] = dr_action;
4850 	}
4851 	if (mpctx->mh.elements_num) {
4852 		typeof(mpctx->mh) *mh = &mpctx->mh;
4853 		dr_action = mlx5dr_action_create_modify_header
4854 			(priv->dr_ctx, mpctx->mh.elements_num, mh->pattern,
4855 			 bulk_size, flags);
4856 		if (!dr_action) {
4857 			ret = rte_flow_error_set(error, rte_errno,
4858 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4859 						  NULL, "failed to create multi-pattern header modify action");
4860 			goto error;
4861 		}
4862 		segment->mhdr_action = dr_action;
4863 	}
4864 	if (dr_action) {
4865 		segment->capacity = RTE_BIT32(bulk_size);
4866 		if (segment != &mpctx->segments[MLX5_MAX_TABLE_RESIZE_NUM - 1])
4867 			segment[1].head_index = segment->head_index + segment->capacity;
4868 	}
4869 	return 0;
4870 error:
4871 	mlx5_destroy_multi_pattern_segment(segment);
4872 	return ret;
4873 }
4874 
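/**
 * Attach action templates to a template table.
 *
 * A reference is taken on each action template. When the port is already
 * started, the templates are translated into table-specific DR actions and,
 * if multi-pattern mode is active, the bulk actions of the first segment are
 * created. On failure, the references and translations done so far are rolled
 * back.
 */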
4875 static int
4876 mlx5_hw_build_template_table(struct rte_eth_dev *dev,
4877 			     uint8_t nb_action_templates,
4878 			     struct rte_flow_actions_template *action_templates[],
4879 			     struct mlx5dr_action_template *at[],
4880 			     struct rte_flow_template_table *tbl,
4881 			     struct rte_flow_error *error)
4882 {
4883 	int ret;
4884 	uint8_t i;
4885 
4886 	for (i = 0; i < nb_action_templates; i++) {
4887 		uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
4888 						     rte_memory_order_relaxed) + 1;
4889 
4890 		if (refcnt <= 1) {
4891 			rte_flow_error_set(error, EINVAL,
4892 					   RTE_FLOW_ERROR_TYPE_ACTION,
4893 					   &action_templates[i], "invalid AT refcount");
4894 			goto at_error;
4895 		}
4896 		at[i] = action_templates[i]->tmpl;
4897 		tbl->ats[i].action_template = action_templates[i];
4898 		LIST_INIT(&tbl->ats[i].acts.act_list);
4899 		/* do NOT translate table action if `dev` was not started */
4900 		/* Do NOT translate table actions if `dev` was not started. */
4901 			continue;
4902 		ret = flow_hw_translate_actions_template(dev, &tbl->cfg,
4903 						  &tbl->ats[i].acts,
4904 						  action_templates[i],
4905 						  &tbl->mpctx, error);
4906 		if (ret) {
4907 			i++;
4908 			goto at_error;
4909 		}
4910 		flow_hw_populate_rule_acts_caches(dev, tbl, i);
4911 	}
4912 	tbl->nb_action_templates = nb_action_templates;
4913 	if (mlx5_is_multi_pattern_active(&tbl->mpctx)) {
4914 		ret = mlx5_tbl_multi_pattern_process(dev, tbl,
4915 						     &tbl->mpctx.segments[0],
4916 						     rte_log2_u32(tbl->cfg.attr.nb_flows),
4917 						     error);
4918 		if (ret)
4919 			goto at_error;
4920 	}
4921 	return 0;
4922 
4923 at_error:
4924 	while (i--) {
4925 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4926 		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
4927 				   1, rte_memory_order_relaxed);
4928 	}
4929 	return rte_errno;
4930 }
4931 
4932 static bool
4933 flow_hw_validate_template_domain(const struct rte_flow_attr *table_attr,
4934 				 uint32_t ingress, uint32_t egress, uint32_t transfer)
4935 {
4936 	if (table_attr->ingress)
4937 		return ingress != 0;
4938 	else if (table_attr->egress)
4939 		return egress != 0;
4940 	else
4941 		return transfer;
4942 }
4943 
4944 static bool
4945 flow_hw_validate_table_domain(const struct rte_flow_attr *table_attr)
4946 {
4947 	return table_attr->ingress + table_attr->egress + table_attr->transfer
4948 		== 1;
4949 }
4950 
4951 /**
4952  * Create flow table.
4953  *
4954  * The input item and action templates will be bound to the table.
4955  * Flow memory will also be allocated. A matcher will be created based
4956  * on the item templates. Actions will be translated to dedicated
4957  * DR actions when possible.
4958  *
4959  * @param[in] dev
4960  *   Pointer to the rte_eth_dev structure.
4961  * @param[in] table_cfg
4962  *   Pointer to the table configuration.
4963  * @param[in] item_templates
4964  *   Item template array to be bound to the table.
4965  * @param[in] nb_item_templates
4966  *   Number of item templates.
4967  * @param[in] action_templates
4968  *   Action template array to be bound to the table.
4969  * @param[in] nb_action_templates
4970  *   Number of action templates.
4971  * @param[out] error
4972  *   Pointer to error structure.
4973  *
4974  * @return
4975  *    Table on success, NULL otherwise and rte_errno is set.
4976  */
4977 static struct rte_flow_template_table *
4978 flow_hw_table_create(struct rte_eth_dev *dev,
4979 		     const struct mlx5_flow_template_table_cfg *table_cfg,
4980 		     struct rte_flow_pattern_template *item_templates[],
4981 		     uint8_t nb_item_templates,
4982 		     struct rte_flow_actions_template *action_templates[],
4983 		     uint8_t nb_action_templates,
4984 		     struct rte_flow_error *error)
4985 {
4986 	struct rte_flow_error sub_error = {
4987 		.type = RTE_FLOW_ERROR_TYPE_NONE,
4988 		.cause = NULL,
4989 		.message = NULL,
4990 	};
4991 	struct mlx5_priv *priv = dev->data->dev_private;
4992 	struct mlx5dr_matcher_attr matcher_attr = {0};
4993 	struct rte_flow_template_table *tbl = NULL;
4994 	struct mlx5_flow_group *grp;
4995 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
4996 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
4997 	const struct rte_flow_template_table_attr *attr = &table_cfg->attr;
4998 	struct rte_flow_attr flow_attr = attr->flow_attr;
4999 	struct mlx5_flow_cb_ctx ctx = {
5000 		.dev = dev,
5001 		.error = &sub_error,
5002 		.data = &flow_attr,
5003 	};
5004 	struct mlx5_indexed_pool_config cfg = {
5005 		.trunk_size = 1 << 12,
5006 		.per_core_cache = 1 << 13,
5007 		.need_lock = 1,
5008 		.release_mem_en = !!priv->sh->config.reclaim_mode,
5009 		.malloc = mlx5_malloc,
5010 		.free = mlx5_free,
5011 		.type = "mlx5_hw_table_flow",
5012 	};
5013 	struct mlx5_list_entry *ge;
5014 	uint32_t i = 0, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
5015 	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
5016 	bool port_started = !!dev->data->dev_started;
5017 	bool rpool_needed;
5018 	size_t tbl_mem_size;
5019 	int err;
5020 
5021 	if (!flow_hw_validate_table_domain(&attr->flow_attr)) {
5022 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
5023 				   NULL, "invalid table domain attributes");
5024 		return NULL;
5025 	}
5026 	for (i = 0; i < nb_item_templates; i++) {
5027 		const struct rte_flow_pattern_template_attr *pt_attr =
5028 			&item_templates[i]->attr;
5029 		bool match = flow_hw_validate_template_domain(&attr->flow_attr,
5030 							      pt_attr->ingress,
5031 							      pt_attr->egress,
5032 							      pt_attr->transfer);
5033 		if (!match) {
5034 			rte_flow_error_set(error, EINVAL,
5035 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5036 					   NULL, "pattern template domain does not match table");
5037 			return NULL;
5038 		}
5039 	}
5040 	for (i = 0; i < nb_action_templates; i++) {
5041 		const struct rte_flow_actions_template *at = action_templates[i];
5042 		bool match = flow_hw_validate_template_domain(&attr->flow_attr,
5043 							      at->attr.ingress,
5044 							      at->attr.egress,
5045 							      at->attr.transfer);
5046 		if (!match) {
5047 			rte_flow_error_set(error, EINVAL,
5048 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5049 					   NULL, "action template domain does not match table");
5050 			return NULL;
5051 		}
5052 	}
5053 	/* HWS layer accepts only 1 item template with root table. */
5054 	if (!attr->flow_attr.group)
5055 		max_tpl = 1;
5056 	cfg.max_idx = nb_flows;
5057 	cfg.size = !rte_flow_template_table_resizable(dev->data->port_id, attr) ?
5058 		   mlx5_flow_hw_entry_size() :
5059 		   mlx5_flow_hw_auxed_entry_size();
5060 	/* For a table with very few flows, disable the cache. */
5061 	if (nb_flows < cfg.trunk_size) {
5062 		cfg.per_core_cache = 0;
5063 		cfg.trunk_size = nb_flows;
5064 	} else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
5065 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
5066 	}
5067 	/* Check if too many templates are required. */
5068 	if (nb_item_templates > max_tpl ||
5069 	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
5070 		rte_errno = EINVAL;
5071 		goto error;
5072 	}
5073 	/*
5074 	 * Amount of memory required for rte_flow_template_table struct:
5075 	 * - Size of the struct itself.
5076 	 * - VLA of DR rule action containers at the end =
5077 	 *     number of actions templates * number of queues * size of DR rule actions container.
5078 	 */
5079 	tbl_mem_size = sizeof(*tbl);
5080 	tbl_mem_size += nb_action_templates * priv->nb_queue * sizeof(tbl->rule_acts[0]);
5081 	/* Allocate the table memory. */
5082 	tbl = mlx5_malloc(MLX5_MEM_ZERO, tbl_mem_size, RTE_CACHE_LINE_SIZE, rte_socket_id());
5083 	if (!tbl)
5084 		goto error;
5085 	tbl->cfg = *table_cfg;
5086 	/* Allocate flow indexed pool. */
5087 	tbl->flow = mlx5_ipool_create(&cfg);
5088 	if (!tbl->flow)
5089 		goto error;
5090 	/* Allocate table of auxiliary flow rule structs. */
5091 	tbl->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux) * nb_flows,
5092 				    RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
5093 	if (!tbl->flow_aux)
5094 		goto error;
5095 	/* Register the flow group. */
5096 	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
5097 	if (!ge)
5098 		goto error;
5099 	grp = container_of(ge, struct mlx5_flow_group, entry);
5100 	tbl->grp = grp;
5101 	/* Prepare matcher information. */
5102 	matcher_attr.resizable = !!rte_flow_template_table_resizable
5103 					(dev->data->port_id, &table_cfg->attr);
5104 	matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
5105 	matcher_attr.priority = attr->flow_attr.priority;
5106 	matcher_attr.optimize_using_rule_idx = true;
5107 	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
5108 	matcher_attr.insert_mode = flow_hw_matcher_insert_mode_get(attr->insertion_type);
5109 	if (attr->hash_func == RTE_FLOW_TABLE_HASH_FUNC_CRC16) {
5110 		DRV_LOG(ERR, "16-bit checksum hash type is not supported");
5111 		rte_errno = ENOTSUP;
5112 		goto it_error;
5113 	}
5114 	matcher_attr.distribute_mode = flow_hw_matcher_distribute_mode_get(attr->hash_func);
5115 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
5116 	/* Parse hints information. */
5117 	if (attr->specialize) {
5118 		uint32_t val = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
5119 			       RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
5120 
5121 		if ((attr->specialize & val) == val) {
5122 			DRV_LOG(ERR, "Invalid hint value %x",
5123 				attr->specialize);
5124 			rte_errno = EINVAL;
5125 			goto it_error;
5126 		}
5127 		if (attr->specialize &
5128 		    RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
5129 			matcher_attr.optimize_flow_src =
5130 				MLX5DR_MATCHER_FLOW_SRC_WIRE;
5131 		else if (attr->specialize &
5132 			 RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
5133 			matcher_attr.optimize_flow_src =
5134 				MLX5DR_MATCHER_FLOW_SRC_VPORT;
5135 	}
5136 	/* Build the item template. */
5137 	for (i = 0; i < nb_item_templates; i++) {
5138 		uint32_t ret;
5139 
5140 		if ((flow_attr.ingress && !item_templates[i]->attr.ingress) ||
5141 		    (flow_attr.egress && !item_templates[i]->attr.egress) ||
5142 		    (flow_attr.transfer && !item_templates[i]->attr.transfer)) {
5143 			DRV_LOG(ERR, "pattern template and template table attribute mismatch");
5144 			rte_errno = EINVAL;
5145 			goto it_error;
5146 		}
5147 		if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
5148 			matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
5149 		ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
5150 					 rte_memory_order_relaxed) + 1;
5151 		if (ret <= 1) {
5152 			rte_errno = EINVAL;
5153 			goto it_error;
5154 		}
5155 		mt[i] = item_templates[i]->mt;
5156 		tbl->its[i] = item_templates[i];
5157 	}
5158 	tbl->nb_item_templates = nb_item_templates;
5159 	/* Build the action template. */
5160 	err = mlx5_hw_build_template_table(dev, nb_action_templates,
5161 					   action_templates, at, tbl, &sub_error);
5162 	if (err) {
5163 		i = nb_item_templates;
5164 		goto it_error;
5165 	}
5166 	tbl->matcher_info[0].matcher = mlx5dr_matcher_create
5167 		(tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
5168 	if (!tbl->matcher_info[0].matcher)
5169 		goto at_error;
5170 	tbl->matcher_attr = matcher_attr;
5171 	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
5172 		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
5173 		    MLX5DR_TABLE_TYPE_NIC_RX);
5174 	/*
5175 	 * An additional index is needed only when the matcher supports update
5176 	 * and needs more than 1 WQE. Otherwise, the flow index can be reused.
5177 	 */
5178 	rpool_needed = mlx5dr_matcher_is_updatable(tbl->matcher_info[0].matcher) &&
5179 		       mlx5dr_matcher_is_dependent(tbl->matcher_info[0].matcher);
5180 	if (rpool_needed) {
5181 		/* Allocate rule indexed pool. */
5182 		cfg.size = 0;
5183 		cfg.type = "mlx5_hw_table_rule";
5184 		cfg.max_idx += priv->hw_q[0].size;
5185 		tbl->resource = mlx5_ipool_create(&cfg);
5186 		if (!tbl->resource)
5187 			goto res_error;
5188 	}
5189 	if (port_started)
5190 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
5191 	else
5192 		LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
5193 	rte_rwlock_init(&tbl->matcher_replace_rwlk);
5194 	return tbl;
5195 res_error:
5196 	if (tbl->matcher_info[0].matcher)
5197 		(void)mlx5dr_matcher_destroy(tbl->matcher_info[0].matcher);
5198 at_error:
5199 	for (i = 0; i < nb_action_templates; i++) {
5200 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
5201 		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
5202 				   1, rte_memory_order_relaxed);
5203 	}
5204 	i = nb_item_templates;
5205 it_error:
5206 	while (i--)
5207 		rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
5208 				   1, rte_memory_order_relaxed);
5209 error:
5210 	err = rte_errno;
5211 	if (tbl) {
5212 		if (tbl->grp)
5213 			mlx5_hlist_unregister(priv->sh->groups,
5214 					      &tbl->grp->entry);
5215 		if (tbl->flow_aux)
5216 			mlx5_free(tbl->flow_aux);
5217 		if (tbl->flow)
5218 			mlx5_ipool_destroy(tbl->flow);
5219 		mlx5_free(tbl);
5220 	}
5221 	if (error != NULL) {
5222 		if (sub_error.type == RTE_FLOW_ERROR_TYPE_NONE)
5223 			rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5224 					   "Failed to create template table");
5225 		else
5226 			rte_memcpy(error, &sub_error, sizeof(sub_error));
5227 	}
5228 	return NULL;
5229 }
5230 
5231 /**
5232  * Complete translation of flow template tables created before the port was started.
5233  *
5234  * @param[in] dev
5235  *   Pointer to the rte_eth_dev structure.
5236  * @param[out] error
5237  *   Pointer to error structure.
5238  *
5239  * @return
5240  *    0 on success, negative value otherwise and rte_errno is set.
5241  */
5242 int
5243 flow_hw_table_update(struct rte_eth_dev *dev,
5244 		     struct rte_flow_error *error)
5245 {
5246 	struct mlx5_priv *priv = dev->data->dev_private;
5247 	struct rte_flow_template_table *tbl;
5248 
5249 	while ((tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo)) != NULL) {
5250 		if (flow_hw_translate_all_actions_templates(dev, tbl, error))
5251 			return -1;
5252 		LIST_REMOVE(tbl, next);
5253 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
5254 	}
5255 	return 0;
5256 }
5257 
5258 static inline int
5259 __translate_group(struct rte_eth_dev *dev,
5260 			const struct rte_flow_attr *flow_attr,
5261 			bool external,
5262 			uint32_t group,
5263 			uint32_t *table_group,
5264 			struct rte_flow_error *error)
5265 {
5266 	struct mlx5_priv *priv = dev->data->dev_private;
5267 	struct mlx5_sh_config *config = &priv->sh->config;
5268 
5269 	if (config->dv_esw_en &&
5270 	    priv->fdb_def_rule &&
5271 	    external &&
5272 	    flow_attr->transfer) {
5273 		if (group > MLX5_HW_MAX_TRANSFER_GROUP)
5274 			return rte_flow_error_set(error, EINVAL,
5275 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5276 						  NULL,
5277 						  "group index not supported");
5278 		*table_group = group + 1;
5279 	} else if (config->dv_esw_en &&
5280 		   (config->repr_matching || config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) &&
5281 		   external &&
5282 		   flow_attr->egress) {
5283 		/*
5284 		 * On E-Switch setups, default egress flow rules are inserted to allow
5285 		 * representor matching and/or preserving metadata across steering domains.
5286 		 * These flow rules are inserted in group 0 and this group is reserved by PMD
5287 		 * for these purposes.
5288 		 *
5289 		 * As a result, if representor matching or extended metadata mode is enabled,
5290 		 * group provided by the user must be incremented to avoid inserting flow rules
5291 		 * in group 0.
5292 		 */
5293 		if (group > MLX5_HW_MAX_EGRESS_GROUP)
5294 			return rte_flow_error_set(error, EINVAL,
5295 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5296 						  NULL,
5297 						  "group index not supported");
5298 		*table_group = group + 1;
5299 	} else {
5300 		*table_group = group;
5301 	}
5302 	return 0;
5303 }
5304 
5305 /**
5306  * Translates group index specified by the user in @p attr to internal
5307  * group index.
5308  *
5309  * Translation is done by incrementing group index, so group n becomes n + 1.
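 * For example, a transfer table requested by the application in group 5 is
 * created in group 6 internally, keeping group 0 available for PMD default
 * rules.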
5310  *
5311  * @param[in] dev
5312  *   Pointer to Ethernet device.
5313  * @param[in] cfg
5314  *   Pointer to the template table configuration.
5315  * @param[in] group
5316  *   Currently used group index (table group or jump destination).
5317  * @param[out] table_group
5318  *   Pointer to output group index.
5319  * @param[out] error
5320  *   Pointer to error structure.
5321  *
5322  * @return
5323  *   0 on success. Otherwise, returns negative error code, rte_errno is set
5324  *   and error structure is filled.
5325  */
5326 static int
5327 flow_hw_translate_group(struct rte_eth_dev *dev,
5328 			const struct mlx5_flow_template_table_cfg *cfg,
5329 			uint32_t group,
5330 			uint32_t *table_group,
5331 			struct rte_flow_error *error)
5332 {
5333 	const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
5334 
5335 	return __translate_group(dev, flow_attr, cfg->external, group, table_group, error);
5336 }
5337 
5338 /**
5339  * Create flow table.
5340  *
5341  * This function is a wrapper over @ref flow_hw_table_create(), which translates parameters
5342  * provided by user to proper internal values.
5343  *
5344  * @param[in] dev
5345  *   Pointer to Ethernet device.
5346  * @param[in] attr
5347  *   Pointer to the table attributes.
5348  * @param[in] item_templates
5349  *   Item template array to be binded to the table.
5350  * @param[in] nb_item_templates
5351  *   Number of item templates.
5352  * @param[in] action_templates
5353  *   Action template array to be binded to the table.
5354  * @param[in] nb_action_templates
5355  *   Number of action templates.
5356  * @param[out] error
5357  *   Pointer to error structure.
5358  *
5359  * @return
5360  *   Table on success. Otherwise, NULL is returned, rte_errno is set
5361  *   and the error structure is filled.
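 *
 * A minimal application-side usage sketch (illustrative only; the pattern and
 * actions templates are assumed to have been created beforehand):
 *
 * @code{.c}
 * struct rte_flow_template_table_attr attr = {
 *     .flow_attr = { .group = 1, .ingress = 1 },
 *     .nb_flows = 1024,
 * };
 * struct rte_flow_error error;
 * struct rte_flow_template_table *table;
 *
 * table = rte_flow_template_table_create(port_id, &attr,
 *                                        &pattern_template, 1,
 *                                        &actions_template, 1, &error);
 * @endcode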
5362  */
5363 static struct rte_flow_template_table *
5364 flow_hw_template_table_create(struct rte_eth_dev *dev,
5365 			      const struct rte_flow_template_table_attr *attr,
5366 			      struct rte_flow_pattern_template *item_templates[],
5367 			      uint8_t nb_item_templates,
5368 			      struct rte_flow_actions_template *action_templates[],
5369 			      uint8_t nb_action_templates,
5370 			      struct rte_flow_error *error)
5371 {
5372 	struct mlx5_flow_template_table_cfg cfg = {
5373 		.attr = *attr,
5374 		.external = true,
5375 	};
5376 	uint32_t group = attr->flow_attr.group;
5377 
5378 	if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
5379 		return NULL;
5380 	if (!cfg.attr.flow_attr.group &&
5381 	    rte_flow_template_table_resizable(dev->data->port_id, attr)) {
5382 		rte_flow_error_set(error, EINVAL,
5383 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5384 				   "table cannot be resized: invalid group");
5385 		return NULL;
5386 	}
5387 	return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
5388 				    action_templates, nb_action_templates, error);
5389 }
5390 
5391 static void
5392 mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment)
5393 {
5394 	int i;
5395 
5396 	if (segment->mhdr_action)
5397 		mlx5dr_action_destroy(segment->mhdr_action);
5398 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
5399 		if (segment->reformat_action[i])
5400 			mlx5dr_action_destroy(segment->reformat_action[i]);
5401 	}
5402 	segment->capacity = 0;
5403 }
5404 
5405 static void
5406 flow_hw_destroy_table_multi_pattern_ctx(struct rte_flow_template_table *table)
5407 {
5408 	int sx;
5409 
5410 	for (sx = 0; sx < MLX5_MAX_TABLE_RESIZE_NUM; sx++)
5411 		mlx5_destroy_multi_pattern_segment(table->mpctx.segments + sx);
5412 }
5413 }

5414  * Destroy flow table.
5415  *
5416  * @param[in] dev
5417  *   Pointer to the rte_eth_dev structure.
5418  * @param[in] table
5419  *   Pointer to the table to be destroyed.
5420  * @param[out] error
5421  *   Pointer to error structure.
5422  *
5423  * @return
5424  *   0 on success, a negative errno value otherwise and rte_errno is set.
5425  */
5426 static int
5427 flow_hw_table_destroy(struct rte_eth_dev *dev,
5428 		      struct rte_flow_template_table *table,
5429 		      struct rte_flow_error *error)
5430 {
5431 	struct mlx5_priv *priv = dev->data->dev_private;
5432 	int i;
5433 	uint32_t fidx = 1;
5434 	uint32_t ridx = 1;
5435 
5436 	/* Build ipool allocated object bitmap. */
5437 	if (table->resource)
5438 		mlx5_ipool_flush_cache(table->resource);
5439 	mlx5_ipool_flush_cache(table->flow);
5440 	/* Check if ipool has allocated objects. */
5441 	if (table->refcnt ||
5442 	    mlx5_ipool_get_next(table->flow, &fidx) ||
5443 	    (table->resource && mlx5_ipool_get_next(table->resource, &ridx))) {
5444 		DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
5445 		return rte_flow_error_set(error, EBUSY,
5446 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5447 				   NULL,
5448 				   "table is in use");
5449 	}
5450 	LIST_REMOVE(table, next);
5451 	for (i = 0; i < table->nb_item_templates; i++)
5452 		rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
5453 				   1, rte_memory_order_relaxed);
5454 	for (i = 0; i < table->nb_action_templates; i++) {
5455 		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
5456 		rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
5457 				   1, rte_memory_order_relaxed);
5458 	}
5459 	flow_hw_destroy_table_multi_pattern_ctx(table);
5460 	if (table->matcher_info[0].matcher)
5461 		mlx5dr_matcher_destroy(table->matcher_info[0].matcher);
5462 	if (table->matcher_info[1].matcher)
5463 		mlx5dr_matcher_destroy(table->matcher_info[1].matcher);
5464 	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
5465 	if (table->resource)
5466 		mlx5_ipool_destroy(table->resource);
5467 	mlx5_free(table->flow_aux);
5468 	mlx5_ipool_destroy(table->flow);
5469 	mlx5_free(table);
5470 	return 0;
5471 }
5472 
5473 /**
5474  * Parse group's miss actions.
5475  *
5476  * @param[in] dev
5477  *   Pointer to the rte_eth_dev structure.
5478  * @param[in] cfg
5479  *   Pointer to the table_cfg structure.
5480  * @param[in] actions
5481  *   Array of actions to perform on group miss. Supported types:
5482  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5483  * @param[out] dst_group_id
5484  *   Pointer to destination group id output. It is set to 0 if the actions
5485  *   list contains only END, otherwise to the destination group id.
5486  * @param[out] error
5487  *   Pointer to error structure.
5488  *
5489  * @return
5490  *   0 on success, a negative errno value otherwise and rte_errno is set.
5491  */
5492 
5493 static int
5494 flow_hw_group_parse_miss_actions(struct rte_eth_dev *dev,
5495 				 struct mlx5_flow_template_table_cfg *cfg,
5496 				 const struct rte_flow_action actions[],
5497 				 uint32_t *dst_group_id,
5498 				 struct rte_flow_error *error)
5499 {
5500 	const struct rte_flow_action_jump *jump_conf;
5501 	uint32_t temp = 0;
5502 	uint32_t i;
5503 
5504 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
5505 		switch (actions[i].type) {
5506 		case RTE_FLOW_ACTION_TYPE_VOID:
5507 			continue;
5508 		case RTE_FLOW_ACTION_TYPE_JUMP:
5509 			if (temp)
5510 				return rte_flow_error_set(error, ENOTSUP,
5511 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, actions,
5512 							  "Miss actions can contain only a single JUMP");
5513 
5514 			jump_conf = (const struct rte_flow_action_jump *)actions[i].conf;
5515 			if (!jump_conf)
5516 				return rte_flow_error_set(error, EINVAL,
5517 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5518 							  jump_conf, "Jump conf must not be NULL");
5519 
5520 			if (flow_hw_translate_group(dev, cfg, jump_conf->group, &temp, error))
5521 				return -rte_errno;
5522 
5523 			if (!temp)
5524 				return rte_flow_error_set(error, EINVAL,
5525 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5526 							  "Failed to set group miss actions - Invalid target group");
5527 			break;
5528 		default:
5529 			return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5530 						  &actions[i], "Unsupported default miss action type");
5531 		}
5532 	}
5533 
5534 	*dst_group_id = temp;
5535 	return 0;
5536 }
5537 
5538 /**
5539  * Set group's miss group.
5540  *
5541  * @param[in] dev
5542  *   Pointer to the rte_eth_dev structure.
5543  * @param[in] cfg
5544  *   Pointer to the table_cfg structure.
5545  * @param[in] src_grp
5546  *   Pointer to source group structure.
5547  *   if NULL, a new group will be created based on group id from cfg->attr.flow_attr.group.
5548  *   If NULL, a new group is created based on the group id from cfg->attr.flow_attr.group.
5549  *   Pointer to destination group structure.
5550  * @param[out] error
5551  *   Pointer to error structure.
5552  *
5553  * @return
5554  *   0 on success, a negative errno value otherwise and rte_errno is set.
5555  */
5556 
5557 static int
5558 flow_hw_group_set_miss_group(struct rte_eth_dev *dev,
5559 			     struct mlx5_flow_template_table_cfg *cfg,
5560 			     struct mlx5_flow_group *src_grp,
5561 			     struct mlx5_flow_group *dst_grp,
5562 			     struct rte_flow_error *error)
5563 {
5564 	struct rte_flow_error sub_error = {
5565 		.type = RTE_FLOW_ERROR_TYPE_NONE,
5566 		.cause = NULL,
5567 		.message = NULL,
5568 	};
5569 	struct mlx5_flow_cb_ctx ctx = {
5570 		.dev = dev,
5571 		.error = &sub_error,
5572 		.data = &cfg->attr.flow_attr,
5573 	};
5574 	struct mlx5_priv *priv = dev->data->dev_private;
5575 	struct mlx5_list_entry *ge;
5576 	bool ref = false;
5577 	int ret;
5578 
5579 	if (!dst_grp)
5580 		return -EINVAL;
5581 
5582 	/* If group doesn't exist - needs to be created. */
5583 	if (!src_grp) {
5584 		ge = mlx5_hlist_register(priv->sh->groups, cfg->attr.flow_attr.group, &ctx);
5585 		if (!ge)
5586 			return -rte_errno;
5587 
5588 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
5589 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5590 		ref = true;
5591 	} else if (!src_grp->miss_group) {
5592 		/* If group exists, but has no miss actions - need to increase ref_cnt. */
5593 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5594 		src_grp->entry.ref_cnt++;
5595 		ref = true;
5596 	}
5597 
5598 	ret = mlx5dr_table_set_default_miss(src_grp->tbl, dst_grp->tbl);
5599 	if (ret)
5600 		goto mlx5dr_error;
5601 
5602 	/* If the group existed and already had miss actions, its ref_cnt is
5603 	 * correct; only the ref counter of the old miss group needs reducing.
5604 	 */
5605 	if (src_grp->miss_group)
5606 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->miss_group->entry);
5607 
5608 	src_grp->miss_group = dst_grp;
5609 	return 0;
5610 
5611 mlx5dr_error:
5612 	/* Reduce src_grp ref_cnt back & remove from grp list in case of mlx5dr error */
5613 	if (ref) {
5614 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->entry);
5615 		LIST_REMOVE(src_grp, next);
5616 	}
5617 
5618 	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5619 				  "Failed to set group miss actions");
5620 }
5621 
5622 /**
5623  * Unset group's miss group.
5624  *
5625  * @param[in] dev
5626  *   Pointer to the rte_eth_dev structure.
5627  * @param[in] grp
5628  *   Pointer to group structure.
5629  * @param[out] error
5630  *   Pointer to error structure.
5631  *
5632  * @return
5633  *   0 on success, a negative errno value otherwise and rte_errno is set.
5634  */
5635 
5636 static int
5637 flow_hw_group_unset_miss_group(struct rte_eth_dev *dev,
5638 			       struct mlx5_flow_group *grp,
5639 			       struct rte_flow_error *error)
5640 {
5641 	struct mlx5_priv *priv = dev->data->dev_private;
5642 	int ret;
5643 
5644 	/* If group doesn't exist - no need to change anything. */
5645 	if (!grp)
5646 		return 0;
5647 
5648 	/* If the group exists but its miss action is already the default
5649 	 * behavior, there is nothing to change.
5650 	 */
5651 	if (!grp->miss_group)
5652 		return 0;
5653 
5654 	ret = mlx5dr_table_set_default_miss(grp->tbl, NULL);
5655 	if (ret)
5656 		return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5657 					  "Failed to unset group miss actions");
5658 
5659 	mlx5_hlist_unregister(priv->sh->groups, &grp->miss_group->entry);
5660 	grp->miss_group = NULL;
5661 
5662 	LIST_REMOVE(grp, next);
5663 	mlx5_hlist_unregister(priv->sh->groups, &grp->entry);
5664 
5665 	return 0;
5666 }
5667 
5668 /**
5669  * Set group miss actions.
5670  *
5671  * @param[in] dev
5672  *   Pointer to the rte_eth_dev structure.
5673  * @param[in] group_id
5674  *   Group id.
5675  * @param[in] attr
5676  *   Pointer to group attributes structure.
5677  * @param[in] actions
5678  *   Array of actions to perform on group miss. Supported types:
5679  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5680  * @param[out] error
5681  *   Pointer to error structure.
5682  *
5683  * @return
5684  *   0 on success, a negative errno value otherwise and rte_errno is set.
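 *
 * A minimal application-side usage sketch (illustrative only; the group
 * numbers are arbitrary):
 *
 * @code{.c}
 * struct rte_flow_group_attr gattr = { .ingress = 1 };
 * struct rte_flow_action miss[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_JUMP,
 *       .conf = &(struct rte_flow_action_jump){ .group = 3 } },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error error;
 *
 * rte_flow_group_set_miss_actions(port_id, 1, &gattr, miss, &error);
 * @endcode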
5685  */
5686 
5687 static int
5688 flow_hw_group_set_miss_actions(struct rte_eth_dev *dev,
5689 			       uint32_t group_id,
5690 			       const struct rte_flow_group_attr *attr,
5691 			       const struct rte_flow_action actions[],
5692 			       struct rte_flow_error *error)
5693 {
5694 	struct rte_flow_error sub_error = {
5695 		.type = RTE_FLOW_ERROR_TYPE_NONE,
5696 		.cause = NULL,
5697 		.message = NULL,
5698 	};
5699 	struct mlx5_flow_template_table_cfg cfg = {
5700 		.external = true,
5701 		.attr = {
5702 			.flow_attr = {
5703 				.group = group_id,
5704 				.ingress = attr->ingress,
5705 				.egress = attr->egress,
5706 				.transfer = attr->transfer,
5707 			},
5708 		},
5709 	};
5710 	struct mlx5_flow_cb_ctx ctx = {
5711 		.dev = dev,
5712 		.error = &sub_error,
5713 		.data = &cfg.attr.flow_attr,
5714 	};
5715 	struct mlx5_priv *priv = dev->data->dev_private;
5716 	struct mlx5_flow_group *src_grp = NULL;
5717 	struct mlx5_flow_group *dst_grp = NULL;
5718 	struct mlx5_list_entry *ge;
5719 	uint32_t dst_group_id = 0;
5720 	int ret;
5721 
5722 	if (flow_hw_translate_group(dev, &cfg, group_id, &group_id, error))
5723 		return -rte_errno;
5724 
5725 	if (!group_id)
5726 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5727 					  NULL, "Failed to set group miss actions - invalid group id");
5728 
5729 	ret = flow_hw_group_parse_miss_actions(dev, &cfg, actions, &dst_group_id, error);
5730 	if (ret)
5731 		return -rte_errno;
5732 
5733 	if (dst_group_id == group_id) {
5734 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5735 					  NULL, "Failed to set group miss actions - target group id must differ from group_id");
5736 	}
5737 
5738 	cfg.attr.flow_attr.group = group_id;
5739 	ge = mlx5_hlist_lookup(priv->sh->groups, group_id, &ctx);
5740 	if (ge)
5741 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
5742 
5743 	if (dst_group_id) {
5744 		/* Increase ref_cnt for new miss group. */
5745 		cfg.attr.flow_attr.group = dst_group_id;
5746 		ge = mlx5_hlist_register(priv->sh->groups, dst_group_id, &ctx);
5747 		if (!ge)
5748 			return -rte_errno;
5749 
5750 		dst_grp = container_of(ge, struct mlx5_flow_group, entry);
5751 
5752 		cfg.attr.flow_attr.group = group_id;
5753 		ret = flow_hw_group_set_miss_group(dev, &cfg, src_grp, dst_grp, error);
5754 		if (ret)
5755 			goto error;
5756 	} else {
5757 		return flow_hw_group_unset_miss_group(dev, src_grp, error);
5758 	}
5759 
5760 	return 0;
5761 
5762 error:
5763 	if (dst_grp)
5764 		mlx5_hlist_unregister(priv->sh->groups, &dst_grp->entry);
5765 	return -rte_errno;
5766 }
5767 
5768 static bool
5769 flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
5770 			     enum rte_flow_field_id field)
5771 {
5772 	return action->src.field == field || action->dst.field == field;
5773 }
5774 
5775 static bool
5776 flow_hw_modify_field_is_geneve_opt(enum rte_flow_field_id field)
5777 {
5778 	return field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE ||
5779 	       field == RTE_FLOW_FIELD_GENEVE_OPT_CLASS ||
5780 	       field == RTE_FLOW_FIELD_GENEVE_OPT_DATA;
5781 }
5782 
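/*
 * With the RTE_FLOW_MODIFY_ADD operation, a field (as opposed to an immediate
 * value or a pointer) may be used as the source only for the destination
 * fields listed below.
 */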
5783 static bool
5784 flow_hw_modify_field_is_add_dst_valid(const struct rte_flow_action_modify_field *conf)
5785 {
5786 	if (conf->operation != RTE_FLOW_MODIFY_ADD)
5787 		return true;
5788 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
5789 	    conf->src.field == RTE_FLOW_FIELD_VALUE)
5790 		return true;
5791 	switch (conf->dst.field) {
5792 	case RTE_FLOW_FIELD_IPV4_TTL:
5793 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
5794 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
5795 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
5796 	case RTE_FLOW_FIELD_TAG:
5797 	case RTE_FLOW_FIELD_META:
5798 	case RTE_FLOW_FIELD_FLEX_ITEM:
5799 	case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
5800 	case RTE_FLOW_FIELD_IPV4_IHL:
5801 	case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
5802 	case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
5803 		return true;
5804 	default:
5805 		break;
5806 	}
5807 	return false;
5808 }
5809 
5810 /**
5811  * Validate the level value for modify field action.
5812  *
5813  * @param[in] data
5814  *   Pointer to the rte_flow_field_data structure either src or dst.
5815  * @param[in] inner_supported
5816  *   Indicator whether inner should be supported.
5817  * @param[out] error
5818  *   Pointer to error structure.
5819  *
5820  * @return
5821  *   0 on success, a negative errno value otherwise and rte_errno is set.
5822  */
5823 static int
5824 flow_hw_validate_modify_field_level(const struct rte_flow_field_data *data,
5825 				    bool inner_supported,
5826 				    struct rte_flow_error *error)
5827 {
5828 	switch ((int)data->field) {
5829 	case RTE_FLOW_FIELD_START:
5830 	case RTE_FLOW_FIELD_VLAN_TYPE:
5831 	case RTE_FLOW_FIELD_RANDOM:
5832 	case RTE_FLOW_FIELD_FLEX_ITEM:
5833 		/*
5834 		 * 'level' need not be validated since the field is either not
5835 		 * supported or does not use 'level'.
5836 		 */
5837 		break;
5838 	case RTE_FLOW_FIELD_MARK:
5839 	case RTE_FLOW_FIELD_META:
5840 	case RTE_FLOW_FIELD_METER_COLOR:
5841 	case RTE_FLOW_FIELD_HASH_RESULT:
5842 		/* For meta data fields encapsulation level is don't-care. */
5843 		/* For metadata fields, the encapsulation level is a don't-care. */
5844 	case RTE_FLOW_FIELD_TAG:
5845 	case MLX5_RTE_FLOW_FIELD_META_REG:
5846 		/*
5847 		 * The tag array for the RTE_FLOW_FIELD_TAG type is provided using
5848 		 * the 'tag_index' field. In the old API it was provided using the
5849 		 * 'level' field, which is still supported for backwards compatibility.
5850 		 * Therefore, for the tag field only, 'level' matters: it is taken
5851 		 * as the tag index when the 'tag_index' field isn't set, and an
5852 		 * error is returned when both are set.
5853 		 */
5854 		if (data->level > 0) {
5855 			if (data->tag_index > 0)
5856 				return rte_flow_error_set(error, EINVAL,
5857 							  RTE_FLOW_ERROR_TYPE_ACTION,
5858 							  data,
5859 							  "tag array can be provided using 'level' or 'tag_index' fields, not both");
5860 			DRV_LOG(WARNING,
5861 				"tag array provided in 'level' field instead of 'tag_index' field.");
5862 		}
5863 		break;
5864 	case RTE_FLOW_FIELD_MAC_DST:
5865 	case RTE_FLOW_FIELD_MAC_SRC:
5866 	case RTE_FLOW_FIELD_MAC_TYPE:
5867 	case RTE_FLOW_FIELD_IPV4_IHL:
5868 	case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
5869 	case RTE_FLOW_FIELD_IPV4_DSCP:
5870 	case RTE_FLOW_FIELD_IPV4_ECN:
5871 	case RTE_FLOW_FIELD_IPV4_TTL:
5872 	case RTE_FLOW_FIELD_IPV4_SRC:
5873 	case RTE_FLOW_FIELD_IPV4_DST:
5874 	case RTE_FLOW_FIELD_IPV6_TRAFFIC_CLASS:
5875 	case RTE_FLOW_FIELD_IPV6_FLOW_LABEL:
5876 	case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
5877 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
5878 	case RTE_FLOW_FIELD_IPV6_SRC:
5879 	case RTE_FLOW_FIELD_IPV6_DST:
5880 	case RTE_FLOW_FIELD_TCP_PORT_SRC:
5881 	case RTE_FLOW_FIELD_TCP_PORT_DST:
5882 	case RTE_FLOW_FIELD_TCP_FLAGS:
5883 	case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
5884 	case RTE_FLOW_FIELD_UDP_PORT_SRC:
5885 	case RTE_FLOW_FIELD_UDP_PORT_DST:
5886 		if (data->level > 2)
5887 			return rte_flow_error_set(error, ENOTSUP,
5888 						  RTE_FLOW_ERROR_TYPE_ACTION,
5889 						  data,
5890 						  "second inner header fields modification is not supported");
5891 		if (inner_supported)
5892 			break;
5893 		/* Fallthrough */
5894 	case RTE_FLOW_FIELD_VLAN_ID:
5895 	case RTE_FLOW_FIELD_IPV4_PROTO:
5896 	case RTE_FLOW_FIELD_IPV6_PROTO:
5897 	case RTE_FLOW_FIELD_IPV6_DSCP:
5898 	case RTE_FLOW_FIELD_IPV6_ECN:
5899 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
5900 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
5901 	case RTE_FLOW_FIELD_ESP_PROTO:
5902 	case RTE_FLOW_FIELD_ESP_SPI:
5903 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
5904 	case RTE_FLOW_FIELD_VXLAN_VNI:
5905 	case RTE_FLOW_FIELD_VXLAN_LAST_RSVD:
5906 	case RTE_FLOW_FIELD_GENEVE_VNI:
5907 	case RTE_FLOW_FIELD_GENEVE_OPT_TYPE:
5908 	case RTE_FLOW_FIELD_GENEVE_OPT_CLASS:
5909 	case RTE_FLOW_FIELD_GENEVE_OPT_DATA:
5910 	case RTE_FLOW_FIELD_GTP_TEID:
5911 	case RTE_FLOW_FIELD_GTP_PSC_QFI:
5912 		if (data->level > 1)
5913 			return rte_flow_error_set(error, ENOTSUP,
5914 						  RTE_FLOW_ERROR_TYPE_ACTION,
5915 						  data,
5916 						  "inner header fields modification is not supported");
5917 		break;
5918 	case RTE_FLOW_FIELD_MPLS:
5919 		if (data->level == 1)
5920 			return rte_flow_error_set(error, ENOTSUP,
5921 						  RTE_FLOW_ERROR_TYPE_ACTION,
5922 						  data,
5923 						  "outer MPLS header modification is not supported");
5924 		if (data->level > 2)
5925 			return rte_flow_error_set(error, ENOTSUP,
5926 						  RTE_FLOW_ERROR_TYPE_ACTION,
5927 						  data,
5928 						  "inner MPLS header modification is not supported");
5929 		break;
5930 	case RTE_FLOW_FIELD_POINTER:
5931 	case RTE_FLOW_FIELD_VALUE:
5932 	default:
5933 		MLX5_ASSERT(false);
5934 	}
5935 	return 0;
5936 }
5937 
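/**
 * Validate a MODIFY_FIELD action against its template mask.
 *
 * The mask must fully specify the operation, the source and destination
 * fields, their levels, offsets and the width; the fields themselves must be
 * supported by the HWS modify-header implementation.
 */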
5938 static int
5939 flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
5940 				     const struct rte_flow_action *action,
5941 				     const struct rte_flow_action *mask,
5942 				     struct rte_flow_error *error)
5943 {
5944 	const struct rte_flow_action_modify_field *action_conf = action->conf;
5945 	const struct rte_flow_action_modify_field *mask_conf = mask->conf;
5946 	struct mlx5_priv *priv = dev->data->dev_private;
5947 	struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
5948 	int ret;
5949 
5950 	if (!mask_conf)
5951 		return rte_flow_error_set(error, EINVAL,
5952 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
5953 					  "modify_field mask conf is missing");
5954 	if (action_conf->operation != mask_conf->operation)
5955 		return rte_flow_error_set(error, EINVAL,
5956 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5957 				"modify_field operation mask and template are not equal");
5958 	if (action_conf->dst.field != mask_conf->dst.field)
5959 		return rte_flow_error_set(error, EINVAL,
5960 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5961 				"destination field mask and template are not equal");
5962 	if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
5963 	    action_conf->dst.field == RTE_FLOW_FIELD_VALUE ||
5964 	    action_conf->dst.field == RTE_FLOW_FIELD_HASH_RESULT)
5965 		return rte_flow_error_set(error, EINVAL,
5966 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5967 				"immediate value, pointer and hash result cannot be used as destination");
5968 	ret = flow_hw_validate_modify_field_level(&action_conf->dst, false, error);
5969 	if (ret)
5970 		return ret;
5971 	if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM &&
5972 	    !flow_hw_modify_field_is_geneve_opt(action_conf->dst.field)) {
5973 		if (action_conf->dst.tag_index &&
5974 		    !flow_modify_field_support_tag_array(action_conf->dst.field))
5975 			return rte_flow_error_set(error, EINVAL,
5976 					RTE_FLOW_ERROR_TYPE_ACTION, action,
5977 					"destination tag index is not supported");
5978 		if (action_conf->dst.class_id)
5979 			return rte_flow_error_set(error, EINVAL,
5980 					RTE_FLOW_ERROR_TYPE_ACTION, action,
5981 					"destination class id is not supported");
5982 	}
5983 	if (mask_conf->dst.level != UINT8_MAX)
5984 		return rte_flow_error_set(error, EINVAL,
5985 			RTE_FLOW_ERROR_TYPE_ACTION, action,
5986 			"destination encapsulation level must be fully masked");
5987 	if (mask_conf->dst.offset != UINT32_MAX)
5988 		return rte_flow_error_set(error, EINVAL,
5989 			RTE_FLOW_ERROR_TYPE_ACTION, action,
5990 			"destination offset must be fully masked");
5991 	if (action_conf->src.field != mask_conf->src.field)
5992 		return rte_flow_error_set(error, EINVAL,
5993 				RTE_FLOW_ERROR_TYPE_ACTION, action,
5994 				"source field mask and template are not equal");
5995 	if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
5996 	    action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
5997 		if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM &&
5998 		    !flow_hw_modify_field_is_geneve_opt(action_conf->src.field)) {
5999 			if (action_conf->src.tag_index &&
6000 			    !flow_modify_field_support_tag_array(action_conf->src.field))
6001 				return rte_flow_error_set(error, EINVAL,
6002 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6003 					"source tag index is not supported");
6004 			if (action_conf->src.class_id)
6005 				return rte_flow_error_set(error, EINVAL,
6006 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6007 					"source class id is not supported");
6008 		}
6009 		if (mask_conf->src.level != UINT8_MAX)
6010 			return rte_flow_error_set(error, EINVAL,
6011 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6012 				"source encapsulation level must be fully masked");
6013 		if (mask_conf->src.offset != UINT32_MAX)
6014 			return rte_flow_error_set(error, EINVAL,
6015 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6016 				"source offset must be fully masked");
6017 		ret = flow_hw_validate_modify_field_level(&action_conf->src, true, error);
6018 		if (ret)
6019 			return ret;
6020 	}
6021 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
6022 	     action_conf->dst.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
6023 	     action_conf->dst.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) ||
6024 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
6025 	     action_conf->src.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
6026 	     action_conf->src.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX))
6027 		return rte_flow_error_set(error, EINVAL,
6028 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6029 				 "tag index is out of range");
6030 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
6031 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->dst.tag_index) == REG_NON) ||
6032 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
6033 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->src.tag_index) == REG_NON))
6034 		return rte_flow_error_set(error, EINVAL,
6035 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6036 					  "tag index has no available register");
6037 	if (mask_conf->width != UINT32_MAX)
6038 		return rte_flow_error_set(error, EINVAL,
6039 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6040 				"modify_field width field must be fully masked");
6041 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
6042 		return rte_flow_error_set(error, EINVAL,
6043 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6044 				"modifying arbitrary place in a packet is not supported");
6045 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
6046 		return rte_flow_error_set(error, EINVAL,
6047 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6048 				"modifying vlan_type is not supported");
6049 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_RANDOM))
6050 		return rte_flow_error_set(error, EINVAL,
6051 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6052 				"modifying random value is not supported");
6053 	/*
6054 	 * GENEVE VNI modification is supported only when the GENEVE header is
6055 	 * parsed natively. When GENEVE options are supported, both the GENEVE
6056 	 * and option headers are parsed by a flex parser.
6057 	 */
6058 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI) &&
6059 	    attr->geneve_tlv_opt)
6060 		return rte_flow_error_set(error, EINVAL,
6061 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6062 				"modifying Geneve VNI is not supported when GENEVE opt is supported");
6063 	if (priv->tlv_options == NULL &&
6064 	    (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_TYPE) ||
6065 	     flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_CLASS) ||
6066 	     flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_DATA)))
6067 		return rte_flow_error_set(error, EINVAL,
6068 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6069 				"modifying Geneve TLV option is supported only after parser configuration");
6070 	/* Due to HW bug, tunnel MPLS header is read only. */
6071 	if (action_conf->dst.field == RTE_FLOW_FIELD_MPLS)
6072 		return rte_flow_error_set(error, EINVAL,
6073 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6074 				"MPLS cannot be used as destination");
6075 	/* ADD_FIELD is not supported for all the fields. */
6076 	if (!flow_hw_modify_field_is_add_dst_valid(action_conf))
6077 		return rte_flow_error_set(error, EINVAL,
6078 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6079 				"invalid add_field destination");
6080 	return 0;
6081 }
6082 
6083 static int
6084 flow_hw_validate_action_port_representor(struct rte_eth_dev *dev __rte_unused,
6085 					 const struct rte_flow_actions_template_attr *attr,
6086 					 const struct rte_flow_action *action,
6087 					 const struct rte_flow_action *mask,
6088 					 struct rte_flow_error *error)
6089 {
6090 	const struct rte_flow_action_ethdev *action_conf = NULL;
6091 	const struct rte_flow_action_ethdev *mask_conf = NULL;
6092 
6093 	/* If transfer is set, port has been validated as proxy port. */
6094 	if (!attr->transfer)
6095 		return rte_flow_error_set(error, EINVAL,
6096 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6097 					  "cannot use port_representor actions"
6098 					  " without an E-Switch");
6099 	if (!action || !mask)
6100 		return rte_flow_error_set(error, EINVAL,
6101 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6102 					  "action and mask configuration must be set");
6103 	action_conf = action->conf;
6104 	mask_conf = mask->conf;
6105 	if (!mask_conf || mask_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR ||
6106 	    !action_conf || action_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
6107 		return rte_flow_error_set(error, EINVAL,
6108 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6109 					  "only eswitch manager port 0xffff is"
6110 					  " supported");
6111 	return 0;
6112 }
6113 
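/*
 * A target port of a port action is valid if it is the E-Switch manager or a
 * port which shares the E-Switch domain with the transfer proxy port.
 */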
6114 static int
6115 flow_hw_validate_target_port_id(struct rte_eth_dev *dev,
6116 				uint16_t target_port_id)
6117 {
6118 	struct mlx5_priv *port_priv;
6119 	struct mlx5_priv *dev_priv;
6120 
6121 	if (target_port_id == MLX5_REPRESENTED_PORT_ESW_MGR)
6122 		return 0;
6123 
6124 	port_priv = mlx5_port_to_eswitch_info(target_port_id, false);
6125 	if (!port_priv) {
6126 		rte_errno = EINVAL;
6127 		DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for port %u",
6128 			dev->data->port_id, target_port_id);
6129 		return -rte_errno;
6130 	}
6131 
6132 	dev_priv = mlx5_dev_to_eswitch_info(dev);
6133 	if (!dev_priv) {
6134 		rte_errno = EINVAL;
6135 		DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for transfer proxy",
6136 			dev->data->port_id);
6137 		return -rte_errno;
6138 	}
6139 
6140 	if (port_priv->domain_id != dev_priv->domain_id) {
6141 		rte_errno = EINVAL;
6142 		DRV_LOG(ERR, "Port %u target port %u is not in the same E-Switch domain as the transfer proxy",
6143 			dev->data->port_id, target_port_id);
6144 		return -rte_errno;
6145 	}
6146 
6147 	return 0;
6148 }
6149 
6150 static int
6151 flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
6152 					 const struct rte_flow_action *action,
6153 					 const struct rte_flow_action *mask,
6154 					 struct rte_flow_error *error)
6155 {
6156 	const struct rte_flow_action_ethdev *action_conf = action->conf;
6157 	const struct rte_flow_action_ethdev *mask_conf = mask->conf;
6158 	struct mlx5_priv *priv = dev->data->dev_private;
6159 
6160 	if (!priv->sh->config.dv_esw_en)
6161 		return rte_flow_error_set(error, EINVAL,
6162 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6163 					  "cannot use represented_port actions"
6164 					  " without an E-Switch");
6165 	if (mask_conf && mask_conf->port_id) {
6166 		if (!action_conf)
6167 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
6168 						  action, "port index was not provided");
6169 
6170 		if (flow_hw_validate_target_port_id(dev, action_conf->port_id))
6171 			return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
6172 						  action, "port index is invalid");
6173 	}
6174 	return 0;
6175 }
6176 
6177 /**
6178  * Validate AGE action.
6179  *
6180  * @param[in] dev
6181  *   Pointer to rte_eth_dev structure.
6182  * @param[in] action
6183  *   Pointer to the indirect action.
6184  * @param[in] action_flags
6185  *   Holds the actions detected until now.
6186  * @param[in] fixed_cnt
6187  *   Indicator if this list has a fixed COUNT action.
6188  * @param[out] error
6189  *   Pointer to error structure.
6190  *
6191  * @return
6192  *   0 on success, a negative errno value otherwise and rte_errno is set.
6193  */
6194 static int
6195 flow_hw_validate_action_age(struct rte_eth_dev *dev,
6196 			    const struct rte_flow_action *action,
6197 			    uint64_t action_flags, bool fixed_cnt,
6198 			    struct rte_flow_error *error)
6199 {
6200 	struct mlx5_priv *priv = dev->data->dev_private;
6201 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
6202 
6203 	if (!priv->sh->cdev->config.devx)
6204 		return rte_flow_error_set(error, ENOTSUP,
6205 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6206 					  NULL, "AGE action not supported");
6207 	if (age_info->ages_ipool == NULL)
6208 		return rte_flow_error_set(error, EINVAL,
6209 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6210 					  "aging pool not initialized");
6211 	if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
6212 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
6213 		return rte_flow_error_set(error, EINVAL,
6214 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6215 					  "duplicate AGE actions set");
6216 	if (fixed_cnt)
6217 		return rte_flow_error_set(error, EINVAL,
6218 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6219 					  "AGE and fixed COUNT combination is not supported");
6220 	return 0;
6221 }
6222 
6223 /**
6224  * Validate count action.
6225  *
6226  * @param[in] dev
6227  *   Pointer to rte_eth_dev structure.
6228  * @param[in] action
6229  *   Pointer to the indirect action.
6230  * @param[in] mask
6231  *   Pointer to the indirect action mask.
6232  * @param[in] action_flags
6233  *   Holds the actions detected until now.
6234  * @param[out] error
6235  *   Pointer to error structure.
6236  *
6237  * @return
6238  *   0 on success, a negative errno value otherwise and rte_errno is set.
6239  */
6240 static int
6241 flow_hw_validate_action_count(struct rte_eth_dev *dev,
6242 			      const struct rte_flow_action *action,
6243 			      const struct rte_flow_action *mask,
6244 			      uint64_t action_flags,
6245 			      struct rte_flow_error *error)
6246 {
6247 	struct mlx5_priv *priv = dev->data->dev_private;
6248 	const struct rte_flow_action_count *count = mask->conf;
6249 
6250 	if (!priv->sh->cdev->config.devx)
6251 		return rte_flow_error_set(error, ENOTSUP,
6252 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6253 					  "count action not supported");
6254 	if (!priv->hws_cpool)
6255 		return rte_flow_error_set(error, EINVAL,
6256 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6257 					  "counters pool not initialized");
6258 	if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
6259 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
6260 		return rte_flow_error_set(error, EINVAL,
6261 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6262 					  "duplicate count actions set");
6263 	if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
6264 		return rte_flow_error_set(error, EINVAL,
6265 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
6266 					  "AGE and COUNT action shared by mask combination is not supported");
6267 	return 0;
6268 }
6269 
6270 /**
6271  * Validate meter_mark action.
6272  *
6273  * @param[in] dev
6274  *   Pointer to rte_eth_dev structure.
6275  * @param[in] action
6276  *   Pointer to the indirect action.
6277  * @param[in] indirect
6278  *   If true, then provided action was passed using an indirect action.
6279  * @param[out] error
6280  *   Pointer to error structure.
6281  *
6282  * @return
6283  *   0 on success, a negative errno value otherwise and rte_errno is set.
6284  */
6285 static int
6286 flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
6287 			      const struct rte_flow_action *action,
6288 			      bool indirect,
6289 			      struct rte_flow_error *error)
6290 {
6291 	struct mlx5_priv *priv = dev->data->dev_private;
6292 
6293 	RTE_SET_USED(action);
6294 
6295 	if (!priv->sh->cdev->config.devx)
6296 		return rte_flow_error_set(error, ENOTSUP,
6297 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6298 					  "meter_mark action not supported");
6299 	if (!indirect && priv->shared_host)
6300 		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action,
6301 					  "meter_mark action can only be used on host port");
6302 	if (!priv->hws_mpool)
6303 		return rte_flow_error_set(error, EINVAL,
6304 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6305 					  "meter_mark pool not initialized");
6306 	return 0;
6307 }
6308 
6309 /**
6310  * Validate indirect action.
6311  *
6312  * @param[in] dev
6313  *   Pointer to rte_eth_dev structure.
6314  * @param[in] action
6315  *   Pointer to the indirect action.
6316  * @param[in] mask
6317  *   Pointer to the indirect action mask.
6318  * @param[in, out] action_flags
6319  *   Holds the actions detected until now.
6320  * @param[in, out] fixed_cnt
6321  *   Pointer to indicator if this list has a fixed COUNT action.
6322  * @param[out] error
6323  *   Pointer to error structure.
6324  *
6325  * @return
6326  *   0 on success, a negative errno value otherwise and rte_errno is set.
6327  */
6328 static int
6329 flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
6330 				 const struct rte_flow_action *action,
6331 				 const struct rte_flow_action *mask,
6332 				 uint64_t *action_flags, bool *fixed_cnt,
6333 				 struct rte_flow_error *error)
6334 {
6335 	uint32_t type;
6336 	int ret;
6337 
6338 	if (!mask)
6339 		return rte_flow_error_set(error, EINVAL,
6340 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6341 					  "Unable to determine indirect action type without a mask specified");
6342 	type = mask->type;
6343 	switch (type) {
6344 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
6345 		ret = flow_hw_validate_action_meter_mark(dev, mask, true, error);
6346 		if (ret < 0)
6347 			return ret;
6348 		*action_flags |= MLX5_FLOW_ACTION_METER;
6349 		break;
6350 	case RTE_FLOW_ACTION_TYPE_RSS:
6351 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
6352 		*action_flags |= MLX5_FLOW_ACTION_RSS;
6353 		break;
6354 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6355 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
6356 		*action_flags |= MLX5_FLOW_ACTION_CT;
6357 		break;
6358 	case RTE_FLOW_ACTION_TYPE_COUNT:
6359 		if (action->conf && mask->conf) {
6360 			if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
6361 			    (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
6362 				/*
6363 				 * AGE cannot use indirect counter which is
6364 				 * shared with other flow rules.
6365 				 */
6366 				return rte_flow_error_set(error, EINVAL,
6367 						  RTE_FLOW_ERROR_TYPE_ACTION,
6368 						  NULL,
6369 						  "AGE and fixed COUNT combination is not supported");
6370 			*fixed_cnt = true;
6371 		}
6372 		ret = flow_hw_validate_action_count(dev, action, mask,
6373 						    *action_flags, error);
6374 		if (ret < 0)
6375 			return ret;
6376 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
6377 		break;
6378 	case RTE_FLOW_ACTION_TYPE_AGE:
6379 		ret = flow_hw_validate_action_age(dev, action, *action_flags,
6380 						  *fixed_cnt, error);
6381 		if (ret < 0)
6382 			return ret;
6383 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
6384 		break;
6385 	case RTE_FLOW_ACTION_TYPE_QUOTA:
6386 		/* TODO: add proper quota verification */
6387 		*action_flags |= MLX5_FLOW_ACTION_QUOTA;
6388 		break;
6389 	default:
6390 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
6391 		return rte_flow_error_set(error, ENOTSUP,
6392 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
6393 					  "Unsupported indirect action type");
6394 	}
6395 	return 0;
6396 }
6397 
6398 /**
6399  * Validate ipv6_ext_push action.
6400  *
6401  * @param[in] dev
6402  *   Pointer to rte_eth_dev structure.
6403  * @param[in] action
6404  *   Pointer to the action.
6405  * @param[out] error
6406  *   Pointer to error structure.
6407  *
6408  * @return
6409  *   0 on success, a negative errno value otherwise and rte_errno is set.
6410  */
6411 static int
6412 flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
6413 				      const struct rte_flow_action *action,
6414 				      struct rte_flow_error *error)
6415 {
6416 	const struct rte_flow_action_ipv6_ext_push *raw_push_data = action->conf;
6417 
6418 	if (!raw_push_data || !raw_push_data->size || !raw_push_data->data)
6419 		return rte_flow_error_set(error, EINVAL,
6420 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6421 					  "invalid ipv6_ext_push data");
6422 	if (raw_push_data->type != IPPROTO_ROUTING ||
6423 	    raw_push_data->size > MLX5_PUSH_MAX_LEN)
6424 		return rte_flow_error_set(error, EINVAL,
6425 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6426 					  "Unsupported ipv6_ext_push type or length");
6427 	return 0;
6428 }
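
/*
 * Illustrative sketch (hypothetical buffer, not part of the driver): a
 * configuration accepted by the checks above pushes an IPv6 routing
 * extension header (type IPPROTO_ROUTING) whose raw contents fit within
 * MLX5_PUSH_MAX_LEN.
 *
 * @code{.c}
 * static uint8_t srh_buf[24]; // pre-built SRH bytes, length is an example value
 * const struct rte_flow_action_ipv6_ext_push push_conf = {
 * 	.data = srh_buf,
 * 	.size = sizeof(srh_buf), // assumed to be <= MLX5_PUSH_MAX_LEN
 * 	.type = IPPROTO_ROUTING,
 * };
 * @endcode
 */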
6429 
6430 /**
6431  * Process `... / raw_decap / raw_encap / ...` actions sequence.
6432  * The PMD handles the sequence as a single encap or decap reformat action,
6433  * depending on the raw_encap configuration.
6434  *
6435  * The function assumes that the raw_decap / raw_encap location in the
6436  * actions template list complies with the relative HWS actions order
6437  * required for the reformat configuration:
6438  * ENCAP configuration must appear before [JUMP|DROP|PORT];
6439  * DECAP configuration must appear at the template head.
6440  */
6441 static uint64_t
6442 mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
6443 			       uint32_t encap_ind, uint64_t flags)
6444 {
6445 	const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
6446 
6447 	if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
6448 		return MLX5_FLOW_ACTION_ENCAP;
6449 	if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6450 		return MLX5_FLOW_ACTION_ENCAP;
6451 	return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
6452 	       MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
6453 }
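
/*
 * Illustrative sketch (hypothetical template, not part of the driver): how a
 * `raw_decap / raw_encap` pair in an actions template is classified by
 * mlx5_decap_encap_reformat_type(). The sizes and indexes below are made-up
 * example values.
 *
 * @code{.c}
 * static const uint8_t hdr[14];
 * const struct rte_flow_action_raw_encap encap_conf = {
 * 	.data = (uint8_t *)(uintptr_t)hdr,
 * 	.size = sizeof(hdr), // assumed to be below MLX5_ENCAPSULATION_DECISION_SIZE
 * };
 * const struct rte_flow_action actions[] = {
 * 	[0] = { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP },
 * 	[1] = { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap_conf },
 * 	[2] = { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * // With MLX5_FLOW_ACTION_DECAP already present in the template flags, the
 * // pair is treated as a single L3 decap reformat:
 * uint64_t t = mlx5_decap_encap_reformat_type(actions, 1, MLX5_FLOW_ACTION_DECAP);
 * // t == MLX5_FLOW_ACTION_DECAP
 * @endcode
 */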
6454 
6455 enum mlx5_hw_indirect_list_relative_position {
6456 	MLX5_INDIRECT_LIST_POSITION_UNKNOWN = -1,
6457 	MLX5_INDIRECT_LIST_POSITION_BEFORE_MH = 0,
6458 	MLX5_INDIRECT_LIST_POSITION_AFTER_MH,
6459 };
6460 
6461 static enum mlx5_hw_indirect_list_relative_position
6462 mlx5_hw_indirect_list_mh_position(const struct rte_flow_action *action)
6463 {
6464 	const struct rte_flow_action_indirect_list *conf = action->conf;
6465 	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(conf->handle);
6466 	enum mlx5_hw_indirect_list_relative_position pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6467 	const union {
6468 		struct mlx5_indlst_legacy *legacy;
6469 		struct mlx5_hw_encap_decap_action *reformat;
6470 		struct rte_flow_action_list_handle *handle;
6471 	} h = { .handle = conf->handle};
6472 
6473 	switch (list_type) {
6474 	case  MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
6475 		switch (h.legacy->legacy_type) {
6476 		case RTE_FLOW_ACTION_TYPE_AGE:
6477 		case RTE_FLOW_ACTION_TYPE_COUNT:
6478 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6479 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
6480 		case RTE_FLOW_ACTION_TYPE_QUOTA:
6481 			pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6482 			break;
6483 		case RTE_FLOW_ACTION_TYPE_RSS:
6484 			pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6485 			break;
6486 		default:
6487 			pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6488 			break;
6489 		}
6490 		break;
6491 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
6492 		pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6493 		break;
6494 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
6495 		switch (h.reformat->action_type) {
6496 		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
6497 		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
6498 			pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6499 			break;
6500 		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
6501 		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
6502 			pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6503 			break;
6504 		default:
6505 			pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6506 			break;
6507 		}
6508 		break;
6509 	default:
6510 		pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6511 		break;
6512 	}
6513 	return pos;
6514 }
6515 
6516 #define MLX5_HW_EXPAND_MH_FAILED 0xffff
6517 
6518 static inline uint16_t
6519 flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
6520 				     struct rte_flow_action masks[],
6521 				     const struct rte_flow_action *mf_actions,
6522 				     const struct rte_flow_action *mf_masks,
6523 				     uint64_t flags, uint32_t act_num,
6524 				     uint32_t mf_num)
6525 {
6526 	uint32_t i, tail;
6527 
6528 	MLX5_ASSERT(actions && masks);
6529 	MLX5_ASSERT(mf_num > 0);
6530 	if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
6531 		/*
6532 		 * Application action template already has Modify Field.
6533 		 * Its location will be used in DR.
6534 		 * Expanded MF action can be added before the END.
6535 		 */
6536 		i = act_num - 1;
6537 		goto insert;
6538 	}
6539 	/**
6540 	 * Locate the first action positioned BEFORE the new MF.
6541 	 *
6542 	 * Search for a place to insert modify header
6543 	 * from the END action backwards:
6544 	 * 1. END is always present in actions array
6545 	 * 2. END location is always at action[act_num - 1]
6546 	 * 3. END always positioned AFTER modify field location
6547 	 *
6548 	 * Relative actions order is the same for RX, TX and FDB.
6549 	 *
6550 	 * Current actions order (draft-3)
6551 	 * @see action_order_arr[]
6552 	 */
6553 	for (i = act_num - 2; (int)i >= 0; i--) {
6554 		enum mlx5_hw_indirect_list_relative_position pos;
6555 		enum rte_flow_action_type type = actions[i].type;
6556 		uint64_t reformat_type;
6557 
6558 		if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
6559 			type = masks[i].type;
6560 		switch (type) {
6561 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6562 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6563 		case RTE_FLOW_ACTION_TYPE_DROP:
6564 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
6565 		case RTE_FLOW_ACTION_TYPE_JUMP:
6566 		case RTE_FLOW_ACTION_TYPE_QUEUE:
6567 		case RTE_FLOW_ACTION_TYPE_RSS:
6568 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
6569 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
6570 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6571 		case RTE_FLOW_ACTION_TYPE_VOID:
6572 		case RTE_FLOW_ACTION_TYPE_END:
6573 			break;
6574 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6575 			reformat_type =
6576 				mlx5_decap_encap_reformat_type(actions, i,
6577 							       flags);
6578 			if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
6579 				i++;
6580 				goto insert;
6581 			}
6582 			if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6583 				i--;
6584 			break;
6585 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
6586 			pos = mlx5_hw_indirect_list_mh_position(&actions[i]);
6587 			if (pos == MLX5_INDIRECT_LIST_POSITION_UNKNOWN)
6588 				return MLX5_HW_EXPAND_MH_FAILED;
6589 			if (pos == MLX5_INDIRECT_LIST_POSITION_BEFORE_MH)
6590 				goto insert;
6591 			break;
6592 		default:
6593 			i++; /* new MF inserted AFTER actions[i] */
6594 			goto insert;
6595 		}
6596 	}
6597 	i = 0;
6598 insert:
6599 	tail = act_num - i; /* num action to move */
6600 	memmove(actions + i + mf_num, actions + i, sizeof(actions[0]) * tail);
6601 	memcpy(actions + i, mf_actions, sizeof(actions[0]) * mf_num);
6602 	memmove(masks + i + mf_num, masks + i, sizeof(masks[0]) * tail);
6603 	memcpy(masks + i, mf_masks, sizeof(masks[0]) * mf_num);
6604 	return i;
6605 }
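
/*
 * Worked example (hypothetical template, not part of the driver): for the
 * input below the backward scan skips RSS (allowed after a modify header),
 * reaches MARK, which falls into the default branch, and therefore inserts
 * the expanded MODIFY_FIELD right after it; the function returns index 1.
 *
 * @code{.c}
 * // before:  MARK / RSS / END                  (act_num == 3, mf_num == 1)
 * // after:   MARK / MODIFY_FIELD / RSS / END
 * @endcode
 */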
6606 
6607 static int
6608 flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
6609 				  const
6610 				  struct rte_flow_actions_template_attr *attr,
6611 				  const struct rte_flow_action *action,
6612 				  const struct rte_flow_action *mask,
6613 				  struct rte_flow_error *error)
6614 {
6615 #define X_FIELD(ptr, t, f) (((ptr)->conf) && ((t *)((ptr)->conf))->f)
6616 
6617 	const bool masked_push =
6618 		X_FIELD(mask + MLX5_HW_VLAN_PUSH_TYPE_IDX,
6619 			const struct rte_flow_action_of_push_vlan, ethertype);
6620 	bool masked_param;
6621 
6622 	/*
6623 	 * Mandatory actions order:
6624 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
6625 	 */
6626 	RTE_SET_USED(dev);
6627 	RTE_SET_USED(attr);
6628 	/* Check that the mask type matches OF_PUSH_VLAN */
6629 	if (mask[MLX5_HW_VLAN_PUSH_TYPE_IDX].type !=
6630 	    RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
6631 		return rte_flow_error_set(error, EINVAL,
6632 					  RTE_FLOW_ERROR_TYPE_ACTION,
6633 					  action, "OF_PUSH_VLAN: mask does not match");
6634 	/* Check that the second template and mask items are SET_VLAN_VID */
6635 	if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
6636 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID ||
6637 	    mask[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
6638 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
6639 		return rte_flow_error_set(error, EINVAL,
6640 					  RTE_FLOW_ERROR_TYPE_ACTION,
6641 					  action, "OF_PUSH_VLAN: invalid actions order");
6642 	masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_VID_IDX,
6643 			       const struct rte_flow_action_of_set_vlan_vid,
6644 			       vlan_vid);
6645 	/*
6646 	 * PMD requires the OF_SET_VLAN_VID mask to match OF_PUSH_VLAN
6647 	 */
6648 	if (masked_push ^ masked_param)
6649 		return rte_flow_error_set(error, EINVAL,
6650 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6651 					  "OF_SET_VLAN_VID: mask does not match OF_PUSH_VLAN");
6652 	if (is_of_vlan_pcp_present(action)) {
6653 		if (mask[MLX5_HW_VLAN_PUSH_PCP_IDX].type !=
6654 		     RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
6655 			return rte_flow_error_set(error, EINVAL,
6656 						  RTE_FLOW_ERROR_TYPE_ACTION,
6657 						  action, "OF_SET_VLAN_PCP: missing mask configuration");
6658 		masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_PCP_IDX,
6659 				       const struct
6660 				       rte_flow_action_of_set_vlan_pcp,
6661 				       vlan_pcp);
6662 		/*
6663 		 * PMD requires the OF_SET_VLAN_PCP mask to match OF_PUSH_VLAN
6664 		 */
6665 		if (masked_push ^ masked_param)
6666 			return rte_flow_error_set(error, EINVAL,
6667 						  RTE_FLOW_ERROR_TYPE_ACTION, action,
6668 						  "OF_SET_VLAN_PCP: mask does not match OF_PUSH_VLAN");
6669 	}
6670 	return 0;
6671 #undef X_FIELD
6672 }
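
/*
 * Illustrative sketch (hypothetical values, not part of the driver): an
 * actions template fragment that satisfies the ordering and masking rules
 * checked above, i.e. OF_PUSH_VLAN immediately followed by OF_SET_VLAN_VID,
 * with both entries masked (or both unmasked) consistently.
 *
 * @code{.c}
 * const struct rte_flow_action vlan_push_actions[] = {
 * 	{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
 * 	  .conf = &(const struct rte_flow_action_of_push_vlan){
 * 		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN) } },
 * 	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
 * 	  .conf = &(const struct rte_flow_action_of_set_vlan_vid){
 * 		.vlan_vid = RTE_BE16(100) } },
 * 	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * // The masks array would mirror the same action types in the same order.
 * @endcode
 */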
6673 
6674 static int
6675 flow_hw_validate_action_default_miss(struct rte_eth_dev *dev,
6676 				     const struct rte_flow_actions_template_attr *attr,
6677 				     uint64_t action_flags,
6678 				     struct rte_flow_error *error)
6679 {
6680 	/*
6681 	 * The private DEFAULT_MISS action is used internally for LACP in control
6682 	 * flows, so this validation could be skipped. It is kept for now since
6683 	 * the validation is done only once.
6684 	 */
6685 	struct mlx5_priv *priv = dev->data->dev_private;
6686 
6687 	if (!attr->ingress || attr->egress || attr->transfer)
6688 		return rte_flow_error_set(error, EINVAL,
6689 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6690 					  "DEFAULT MISS is only supported in ingress.");
6691 	if (!priv->hw_def_miss)
6692 		return rte_flow_error_set(error, EINVAL,
6693 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6694 					  "DEFAULT MISS action does not exist.");
6695 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
6696 		return rte_flow_error_set(error, EINVAL,
6697 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6698 					  "DEFAULT MISS should be the only termination.");
6699 	return 0;
6700 }
6701 
6702 static int
6703 flow_hw_validate_action_nat64(struct rte_eth_dev *dev,
6704 			      const struct rte_flow_actions_template_attr *attr,
6705 			      const struct rte_flow_action *action,
6706 			      const struct rte_flow_action *mask,
6707 			      uint64_t action_flags,
6708 			      struct rte_flow_error *error)
6709 {
6710 	struct mlx5_priv *priv = dev->data->dev_private;
6711 	const struct rte_flow_action_nat64 *nat64_c;
6712 	enum rte_flow_nat64_type cov_type;
6713 
6714 	RTE_SET_USED(action_flags);
6715 	if (mask->conf && ((const struct rte_flow_action_nat64 *)mask->conf)->type) {
6716 		nat64_c = (const struct rte_flow_action_nat64 *)action->conf;
6717 		cov_type = nat64_c->type;
6718 		if ((attr->ingress && !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][cov_type]) ||
6719 		    (attr->egress && !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][cov_type]) ||
6720 		    (attr->transfer && !priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][cov_type]))
6721 			goto err_out;
6722 	} else {
6723 		/*
6724 		 * Usually, the actions will be used in both directions. For non-masked actions,
6725 		 * both directions' actions will be checked.
6726 		 */
6727 		if (attr->ingress)
6728 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_6TO4] ||
6729 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_4TO6])
6730 				goto err_out;
6731 		if (attr->egress)
6732 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][RTE_FLOW_NAT64_6TO4] ||
6733 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][RTE_FLOW_NAT64_4TO6])
6734 				goto err_out;
6735 		if (attr->transfer)
6736 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][RTE_FLOW_NAT64_6TO4] ||
6737 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][RTE_FLOW_NAT64_4TO6])
6738 				goto err_out;
6739 	}
6740 	return 0;
6741 err_out:
6742 	return rte_flow_error_set(error, EOPNOTSUPP, RTE_FLOW_ERROR_TYPE_ACTION,
6743 				  NULL, "NAT64 action is not supported.");
6744 }
6745 
6746 static int
6747 flow_hw_validate_action_jump(struct rte_eth_dev *dev,
6748 			     const struct rte_flow_actions_template_attr *attr,
6749 			     const struct rte_flow_action *action,
6750 			     const struct rte_flow_action *mask,
6751 			     struct rte_flow_error *error)
6752 {
6753 	const struct rte_flow_action_jump *m = mask->conf;
6754 	const struct rte_flow_action_jump *v = action->conf;
6755 	struct mlx5_flow_template_table_cfg cfg = {
6756 		.external = true,
6757 		.attr = {
6758 			.flow_attr = {
6759 				.ingress = attr->ingress,
6760 				.egress = attr->egress,
6761 				.transfer = attr->transfer,
6762 			},
6763 		},
6764 	};
6765 	uint32_t t_group = 0;
6766 
6767 	if (!m || !m->group)
6768 		return 0;
6769 	if (!v)
6770 		return rte_flow_error_set(error, EINVAL,
6771 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6772 					  "Invalid jump action configuration");
6773 	if (flow_hw_translate_group(dev, &cfg, v->group, &t_group, error))
6774 		return -rte_errno;
6775 	if (t_group == 0)
6776 		return rte_flow_error_set(error, EINVAL,
6777 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6778 					  "Unsupported action - jump to root table");
6779 	return 0;
6780 }
6781 
6782 static int
6783 mlx5_hw_validate_action_mark(struct rte_eth_dev *dev,
6784 			     const struct rte_flow_action *template_action,
6785 			     const struct rte_flow_action *template_mask,
6786 			     uint64_t action_flags,
6787 			     const struct rte_flow_actions_template_attr *template_attr,
6788 			     struct rte_flow_error *error)
6789 {
6790 	const struct rte_flow_action_mark *mark_mask = template_mask->conf;
6791 	const struct rte_flow_action *action =
6792 		mark_mask && mark_mask->id ? template_action :
6793 		&(const struct rte_flow_action) {
6794 		.type = RTE_FLOW_ACTION_TYPE_MARK,
6795 		.conf = &(const struct rte_flow_action_mark) {
6796 			.id = MLX5_FLOW_MARK_MAX - 1
6797 		}
6798 	};
6799 	const struct rte_flow_attr attr = {
6800 		.ingress = template_attr->ingress,
6801 		.egress = template_attr->egress,
6802 		.transfer = template_attr->transfer
6803 	};
6804 
6805 	return mlx5_flow_validate_action_mark(dev, action, action_flags,
6806 					      &attr, error);
6807 }
6808 
6809 static int
6810 mlx5_hw_validate_action_queue(struct rte_eth_dev *dev,
6811 			      const struct rte_flow_action *template_action,
6812 			      const struct rte_flow_action *template_mask,
6813 			      const struct rte_flow_actions_template_attr *template_attr,
6814 			      uint64_t action_flags,
6815 			      struct rte_flow_error *error)
6816 {
6817 	const struct rte_flow_action_queue *queue_mask = template_mask->conf;
6818 	const struct rte_flow_attr attr = {
6819 		.ingress = template_attr->ingress,
6820 		.egress = template_attr->egress,
6821 		.transfer = template_attr->transfer
6822 	};
6823 	bool masked = queue_mask != NULL && queue_mask->index;
6824 
6825 	if (template_attr->egress || template_attr->transfer)
6826 		return rte_flow_error_set(error, EINVAL,
6827 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6828 					  "QUEUE action supported for ingress only");
6829 	if (masked)
6830 		return mlx5_flow_validate_action_queue(template_action, action_flags, dev,
6831 						       &attr, error);
6832 	else
6833 		return 0;
6834 }
6835 
6836 static int
6837 mlx5_hw_validate_action_rss(struct rte_eth_dev *dev,
6838 			      const struct rte_flow_action *template_action,
6839 			      const struct rte_flow_action *template_mask,
6840 			      const struct rte_flow_actions_template_attr *template_attr,
6841 			      __rte_unused uint64_t action_flags,
6842 			      struct rte_flow_error *error)
6843 {
6844 	const struct rte_flow_action_rss *mask = template_mask->conf;
6845 
6846 	if (template_attr->egress || template_attr->transfer)
6847 		return rte_flow_error_set(error, EINVAL,
6848 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6849 					  "RSS action supported for ingress only");
6850 	if (mask != NULL)
6851 		return mlx5_validate_action_rss(dev, template_action, error);
6852 	else
6853 		return 0;
6854 }
6855 
6856 static int
6857 mlx5_hw_validate_action_l2_encap(struct rte_eth_dev *dev,
6858 				 const struct rte_flow_action *template_action,
6859 				 const struct rte_flow_action *template_mask,
6860 				 const struct rte_flow_actions_template_attr *template_attr,
6861 				 uint64_t action_flags,
6862 				 struct rte_flow_error *error)
6863 {
6864 	const struct rte_flow_action_vxlan_encap default_action_conf = {
6865 		.definition = (struct rte_flow_item *)
6866 			(struct rte_flow_item [1]) {
6867 			[0] = { .type = RTE_FLOW_ITEM_TYPE_END }
6868 		}
6869 	};
6870 	const struct rte_flow_action *action = template_mask->conf ?
6871 		template_action : &(const struct rte_flow_action) {
6872 			.type = template_mask->type,
6873 			.conf = &default_action_conf
6874 	};
6875 	const struct rte_flow_attr attr = {
6876 		.ingress = template_attr->ingress,
6877 		.egress = template_attr->egress,
6878 		.transfer = template_attr->transfer
6879 	};
6880 
6881 	return mlx5_flow_dv_validate_action_l2_encap(dev, action_flags, action,
6882 						     &attr, error);
6883 }
6884 
6885 static int
6886 mlx5_hw_validate_action_l2_decap(struct rte_eth_dev *dev,
6887 				 const struct rte_flow_action *template_action,
6888 				 const struct rte_flow_action *template_mask,
6889 				 const struct rte_flow_actions_template_attr *template_attr,
6890 				 uint64_t action_flags,
6891 				 struct rte_flow_error *error)
6892 {
6893 	const struct rte_flow_action_vxlan_encap default_action_conf = {
6894 		.definition = (struct rte_flow_item *)
6895 			(struct rte_flow_item [1]) {
6896 				[0] = { .type = RTE_FLOW_ITEM_TYPE_END }
6897 			}
6898 	};
6899 	const struct rte_flow_action *action = template_mask->conf ?
6900 					       template_action : &(const struct rte_flow_action) {
6901 			.type = template_mask->type,
6902 			.conf = &default_action_conf
6903 		};
6904 	const struct rte_flow_attr attr = {
6905 		.ingress = template_attr->ingress,
6906 		.egress = template_attr->egress,
6907 		.transfer = template_attr->transfer
6908 	};
6909 	uint64_t item_flags =
6910 		action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
6911 		MLX5_FLOW_LAYER_VXLAN : 0;
6912 
6913 	return mlx5_flow_dv_validate_action_decap(dev, action_flags, action,
6914 						  item_flags, &attr, error);
6915 }
6916 
6917 static int
6918 mlx5_hw_validate_action_conntrack(struct rte_eth_dev *dev,
6919 				  const struct rte_flow_action *template_action,
6920 				  const struct rte_flow_action *template_mask,
6921 				  const struct rte_flow_actions_template_attr *template_attr,
6922 				  uint64_t action_flags,
6923 				  struct rte_flow_error *error)
6924 {
6925 	RTE_SET_USED(template_action);
6926 	RTE_SET_USED(template_mask);
6927 	RTE_SET_USED(template_attr);
6928 	return mlx5_flow_dv_validate_action_aso_ct(dev, action_flags,
6929 						   MLX5_FLOW_LAYER_OUTER_L4_TCP,
6930 						   false, error);
6931 }
6932 
6933 static int
6934 flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
6935 				  const struct rte_flow_action *mask,
6936 				  struct rte_flow_error *error)
6937 {
6938 	const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
6939 	const struct rte_flow_action_raw_encap *action_conf = action->conf;
6940 
6941 	if (!mask_conf || !mask_conf->size)
6942 		return rte_flow_error_set(error, EINVAL,
6943 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
6944 					  "raw_encap: size must be masked");
6945 	if (!action_conf || !action_conf->size)
6946 		return rte_flow_error_set(error, EINVAL,
6947 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6948 					  "raw_encap: invalid action configuration");
6949 	if (mask_conf->data && !action_conf->data)
6950 		return rte_flow_error_set(error, EINVAL,
6951 					  RTE_FLOW_ERROR_TYPE_ACTION,
6952 					  action, "raw_encap: masked data is missing");
6953 	return 0;
6954 }
6955 
6956 
6957 static int
6958 flow_hw_validate_action_raw_reformat(struct rte_eth_dev *dev,
6959 				     const struct rte_flow_action *template_action,
6960 				     const struct rte_flow_action *template_mask,
6961 				     const struct
6962 				     rte_flow_actions_template_attr *template_attr,
6963 				     uint64_t *action_flags,
6964 				     struct rte_flow_error *error)
6965 {
6966 	const struct rte_flow_action *encap_action = NULL;
6967 	const struct rte_flow_action *encap_mask = NULL;
6968 	const struct rte_flow_action_raw_decap *raw_decap = NULL;
6969 	const struct rte_flow_action_raw_encap *raw_encap = NULL;
6970 	const struct rte_flow_attr attr = {
6971 		.ingress = template_attr->ingress,
6972 		.egress = template_attr->egress,
6973 		.transfer = template_attr->transfer
6974 	};
6975 	uint64_t item_flags = 0;
6976 	int ret, actions_n = 0;
6977 
6978 	if (template_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
6979 		raw_decap = template_mask->conf ?
6980 			    template_action->conf : &empty_decap;
6981 		if ((template_action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
6982 			if ((template_mask + 1)->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
6983 				return rte_flow_error_set(error, EINVAL,
6984 							  RTE_FLOW_ERROR_TYPE_ACTION,
6985 							  template_mask + 1, "invalid mask type");
6986 			encap_action = template_action + 1;
6987 			encap_mask = template_mask + 1;
6988 		}
6989 	} else {
6990 		encap_action = template_action;
6991 		encap_mask = template_mask;
6992 	}
6993 	if (encap_action) {
6994 		raw_encap = encap_action->conf;
6995 		ret = flow_hw_validate_action_raw_encap(encap_action,
6996 							encap_mask, error);
6997 		if (ret)
6998 			return ret;
6999 	}
7000 	return mlx5_flow_dv_validate_action_raw_encap_decap(dev, raw_decap,
7001 							    raw_encap, &attr,
7002 							    action_flags,
7003 							    &actions_n,
7004 							    template_action,
7005 							    item_flags, error);
7006 }
7007 
7008 
7009 
7010 static int
7011 mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
7012 			      const struct rte_flow_actions_template_attr *attr,
7013 			      const struct rte_flow_action actions[],
7014 			      const struct rte_flow_action masks[],
7015 			      uint64_t *act_flags,
7016 			      struct rte_flow_error *error)
7017 {
7018 	struct mlx5_priv *priv = dev->data->dev_private;
7019 	const struct rte_flow_action_count *count_mask = NULL;
7020 	bool fixed_cnt = false;
7021 	uint64_t action_flags = 0;
7022 	bool actions_end = false;
7023 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7024 	int table_type;
7025 #endif
7026 	uint16_t i;
7027 	int ret;
7028 	const struct rte_flow_action_ipv6_ext_remove *remove_data;
7029 
7030 	if (!mlx5_hw_ctx_validate(dev, error))
7031 		return -rte_errno;
7032 	/* FDB actions are only valid to proxy port. */
7033 	/* FDB actions are only valid on the proxy port. */
7034 		return rte_flow_error_set(error, EINVAL,
7035 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7036 					  NULL,
7037 					  "transfer actions are only valid to proxy port");
7038 	for (i = 0; !actions_end; ++i) {
7039 		const struct rte_flow_action *action = &actions[i];
7040 		const struct rte_flow_action *mask = &masks[i];
7041 
7042 		MLX5_ASSERT(i < MLX5_HW_MAX_ACTS);
7043 		if (action->type != RTE_FLOW_ACTION_TYPE_INDIRECT &&
7044 		    action->type != mask->type)
7045 			return rte_flow_error_set(error, ENOTSUP,
7046 						  RTE_FLOW_ERROR_TYPE_ACTION,
7047 						  action,
7048 						  "mask type does not match action type");
7049 		switch ((int)action->type) {
7050 		case RTE_FLOW_ACTION_TYPE_VOID:
7051 			break;
7052 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
7053 			break;
7054 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
7055 			ret = flow_hw_validate_action_indirect(dev, action,
7056 							       mask,
7057 							       &action_flags,
7058 							       &fixed_cnt,
7059 							       error);
7060 			if (ret < 0)
7061 				return ret;
7062 			break;
7063 		case RTE_FLOW_ACTION_TYPE_FLAG:
7064 			/* TODO: Validation logic */
7065 			action_flags |= MLX5_FLOW_ACTION_FLAG;
7066 			break;
7067 		case RTE_FLOW_ACTION_TYPE_MARK:
7068 			ret = mlx5_hw_validate_action_mark(dev, action, mask,
7069 							   action_flags,
7070 							   attr, error);
7071 			if (ret)
7072 				return ret;
7073 			action_flags |= MLX5_FLOW_ACTION_MARK;
7074 			break;
7075 		case RTE_FLOW_ACTION_TYPE_DROP:
7076 			ret = mlx5_flow_validate_action_drop
7077 				(dev, action_flags,
7078 				 &(struct rte_flow_attr){.egress = attr->egress},
7079 				 error);
7080 			if (ret)
7081 				return ret;
7082 			action_flags |= MLX5_FLOW_ACTION_DROP;
7083 			break;
7084 		case RTE_FLOW_ACTION_TYPE_JUMP:
7085 			/* Only validate the jump to root table in template stage. */
7086 			ret = flow_hw_validate_action_jump(dev, attr, action, mask, error);
7087 			if (ret)
7088 				return ret;
7089 			action_flags |= MLX5_FLOW_ACTION_JUMP;
7090 			break;
7091 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7092 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
7093 			if (priv->shared_host)
7094 				return rte_flow_error_set(error, ENOTSUP,
7095 							  RTE_FLOW_ERROR_TYPE_ACTION,
7096 							  action,
7097 							  "action not supported in guest port");
7098 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
7099 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
7100 				     MLX5DR_TABLE_TYPE_FDB);
7101 			if (!priv->hw_send_to_kernel[table_type])
7102 				return rte_flow_error_set(error, ENOTSUP,
7103 							  RTE_FLOW_ERROR_TYPE_ACTION,
7104 							  action,
7105 							  "action is not available");
7106 			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
7107 			break;
7108 #endif
7109 		case RTE_FLOW_ACTION_TYPE_QUEUE:
7110 			ret = mlx5_hw_validate_action_queue(dev, action, mask,
7111 							    attr, action_flags,
7112 							    error);
7113 			if (ret)
7114 				return ret;
7115 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
7116 			break;
7117 		case RTE_FLOW_ACTION_TYPE_RSS:
7118 			ret = mlx5_hw_validate_action_rss(dev, action, mask,
7119 							  attr, action_flags,
7120 							  error);
7121 			if (ret)
7122 				return ret;
7123 			action_flags |= MLX5_FLOW_ACTION_RSS;
7124 			break;
7125 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7126 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7127 			ret = mlx5_hw_validate_action_l2_encap(dev, action, mask,
7128 							       attr, action_flags,
7129 							       error);
7130 			if (ret)
7131 				return ret;
7132 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
7133 			break;
7134 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7135 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7136 			ret = mlx5_hw_validate_action_l2_decap(dev, action, mask,
7137 							       attr, action_flags,
7138 							       error);
7139 			if (ret)
7140 				return ret;
7141 			action_flags |= MLX5_FLOW_ACTION_DECAP;
7142 			break;
7143 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7144 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7145 			ret = flow_hw_validate_action_raw_reformat(dev, action,
7146 								   mask, attr,
7147 								   &action_flags,
7148 								   error);
7149 			if (ret)
7150 				return ret;
7151 			if (action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
7152 			   (action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7153 				action_flags |= MLX5_FLOW_XCAP_ACTIONS;
7154 				i++;
7155 			}
7156 			break;
7157 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
7158 			ret = flow_hw_validate_action_ipv6_ext_push(dev, action, error);
7159 			if (ret < 0)
7160 				return ret;
7161 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
7162 			break;
7163 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
7164 			remove_data = action->conf;
7165 			/* Remove action must be shared. */
7166 			if (remove_data->type != IPPROTO_ROUTING || !mask) {
7167 				DRV_LOG(ERR, "Only supports shared IPv6 routing remove");
7168 				return -EINVAL;
7169 			}
7170 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE;
7171 			break;
7172 		case RTE_FLOW_ACTION_TYPE_METER:
7173 			/* TODO: Validation logic */
7174 			action_flags |= MLX5_FLOW_ACTION_METER;
7175 			break;
7176 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
7177 			ret = flow_hw_validate_action_meter_mark(dev, action, false, error);
7178 			if (ret < 0)
7179 				return ret;
7180 			action_flags |= MLX5_FLOW_ACTION_METER;
7181 			break;
7182 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7183 			ret = flow_hw_validate_action_modify_field(dev, action, mask,
7184 								   error);
7185 			if (ret < 0)
7186 				return ret;
7187 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7188 			break;
7189 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7190 			ret = flow_hw_validate_action_represented_port
7191 					(dev, action, mask, error);
7192 			if (ret < 0)
7193 				return ret;
7194 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7195 			break;
7196 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
7197 			ret = flow_hw_validate_action_port_representor
7198 					(dev, attr, action, mask, error);
7199 			if (ret < 0)
7200 				return ret;
7201 			action_flags |= MLX5_FLOW_ACTION_PORT_REPRESENTOR;
7202 			break;
7203 		case RTE_FLOW_ACTION_TYPE_AGE:
7204 			if (count_mask && count_mask->id)
7205 				fixed_cnt = true;
7206 			ret = flow_hw_validate_action_age(dev, action,
7207 							  action_flags,
7208 							  fixed_cnt, error);
7209 			if (ret < 0)
7210 				return ret;
7211 			action_flags |= MLX5_FLOW_ACTION_AGE;
7212 			break;
7213 		case RTE_FLOW_ACTION_TYPE_COUNT:
7214 			ret = flow_hw_validate_action_count(dev, action, mask,
7215 							    action_flags,
7216 							    error);
7217 			if (ret < 0)
7218 				return ret;
7219 			count_mask = mask->conf;
7220 			action_flags |= MLX5_FLOW_ACTION_COUNT;
7221 			break;
7222 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7223 			ret = mlx5_hw_validate_action_conntrack(dev, action, mask,
7224 								attr, action_flags,
7225 								error);
7226 			if (ret)
7227 				return ret;
7228 			action_flags |= MLX5_FLOW_ACTION_CT;
7229 			break;
7230 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7231 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7232 			break;
7233 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7234 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7235 			break;
7236 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7237 			ret = flow_hw_validate_action_push_vlan
7238 					(dev, attr, action, mask, error);
7239 			if (ret != 0)
7240 				return ret;
7241 			i += is_of_vlan_pcp_present(action) ?
7242 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7243 				MLX5_HW_VLAN_PUSH_VID_IDX;
7244 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7245 			break;
7246 		case RTE_FLOW_ACTION_TYPE_NAT64:
7247 			ret = flow_hw_validate_action_nat64(dev, attr, action, mask,
7248 							    action_flags, error);
7249 			if (ret != 0)
7250 				return ret;
7251 			action_flags |= MLX5_FLOW_ACTION_NAT64;
7252 			break;
7253 		case RTE_FLOW_ACTION_TYPE_END:
7254 			actions_end = true;
7255 			break;
7256 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7257 			ret = flow_hw_validate_action_default_miss(dev, attr,
7258 								   action_flags, error);
7259 			if (ret < 0)
7260 				return ret;
7261 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7262 			break;
7263 		default:
7264 			return rte_flow_error_set(error, ENOTSUP,
7265 						  RTE_FLOW_ERROR_TYPE_ACTION,
7266 						  action,
7267 						  "action not supported in template API");
7268 		}
7269 	}
7270 	if (act_flags != NULL)
7271 		*act_flags = action_flags;
7272 	return 0;
7273 }
7274 
7275 static int
7276 flow_hw_actions_validate(struct rte_eth_dev *dev,
7277 			 const struct rte_flow_actions_template_attr *attr,
7278 			 const struct rte_flow_action actions[],
7279 			 const struct rte_flow_action masks[],
7280 			 struct rte_flow_error *error)
7281 {
7282 	return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, error);
7283 }
7284 
7285 
7286 static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
7287 	[RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
7288 	[RTE_FLOW_ACTION_TYPE_FLAG] = MLX5DR_ACTION_TYP_TAG,
7289 	[RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
7290 	[RTE_FLOW_ACTION_TYPE_JUMP] = MLX5DR_ACTION_TYP_TBL,
7291 	[RTE_FLOW_ACTION_TYPE_QUEUE] = MLX5DR_ACTION_TYP_TIR,
7292 	[RTE_FLOW_ACTION_TYPE_RSS] = MLX5DR_ACTION_TYP_TIR,
7293 	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
7294 	[RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
7295 	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
7296 	[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
7297 	[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
7298 	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
7299 	[RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = MLX5DR_ACTION_TYP_MISS,
7300 	[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
7301 	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
7302 	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
7303 	[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
7304 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
7305 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
7306 	[RTE_FLOW_ACTION_TYPE_NAT64] = MLX5DR_ACTION_TYP_NAT64,
7307 };
7308 
7309 static inline void
7310 action_template_set_type(struct rte_flow_actions_template *at,
7311 			 enum mlx5dr_action_type *action_types,
7312 			 unsigned int action_src, uint16_t *curr_off,
7313 			 enum mlx5dr_action_type type)
7314 {
7315 	at->dr_off[action_src] = *curr_off;
7316 	action_types[*curr_off] = type;
7317 	*curr_off = *curr_off + 1;
7318 }
7319 
7320 static int
7321 flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
7322 					  enum mlx5dr_action_type *action_types,
7323 					  uint16_t *curr_off, uint16_t *cnt_off,
7324 					  struct rte_flow_actions_template *at)
7325 {
7326 	switch (type) {
7327 	case RTE_FLOW_ACTION_TYPE_RSS:
7328 		action_template_set_type(at, action_types, action_src, curr_off,
7329 					 MLX5DR_ACTION_TYP_TIR);
7330 		break;
7331 	case RTE_FLOW_ACTION_TYPE_AGE:
7332 	case RTE_FLOW_ACTION_TYPE_COUNT:
7333 		/*
7334 		 * Both AGE and COUNT actions need a counter; the first one fills
7335 		 * the action_types array and the second only saves the offset.
7336 		 */
7337 		if (*cnt_off == UINT16_MAX) {
7338 			*cnt_off = *curr_off;
7339 			action_template_set_type(at, action_types,
7340 						 action_src, curr_off,
7341 						 MLX5DR_ACTION_TYP_CTR);
7342 		}
7343 		at->dr_off[action_src] = *cnt_off;
7344 		break;
7345 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7346 		action_template_set_type(at, action_types, action_src, curr_off,
7347 					 MLX5DR_ACTION_TYP_ASO_CT);
7348 		break;
7349 	case RTE_FLOW_ACTION_TYPE_QUOTA:
7350 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
7351 		action_template_set_type(at, action_types, action_src, curr_off,
7352 					 MLX5DR_ACTION_TYP_ASO_METER);
7353 		break;
7354 	default:
7355 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
7356 		return -EINVAL;
7357 	}
7358 	return 0;
7359 }
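
/*
 * Worked example (hypothetical template): with an indirect AGE at template
 * index 1 and an indirect COUNT at index 2, both end up pointing at the same
 * DR counter slot.
 *
 * @code{.c}
 * // curr_off == 0, cnt_off == UINT16_MAX initially
 * // AGE   at action_src 1: cnt_off = 0, action_types[0] = MLX5DR_ACTION_TYP_CTR, at->dr_off[1] = 0
 * // COUNT at action_src 2: cnt_off already set, no new slot,                     at->dr_off[2] = 0
 * @endcode
 */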
7360 
7361 
7362 static int
7363 flow_hw_template_actions_list(struct rte_flow_actions_template *at,
7364 			      unsigned int action_src,
7365 			      enum mlx5dr_action_type *action_types,
7366 			      uint16_t *curr_off, uint16_t *cnt_off)
7367 {
7368 	int ret;
7369 	const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
7370 	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
7371 	const union {
7372 		struct mlx5_indlst_legacy *legacy;
7373 		struct rte_flow_action_list_handle *handle;
7374 	} indlst_obj = { .handle = indlst_conf->handle };
7375 	enum mlx5dr_action_type type;
7376 
7377 	switch (list_type) {
7378 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
7379 		ret = flow_hw_dr_actions_template_handle_shared
7380 			(indlst_obj.legacy->legacy_type, action_src,
7381 			 action_types, curr_off, cnt_off, at);
7382 		if (ret)
7383 			return ret;
7384 		break;
7385 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
7386 		action_template_set_type(at, action_types, action_src, curr_off,
7387 					 MLX5DR_ACTION_TYP_DEST_ARRAY);
7388 		break;
7389 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
7390 		type = ((struct mlx5_hw_encap_decap_action *)
7391 			(indlst_conf->handle))->action_type;
7392 		action_template_set_type(at, action_types, action_src, curr_off, type);
7393 		break;
7394 	default:
7395 		DRV_LOG(ERR, "Unsupported indirect list type");
7396 		return -EINVAL;
7397 	}
7398 	return 0;
7399 }
7400 
7401 /**
7402  * Create DR action template based on a provided sequence of flow actions.
7403  *
7404  * @param[in] dev
7405  *   Pointer to the rte_eth_dev structure.
7406  * @param[in] at
7407  *   Pointer to flow actions template to be updated.
7408  * @param[out] action_types
7409  *   Action types array to be filled.
7410  * @param[out] tmpl_flags
7411  *   Template DR flags to be filled.
7412  *
7413  * @return
7414  *   0 on success, a negative errno value otherwise and rte_errno is set.
7415  */
7416 static int
7417 flow_hw_parse_flow_actions_to_dr_actions(struct rte_eth_dev *dev,
7418 					struct rte_flow_actions_template *at,
7419 					enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS],
7420 					uint32_t *tmpl_flags __rte_unused)
7421 {
7422 	unsigned int i;
7423 	uint16_t curr_off;
7424 	enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
7425 	uint16_t reformat_off = UINT16_MAX;
7426 	uint16_t mhdr_off = UINT16_MAX;
7427 	uint16_t recom_off = UINT16_MAX;
7428 	uint16_t cnt_off = UINT16_MAX;
7429 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
7430 	int ret;
7431 
7432 	for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
7433 		const struct rte_flow_action_raw_encap *raw_encap_data;
7434 		size_t data_size;
7435 		enum mlx5dr_action_type type;
7436 
7437 		if (curr_off >= MLX5_HW_MAX_ACTS)
7438 			goto err_actions_num;
7439 		switch ((int)at->actions[i].type) {
7440 		case RTE_FLOW_ACTION_TYPE_VOID:
7441 			break;
7442 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
7443 			ret = flow_hw_template_actions_list(at, i, action_types,
7444 							    &curr_off, &cnt_off);
7445 			if (ret)
7446 				return ret;
7447 			break;
7448 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
7449 			ret = flow_hw_dr_actions_template_handle_shared
7450 				(at->masks[i].type, i, action_types,
7451 				 &curr_off, &cnt_off, at);
7452 			if (ret)
7453 				return ret;
7454 			break;
7455 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7456 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7457 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7458 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7459 			MLX5_ASSERT(reformat_off == UINT16_MAX);
7460 			reformat_off = curr_off++;
7461 			reformat_act_type = mlx5_hw_dr_action_types[at->actions[i].type];
7462 			break;
7463 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
7464 			MLX5_ASSERT(recom_off == UINT16_MAX);
7465 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
7466 			recom_off = curr_off++;
7467 			break;
7468 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
7469 			MLX5_ASSERT(recom_off == UINT16_MAX);
7470 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
7471 			recom_off = curr_off++;
7472 			break;
7473 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7474 			raw_encap_data = at->actions[i].conf;
7475 			data_size = raw_encap_data->size;
7476 			if (reformat_off != UINT16_MAX) {
7477 				reformat_act_type = data_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
7478 					MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
7479 					MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
7480 			} else {
7481 				reformat_off = curr_off++;
7482 				reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
7483 			}
7484 			break;
7485 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7486 			reformat_off = curr_off++;
7487 			reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
7488 			break;
7489 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7490 			if (mhdr_off == UINT16_MAX) {
7491 				mhdr_off = curr_off++;
7492 				type = mlx5_hw_dr_action_types[at->actions[i].type];
7493 				action_types[mhdr_off] = type;
7494 			}
7495 			break;
7496 		case RTE_FLOW_ACTION_TYPE_METER:
7497 			at->dr_off[i] = curr_off;
7498 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
7499 			if (curr_off >= MLX5_HW_MAX_ACTS)
7500 				goto err_actions_num;
7501 			action_types[curr_off++] = MLX5DR_ACTION_TYP_TBL;
7502 			break;
7503 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7504 			type = mlx5_hw_dr_action_types[at->actions[i].type];
7505 			at->dr_off[i] = curr_off;
7506 			action_types[curr_off++] = type;
7507 			i += is_of_vlan_pcp_present(at->actions + i) ?
7508 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7509 				MLX5_HW_VLAN_PUSH_VID_IDX;
7510 			break;
7511 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
7512 			at->dr_off[i] = curr_off;
7513 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
7514 			if (curr_off >= MLX5_HW_MAX_ACTS)
7515 				goto err_actions_num;
7516 			break;
7517 		case RTE_FLOW_ACTION_TYPE_AGE:
7518 		case RTE_FLOW_ACTION_TYPE_COUNT:
7519 			/*
7520 			 * Both AGE and COUNT actions need a counter; the first
7521 			 * one fills the action_types array and the second only
7522 			 * saves the offset.
7523 			 */
7524 			if (cnt_off == UINT16_MAX) {
7525 				cnt_off = curr_off++;
7526 				action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
7527 			}
7528 			at->dr_off[i] = cnt_off;
7529 			break;
7530 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7531 			at->dr_off[i] = curr_off;
7532 			action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS;
7533 			break;
7534 		default:
7535 			type = mlx5_hw_dr_action_types[at->actions[i].type];
7536 			at->dr_off[i] = curr_off;
7537 			action_types[curr_off++] = type;
7538 			break;
7539 		}
7540 	}
7541 	if (curr_off >= MLX5_HW_MAX_ACTS)
7542 		goto err_actions_num;
7543 	if (mhdr_off != UINT16_MAX)
7544 		at->mhdr_off = mhdr_off;
7545 	if (reformat_off != UINT16_MAX) {
7546 		at->reformat_off = reformat_off;
7547 		action_types[reformat_off] = reformat_act_type;
7548 	}
7549 	if (recom_off != UINT16_MAX) {
7550 		at->recom_off = recom_off;
7551 		action_types[recom_off] = recom_type;
7552 	}
7553 	at->dr_actions_num = curr_off;
7554 
7555 	/* Create the SRH flex parser for the IPv6 routing extension push/remove anchor. */
7556 	if ((recom_type == MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT ||
7557 	     recom_type == MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) &&
7558 	    (ret = mlx5_alloc_srh_flex_parser(dev))) {
7559 		DRV_LOG(ERR, "Failed to create srv6 flex parser");
7560 		return ret;
7561 	}
7562 	return 0;
7563 err_actions_num:
7564 	DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed in template",
7565 		curr_off, MLX5_HW_MAX_ACTS);
7566 	return -EINVAL;
7567 }
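
/*
 * Worked example (hypothetical template): an actions template
 * `COUNT / MODIFY_FIELD / JUMP / END` is parsed into the DR action type
 * array `MLX5DR_ACTION_TYP_CTR, MLX5DR_ACTION_TYP_MODIFY_HDR,
 * MLX5DR_ACTION_TYP_TBL`, with dr_actions_num == 3 and at->mhdr_off
 * pointing at the MODIFY_HDR slot.
 */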
7568 
7569 static void
7570 flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
7571 		     struct rte_flow_action *ra,
7572 		     struct rte_flow_action *rm,
7573 		     struct rte_flow_action_modify_field *spec,
7574 		     struct rte_flow_action_modify_field *mask,
7575 		     int set_vlan_vid_ix)
7576 {
7577 	struct rte_flow_error error;
7578 	const bool masked = rm[set_vlan_vid_ix].conf &&
7579 		(((const struct rte_flow_action_of_set_vlan_vid *)
7580 			rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
7581 	const struct rte_flow_action_of_set_vlan_vid *conf =
7582 		ra[set_vlan_vid_ix].conf;
7583 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
7584 					       NULL, &error);
7585 	*spec = (typeof(*spec)) {
7586 		.operation = RTE_FLOW_MODIFY_SET,
7587 		.dst = {
7588 			.field = RTE_FLOW_FIELD_VLAN_ID,
7589 			.level = 0, .offset = 0,
7590 		},
7591 		.src = {
7592 			.field = RTE_FLOW_FIELD_VALUE,
7593 		},
7594 		.width = width,
7595 	};
7596 	*mask = (typeof(*mask)) {
7597 		.operation = RTE_FLOW_MODIFY_SET,
7598 		.dst = {
7599 			.field = RTE_FLOW_FIELD_VLAN_ID,
7600 			.level = 0xff, .offset = 0xffffffff,
7601 		},
7602 		.src = {
7603 			.field = RTE_FLOW_FIELD_VALUE,
7604 		},
7605 		.width = 0xffffffff,
7606 	};
7607 	if (masked) {
7608 		uint32_t mask_val = 0xffffffff;
7609 
7610 		rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid));
7611 		rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val));
7612 	}
7613 	ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
7614 	ra[set_vlan_vid_ix].conf = spec;
7615 	rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
7616 	rm[set_vlan_vid_ix].conf = mask;
7617 }
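
/*
 * Illustrative note (hypothetical values): a masked OF_SET_VLAN_VID with
 * vlan_vid == 100 is rewritten in place into a MODIFY_FIELD action that SETs
 * RTE_FLOW_FIELD_VLAN_ID from the immediate value 100. An unmasked
 * OF_SET_VLAN_VID is also rewritten into MODIFY_FIELD, but the VID is filled
 * per rule by flow_hw_set_vlan_vid_construct() below.
 */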
7618 
7619 static __rte_always_inline int
7620 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
7621 			       struct mlx5_modification_cmd *mhdr_cmd,
7622 			       struct mlx5_action_construct_data *act_data,
7623 			       const struct mlx5_hw_actions *hw_acts,
7624 			       const struct rte_flow_action *action)
7625 {
7626 	struct rte_flow_error error;
7627 	rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
7628 			   action->conf)->vlan_vid;
7629 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
7630 					       NULL, &error);
7631 	struct rte_flow_action_modify_field conf = {
7632 		.operation = RTE_FLOW_MODIFY_SET,
7633 		.dst = {
7634 			.field = RTE_FLOW_FIELD_VLAN_ID,
7635 			.level = 0, .offset = 0,
7636 		},
7637 		.src = {
7638 			.field = RTE_FLOW_FIELD_VALUE,
7639 		},
7640 		.width = width,
7641 	};
7642 	struct rte_flow_action modify_action = {
7643 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7644 		.conf = &conf
7645 	};
7646 
7647 	rte_memcpy(conf.src.value, &vid, sizeof(vid));
7648 	return flow_hw_modify_field_construct(mhdr_cmd, act_data, hw_acts, &modify_action);
7649 }
7650 
7651 static int
7652 flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
7653 			  struct rte_flow_item_flex_handle *handle,
7654 			  uint8_t *flex_item)
7655 {
7656 	int index = mlx5_flex_acquire_index(dev, handle, false);
7657 
7658 	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
7659 	if (index < 0)
7660 		return -1;
7661 	if (!(*flex_item & RTE_BIT32(index))) {
7662 		/* Don't count same flex item again. */
7663 		if (mlx5_flex_acquire_index(dev, handle, true) != index)
7664 			MLX5_ASSERT(false);
7665 		*flex_item |= (uint8_t)RTE_BIT32(index);
7666 	}
7667 	return 0;
7668 }
7669 
7670 static void
7671 flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
7672 {
7673 	while (*flex_item) {
7674 		int index = rte_bsf32(*flex_item);
7675 
7676 		mlx5_flex_release_index(dev, index);
7677 		*flex_item &= ~(uint8_t)RTE_BIT32(index);
7678 	}
7679 }
7680 static __rte_always_inline void
7681 flow_hw_actions_template_replace_container(const
7682 					   struct rte_flow_action *actions,
7683 					   const
7684 					   struct rte_flow_action *masks,
7685 					   struct rte_flow_action *new_actions,
7686 					   struct rte_flow_action *new_masks,
7687 					   struct rte_flow_action **ra,
7688 					   struct rte_flow_action **rm,
7689 					   uint32_t act_num)
7690 {
7691 	memcpy(new_actions, actions, sizeof(actions[0]) * act_num);
7692 	memcpy(new_masks, masks, sizeof(masks[0]) * act_num);
7693 	*ra = (void *)(uintptr_t)new_actions;
7694 	*rm = (void *)(uintptr_t)new_masks;
7695 }
7696 
7697 /* Action template copies these actions in rte_flow_conv() */
7698 /* These actions are copied into the actions template by rte_flow_conv(). */
7699 static const struct rte_flow_action rx_meta_copy_action =  {
7700 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7701 	.conf = &(struct rte_flow_action_modify_field){
7702 		.operation = RTE_FLOW_MODIFY_SET,
7703 		.dst = {
7704 			.field = (enum rte_flow_field_id)
7705 				MLX5_RTE_FLOW_FIELD_META_REG,
7706 			.tag_index = REG_B,
7707 		},
7708 		.src = {
7709 			.field = (enum rte_flow_field_id)
7710 				MLX5_RTE_FLOW_FIELD_META_REG,
7711 			.tag_index = REG_C_1,
7712 		},
7713 		.width = 32,
7714 	}
7715 };
7716 
7717 static const struct rte_flow_action rx_meta_copy_mask = {
7718 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7719 	.conf = &(struct rte_flow_action_modify_field){
7720 		.operation = RTE_FLOW_MODIFY_SET,
7721 		.dst = {
7722 			.field = (enum rte_flow_field_id)
7723 				MLX5_RTE_FLOW_FIELD_META_REG,
7724 			.level = UINT8_MAX,
7725 			.tag_index = UINT8_MAX,
7726 			.offset = UINT32_MAX,
7727 		},
7728 		.src = {
7729 			.field = (enum rte_flow_field_id)
7730 				MLX5_RTE_FLOW_FIELD_META_REG,
7731 			.level = UINT8_MAX,
7732 			.tag_index = UINT8_MAX,
7733 			.offset = UINT32_MAX,
7734 		},
7735 		.width = UINT32_MAX,
7736 	}
7737 };
7738 
7739 static const struct rte_flow_action quota_color_inc_action = {
7740 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7741 	.conf = &(struct rte_flow_action_modify_field) {
7742 		.operation = RTE_FLOW_MODIFY_ADD,
7743 		.dst = {
7744 			.field = RTE_FLOW_FIELD_METER_COLOR,
7745 			.level = 0, .offset = 0
7746 		},
7747 		.src = {
7748 			.field = RTE_FLOW_FIELD_VALUE,
7749 			.level = 1,
7750 			.offset = 0,
7751 		},
7752 		.width = 2
7753 	}
7754 };
7755 
7756 static const struct rte_flow_action quota_color_inc_mask = {
7757 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7758 	.conf = &(struct rte_flow_action_modify_field) {
7759 		.operation = RTE_FLOW_MODIFY_ADD,
7760 		.dst = {
7761 			.field = RTE_FLOW_FIELD_METER_COLOR,
7762 			.level = UINT8_MAX,
7763 			.tag_index = UINT8_MAX,
7764 			.offset = UINT32_MAX,
7765 		},
7766 		.src = {
7767 			.field = RTE_FLOW_FIELD_VALUE,
7768 			.level = 3,
7769 			.offset = 0
7770 		},
7771 		.width = UINT32_MAX
7772 	}
7773 };
7774 
7775 /**
7776  * Create flow action template.
7777  *
7778  * @param[in] dev
7779  *   Pointer to the rte_eth_dev structure.
7780  * @param[in] attr
7781  *   Pointer to the action template attributes.
7782  * @param[in] actions
7783  *   Associated actions (list terminated by the END action).
7784  * @param[in] masks
7785  *   List of actions that marks which of the action's member is constant.
7786  * @param[in] nt_mode
7787  *   Non template mode.
7788  * @param[out] error
7789  *   Pointer to error structure.
7790  *
7791  * @return
7792  *   Action template pointer on success, NULL otherwise and rte_errno is set.
7793  */
7794 static struct rte_flow_actions_template *
7795 __flow_hw_actions_template_create(struct rte_eth_dev *dev,
7796 			const struct rte_flow_actions_template_attr *attr,
7797 			const struct rte_flow_action actions[],
7798 			const struct rte_flow_action masks[],
7799 			bool nt_mode,
7800 			struct rte_flow_error *error)
7801 {
7802 	struct mlx5_priv *priv = dev->data->dev_private;
7803 	int len, act_len, mask_len;
7804 	int orig_act_len;
7805 	unsigned int act_num;
7806 	unsigned int i;
7807 	struct rte_flow_actions_template *at = NULL;
7808 	uint16_t pos;
7809 	uint64_t action_flags = 0;
7810 	struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
7811 	struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
7812 	struct rte_flow_action *ra = (void *)(uintptr_t)actions;
7813 	struct rte_flow_action *rm = (void *)(uintptr_t)masks;
7814 	int set_vlan_vid_ix = -1;
7815 	struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
7816 	struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
7817 	struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
7818 	struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
7819 	uint32_t expand_mf_num = 0;
7820 	uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
7821 	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { MLX5DR_ACTION_TYP_LAST };
7822 	uint32_t tmpl_flags = 0;
7823 	int ret;
7824 
7825 	if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
7826 					  &action_flags, error))
7827 		return NULL;
7828 	for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
7829 		switch (ra[i].type) {
7830 		/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
7831 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7832 			i += is_of_vlan_pcp_present(ra + i) ?
7833 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7834 				MLX5_HW_VLAN_PUSH_VID_IDX;
7835 			break;
7836 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7837 			set_vlan_vid_ix = i;
7838 			break;
7839 		default:
7840 			break;
7841 		}
7842 	}
7843 	/*
7844 	 * Count flow actions to allocate the required space for storing DR offsets
7845 	 * and to check that the temporary buffer will not be overrun.
7846 	 */
7847 	act_num = i + 1;
7848 	if (act_num >= MLX5_HW_MAX_ACTS) {
7849 		rte_flow_error_set(error, EINVAL,
7850 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
7851 		return NULL;
7852 	}
7853 	if (set_vlan_vid_ix != -1) {
7854 		/* If temporary action buffer was not used, copy template actions to it */
7855 		if (ra == actions)
7856 			flow_hw_actions_template_replace_container(actions,
7857 								   masks,
7858 								   tmp_action,
7859 								   tmp_mask,
7860 								   &ra, &rm,
7861 								   act_num);
7862 		flow_hw_set_vlan_vid(dev, ra, rm,
7863 				     &set_vlan_vid_spec, &set_vlan_vid_mask,
7864 				     set_vlan_vid_ix);
7865 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7866 	}
7867 	if (action_flags & MLX5_FLOW_ACTION_QUOTA) {
7868 		mf_actions[expand_mf_num] = quota_color_inc_action;
7869 		mf_masks[expand_mf_num] = quota_color_inc_mask;
7870 		expand_mf_num++;
7871 	}
7872 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
7873 	    priv->sh->config.dv_esw_en &&
7874 	    (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
7875 		/* Insert META copy */
7876 		mf_actions[expand_mf_num] = rx_meta_copy_action;
7877 		mf_masks[expand_mf_num] = rx_meta_copy_mask;
7878 		expand_mf_num++;
7879 	}
7880 	if (expand_mf_num) {
7881 		if (act_num + expand_mf_num > MLX5_HW_MAX_ACTS) {
7882 			rte_flow_error_set(error, E2BIG,
7883 					   RTE_FLOW_ERROR_TYPE_ACTION,
7884 					   NULL, "cannot expand: too many actions");
7885 			return NULL;
7886 		}
7887 		if (ra == actions)
7888 			flow_hw_actions_template_replace_container(actions,
7889 								   masks,
7890 								   tmp_action,
7891 								   tmp_mask,
7892 								   &ra, &rm,
7893 								   act_num);
7894 		/* The application must ensure that only one QUEUE/RSS action exists in a rule. */
7895 		pos = flow_hw_template_expand_modify_field(ra, rm,
7896 							   mf_actions,
7897 							   mf_masks,
7898 							   action_flags,
7899 							   act_num,
7900 							   expand_mf_num);
7901 		if (pos == MLX5_HW_EXPAND_MH_FAILED) {
7902 			rte_flow_error_set(error, ENOMEM,
7903 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7904 					   NULL, "modify header expansion failed");
7905 			return NULL;
7906 		}
7907 		act_num += expand_mf_num;
7908 		for (i = pos + expand_mf_num; i < act_num; i++)
7909 			src_off[i] += expand_mf_num;
7910 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7911 	}
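	/*
	 * The action template is allocated as a single buffer laid out as:
	 *   struct rte_flow_actions_template | actions | masks | dr_off[] | src_off[] | orig_actions
	 * with each section sized to 16-byte alignment below.
	 */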
7912 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
7913 	if (act_len <= 0)
7914 		return NULL;
7915 	len = RTE_ALIGN(act_len, 16);
7916 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);
7917 	if (mask_len <= 0)
7918 		return NULL;
7919 	len += RTE_ALIGN(mask_len, 16);
7920 	len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
7921 	len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
7922 	orig_act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, error);
7923 	if (orig_act_len <= 0)
7924 		return NULL;
7925 	len += RTE_ALIGN(orig_act_len, 16);
7926 	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
7927 			 RTE_CACHE_LINE_SIZE, rte_socket_id());
7928 	if (!at) {
7929 		rte_flow_error_set(error, ENOMEM,
7930 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7931 				   NULL,
7932 				   "cannot allocate action template");
7933 		return NULL;
7934 	}
7935 	/* Actions are stored in the first part of the buffer. */
7936 	at->attr = *attr;
7937 	at->actions = (struct rte_flow_action *)(at + 1);
7938 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,
7939 				len, ra, error);
7940 	if (act_len <= 0)
7941 		goto error;
7942 	/* Masks are stored in the second part. */
7943 	at->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);
7944 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
7945 				 len - act_len, rm, error);
7946 	if (mask_len <= 0)
7947 		goto error;
7948 	/* DR action offsets are stored in the third part. */
7949 	at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
7950 	at->src_off = RTE_PTR_ADD(at->dr_off,
7951 				  RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
7952 	memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
7953 	at->orig_actions = RTE_PTR_ADD(at->src_off,
7954 				       RTE_ALIGN(act_num * sizeof(*at->src_off), 16));
7955 	orig_act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->orig_actions, orig_act_len,
7956 				     actions, error);
7957 	if (orig_act_len <= 0)
7958 		goto error;
7959 	at->actions_num = act_num;
7960 	for (i = 0; i < at->actions_num; ++i)
7961 		at->dr_off[i] = UINT16_MAX;
7962 	at->reformat_off = UINT16_MAX;
7963 	at->mhdr_off = UINT16_MAX;
7964 	at->recom_off = UINT16_MAX;
7965 	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
7966 	     actions++, masks++, i++) {
7967 		const struct rte_flow_action_modify_field *info;
7968 
7969 		switch (actions->type) {
7970 		/*
7971 		 * The mlx5 PMD stores the indirect action index directly in the action conf.
7972 		 * The rte_flow_conv() function copies the content referenced by the conf pointer,
7973 		 * so the indirect action index must be restored from the original conf here.
7974 		 */
7975 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
7976 			at->actions[i].conf = ra[i].conf;
7977 			at->masks[i].conf = rm[i].conf;
7978 			break;
7979 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7980 			info = actions->conf;
7981 			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
7982 			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
7983 						       &at->flex_item)) ||
7984 			    (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
7985 			     flow_hw_flex_item_acquire(dev, info->src.flex_handle,
7986 						       &at->flex_item)))
7987 				goto error;
7988 			break;
7989 		default:
7990 			break;
7991 		}
7992 	}
7993 	ret = flow_hw_parse_flow_actions_to_dr_actions(dev, at, action_types, &tmpl_flags);
7994 	if (ret)
7995 		goto error;
7996 	at->action_flags = action_flags;
7997 	/* In non template mode there is no need to create the dr template. */
7998 	if (nt_mode)
7999 		return at;
8000 	at->tmpl = mlx5dr_action_template_create(action_types, tmpl_flags);
8001 	if (!at->tmpl) {
8002 		DRV_LOG(ERR, "Failed to create DR action template: %d", rte_errno);
8003 		goto error;
8004 	}
8005 	rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
8006 	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
8007 	return at;
8008 error:
8009 	if (at)
8010 		mlx5_free(at);
8012 	rte_flow_error_set(error, rte_errno,
8013 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8014 			   "Failed to create action template");
8015 	return NULL;
8016 }
8017 
8018 /**
8019  * Create flow action template.
8020  *
8021  * @param[in] dev
8022  *   Pointer to the rte_eth_dev structure.
8023  * @param[in] attr
8024  *   Pointer to the action template attributes.
8025  * @param[in] actions
8026  *   Associated actions (list terminated by the END action).
8027  * @param[in] masks
8028  *   List of actions marking which members of the corresponding action are constant.
8029  * @param[out] error
8030  *   Pointer to error structure.
8031  *
8032  * @return
8033  *   Action template pointer on success, NULL otherwise and rte_errno is set.
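 *
 * A minimal usage sketch at the rte_flow API level (this callback backs
 * rte_flow_actions_template_create() on HWS ports; all values are
 * hypothetical). The MARK id is fixed at template creation because it is
 * fully masked, while the QUEUE index is left as a per-rule argument:
 *
 * @code{.c}
 * const struct rte_flow_actions_template_attr attr = { .ingress = 1 };
 * const struct rte_flow_action actions[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_MARK,
 *       .conf = &(struct rte_flow_action_mark){ .id = 0x1234 } },
 *     { .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * const struct rte_flow_action masks[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_MARK,
 *       .conf = &(struct rte_flow_action_mark){ .id = UINT32_MAX } },
 *     { .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow_actions_template *at =
 *     rte_flow_actions_template_create(port_id, &attr, actions, masks, &err);
 * @endcode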
8034  */
8035 static struct rte_flow_actions_template *
8036 flow_hw_actions_template_create(struct rte_eth_dev *dev,
8037 			const struct rte_flow_actions_template_attr *attr,
8038 			const struct rte_flow_action actions[],
8039 			const struct rte_flow_action masks[],
8040 			struct rte_flow_error *error)
8041 {
8042 	return __flow_hw_actions_template_create(dev, attr, actions, masks, false, error);
8043 }
8044 
8045 /**
8046  * Destroy flow action template.
8047  *
8048  * @param[in] dev
8049  *   Pointer to the rte_eth_dev structure.
8050  * @param[in] template
8051  *   Pointer to the action template to be destroyed.
8052  * @param[out] error
8053  *   Pointer to error structure.
8054  *
8055  * @return
8056  *   0 on success, a negative errno value otherwise and rte_errno is set.
8057  */
8058 static int
8059 flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
8060 				 struct rte_flow_actions_template *template,
8061 				 struct rte_flow_error *error __rte_unused)
8062 {
8063 	uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
8064 			MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
8065 
8066 	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
8067 		DRV_LOG(WARNING, "Action template %p is still in use.",
8068 			(void *)template);
8069 		return rte_flow_error_set(error, EBUSY,
8070 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8071 				   NULL,
8072 				   "action template is in use");
8073 	}
8074 	if (template->action_flags & flag)
8075 		mlx5_free_srh_flex_parser(dev);
8076 	LIST_REMOVE(template, next);
8077 	flow_hw_flex_item_release(dev, &template->flex_item);
8078 	if (template->tmpl)
8079 		mlx5dr_action_template_destroy(template->tmpl);
8080 	mlx5_free(template);
8081 	return 0;
8082 }
8083 
8084 static struct rte_flow_item *
8085 flow_hw_prepend_item(const struct rte_flow_item *items,
8086 		     const uint32_t nb_items,
8087 		     const struct rte_flow_item *new_item,
8088 		     struct rte_flow_error *error)
8089 {
8090 	struct rte_flow_item *copied_items;
8091 	size_t size;
8092 
8093 	/* Allocate new array of items. */
8094 	size = sizeof(*copied_items) * (nb_items + 1);
8095 	copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());
8096 	if (!copied_items) {
8097 		rte_flow_error_set(error, ENOMEM,
8098 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8099 				   NULL,
8100 				   "cannot allocate item template");
8101 		return NULL;
8102 	}
8103 	/* Put new item at the beginning and copy the rest. */
8104 	copied_items[0] = *new_item;
8105 	rte_memcpy(&copied_items[1], items, sizeof(*items) * nb_items);
8106 	return copied_items;
8107 }
8108 
8109 static int
8110 flow_hw_item_compare_field_validate(enum rte_flow_field_id arg_field,
8111 				    enum rte_flow_field_id base_field,
8112 				    struct rte_flow_error *error)
8113 {
8114 	switch (arg_field) {
8115 	case RTE_FLOW_FIELD_TAG:
8116 	case RTE_FLOW_FIELD_META:
8117 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8118 		break;
8119 	case RTE_FLOW_FIELD_RANDOM:
8120 		if (base_field == RTE_FLOW_FIELD_VALUE)
8121 			return 0;
8122 		return rte_flow_error_set(error, EINVAL,
8123 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8124 					  NULL,
8125 					  "compare random is supported only with immediate value");
8126 	default:
8127 		return rte_flow_error_set(error, ENOTSUP,
8128 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8129 					  NULL,
8130 					  "compare item argument field is not supported");
8131 	}
8132 	switch (base_field) {
8133 	case RTE_FLOW_FIELD_TAG:
8134 	case RTE_FLOW_FIELD_META:
8135 	case RTE_FLOW_FIELD_VALUE:
8136 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8137 		break;
8138 	default:
8139 		return rte_flow_error_set(error, ENOTSUP,
8140 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8141 					  NULL,
8142 					  "compare item base field is not supported");
8143 	}
8144 	return 0;
8145 }
8146 
8147 static inline uint32_t
8148 flow_hw_item_compare_width_supported(enum rte_flow_field_id field)
8149 {
8150 	switch (field) {
8151 	case RTE_FLOW_FIELD_TAG:
8152 	case RTE_FLOW_FIELD_META:
8153 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8154 		return 32;
8155 	case RTE_FLOW_FIELD_RANDOM:
8156 		return 16;
8157 	default:
8158 		break;
8159 	}
8160 	return 0;
8161 }
8162 
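/*
 * Sketch of a COMPARE item mask accepted by the validation below (assuming
 * the generic rte_flow compare enumerators; the tag index is hypothetical):
 *
 *   struct rte_flow_item_compare cmp_mask = {
 *       .operation = RTE_FLOW_ITEM_COMPARE_EQ,
 *       .a = { .field = RTE_FLOW_FIELD_TAG, .tag_index = 0 },
 *       .b = { .field = RTE_FLOW_FIELD_META },
 *       .width = UINT32_MAX, // a full width mask is mandatory
 *   };
 *
 * When a spec is also given, it must use the same operation and fields and
 * cover the full supported width (32 bits for TAG/META/ESP_SEQ_NUM,
 * 16 bits for RANDOM).
 */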
8163 static int
8164 flow_hw_validate_item_compare(const struct rte_flow_item *item,
8165 			      struct rte_flow_error *error)
8166 {
8167 	const struct rte_flow_item_compare *comp_m = item->mask;
8168 	const struct rte_flow_item_compare *comp_v = item->spec;
8169 	int ret;
8170 
8171 	if (unlikely(!comp_m))
8172 		return rte_flow_error_set(error, EINVAL,
8173 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8174 				   NULL,
8175 				   "compare item mask is missing");
8176 	if (comp_m->width != UINT32_MAX)
8177 		return rte_flow_error_set(error, EINVAL,
8178 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8179 				   NULL,
8180 				   "compare item only supports full mask");
8181 	ret = flow_hw_item_compare_field_validate(comp_m->a.field,
8182 						  comp_m->b.field, error);
8183 	if (ret < 0)
8184 		return ret;
8185 	if (comp_v) {
8186 		uint32_t width;
8187 
8188 		if (comp_v->operation != comp_m->operation ||
8189 		    comp_v->a.field != comp_m->a.field ||
8190 		    comp_v->b.field != comp_m->b.field)
8191 			return rte_flow_error_set(error, EINVAL,
8192 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8193 					   NULL,
8194 					   "compare item spec/mask not matching");
8195 		width = flow_hw_item_compare_width_supported(comp_v->a.field);
8196 		MLX5_ASSERT(width > 0);
8197 		if ((comp_v->width & comp_m->width) != width)
8198 			return rte_flow_error_set(error, EINVAL,
8199 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8200 					   NULL,
8201 					   "compare item only supports full mask");
8202 	}
8203 	return 0;
8204 }
8205 
8206 static inline int
8207 mlx5_hw_validate_item_nsh(struct rte_eth_dev *dev,
8208 			  const struct rte_flow_item *item,
8209 			  struct rte_flow_error *error)
8210 {
8211 	return mlx5_flow_validate_item_nsh(dev, item, error);
8212 }
8213 
8214 static bool
8215 mlx5_hw_flow_tunnel_ip_check(uint64_t last_item, uint64_t *item_flags)
8216 {
8217 	bool tunnel;
8218 
8219 	if (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
8220 		tunnel = true;
8221 		*item_flags |= MLX5_FLOW_LAYER_IPIP;
8222 	} else if (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV6 ||
8223 		   last_item == MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) {
8224 		tunnel = true;
8225 		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
8226 	} else {
8227 		tunnel = false;
8228 	}
8229 	return tunnel;
8230 }
8231 
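/*
 * IPv4 and IPv6 header fields which HWS supports matching on; used as the
 * acceptable masks when validating pattern templates below.
 */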
8232 const struct rte_flow_item_ipv4 hws_nic_ipv4_mask = {
8233 	.hdr = {
8234 		.version = 0xf,
8235 		.ihl = 0xf,
8236 		.type_of_service = 0xff,
8237 		.total_length = RTE_BE16(0xffff),
8238 		.packet_id = RTE_BE16(0xffff),
8239 		.fragment_offset = RTE_BE16(0xffff),
8240 		.time_to_live = 0xff,
8241 		.next_proto_id = 0xff,
8242 		.src_addr = RTE_BE32(0xffffffff),
8243 		.dst_addr = RTE_BE32(0xffffffff),
8244 	},
8245 };
8246 
8247 const struct rte_flow_item_ipv6 hws_nic_ipv6_mask = {
8248 	.hdr = {
8249 		.vtc_flow = RTE_BE32(0xffffffff),
8250 		.payload_len = RTE_BE16(0xffff),
8251 		.proto = 0xff,
8252 		.hop_limits = 0xff,
8253 		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
8254 			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
8255 		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
8256 			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
8257 	},
8258 	.has_frag_ext = 1,
8259 };
8260 
8261 static int
8262 flow_hw_validate_item_ptype(const struct rte_flow_item *item,
8263 			    struct rte_flow_error *error)
8264 {
8265 	const struct rte_flow_item_ptype *ptype = item->mask;
8266 
8267 	/* HWS does not allow empty PTYPE mask */
8268 	if (!ptype)
8269 		return rte_flow_error_set(error, EINVAL,
8270 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8271 					  NULL, "empty ptype mask");
8272 	if (!(ptype->packet_type &
8273 	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
8274 	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK |
8275 	       RTE_PTYPE_INNER_L4_MASK)))
8276 		return rte_flow_error_set(error, ENOTSUP,
8277 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8278 					  NULL, "ptype mask not supported");
8279 	return 0;
8280 }
8281 
8282 struct mlx5_hw_pattern_validation_ctx {
8283 	const struct rte_flow_item *geneve_item;
8284 	const struct rte_flow_item *flex_item;
8285 };
8286 
8287 static int
8288 flow_hw_pattern_validate(struct rte_eth_dev *dev,
8289 			 const struct rte_flow_pattern_template_attr *attr,
8290 			 const struct rte_flow_item items[],
8291 			 uint64_t *item_flags,
8292 			 struct rte_flow_error *error)
8293 {
8294 	struct mlx5_priv *priv = dev->data->dev_private;
8295 	const struct rte_flow_item *item;
8296 	const struct rte_flow_item *gtp_item = NULL;
8297 	const struct rte_flow_item *gre_item = NULL;
8298 	const struct rte_flow_attr flow_attr = {
8299 		.ingress = attr->ingress,
8300 		.egress = attr->egress,
8301 		.transfer = attr->transfer
8302 	};
8303 	int ret, tag_idx;
8304 	uint32_t tag_bitmap = 0;
8305 	uint64_t last_item = 0;
8306 
8307 	if (!mlx5_hw_ctx_validate(dev, error))
8308 		return -rte_errno;
8309 	if (!attr->ingress && !attr->egress && !attr->transfer)
8310 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8311 					  "at least one of the direction attributes"
8312 					  " must be specified");
8313 	if (priv->sh->config.dv_esw_en) {
8314 		MLX5_ASSERT(priv->master || priv->representor);
8315 		if (priv->master) {
8316 			if ((attr->ingress && attr->egress) ||
8317 			    (attr->ingress && attr->transfer) ||
8318 			    (attr->egress && attr->transfer))
8319 				return rte_flow_error_set(error, EINVAL,
8320 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8321 							  "only one direction attribute at once"
8322 							  " can be used on transfer proxy port");
8323 		} else {
8324 			if (attr->transfer)
8325 				return rte_flow_error_set(error, EINVAL,
8326 							  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
8327 							  "transfer attribute cannot be used with"
8328 							  " port representors");
8329 			if (attr->ingress && attr->egress)
8330 				return rte_flow_error_set(error, EINVAL,
8331 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8332 							  "ingress and egress direction attributes"
8333 							  " cannot be used at the same time on"
8334 							  " port representors");
8335 		}
8336 	} else {
8337 		if (attr->transfer)
8338 			return rte_flow_error_set(error, EINVAL,
8339 						  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
8340 						  "transfer attribute cannot be used when"
8341 						  " E-Switch is disabled");
8342 	}
8343 	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
8344 		bool tunnel = *item_flags & MLX5_FLOW_LAYER_TUNNEL;
8345 
8346 		switch ((int)item->type) {
8347 		case RTE_FLOW_ITEM_TYPE_PTYPE:
8348 			ret = flow_hw_validate_item_ptype(item, error);
8349 			if (ret)
8350 				return ret;
8351 			last_item = MLX5_FLOW_ITEM_PTYPE;
8352 			break;
8353 		case RTE_FLOW_ITEM_TYPE_TAG:
8354 		{
8355 			const struct rte_flow_item_tag *tag =
8356 				(const struct rte_flow_item_tag *)item->spec;
8357 
8358 			if (tag == NULL)
8359 				return rte_flow_error_set(error, EINVAL,
8360 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8361 							  NULL,
8362 							  "Tag spec is NULL");
8363 			if (tag->index >= MLX5_FLOW_HW_TAGS_MAX &&
8364 			    tag->index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
8365 				return rte_flow_error_set(error, EINVAL,
8366 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8367 							  NULL,
8368 							  "Invalid tag index");
8369 			tag_idx = flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, tag->index);
8370 			if (tag_idx == REG_NON)
8371 				return rte_flow_error_set(error, EINVAL,
8372 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8373 							  NULL,
8374 							  "Unsupported tag index");
8375 			if (tag_bitmap & (1 << tag_idx))
8376 				return rte_flow_error_set(error, EINVAL,
8377 							  RTE_FLOW_ERROR_TYPE_ITEM,
8378 							  NULL,
8379 							  "Duplicated tag index");
8380 			tag_bitmap |= 1 << tag_idx;
8381 			last_item = MLX5_FLOW_ITEM_TAG;
8382 			break;
8383 		}
8384 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8385 		{
8386 			const struct rte_flow_item_tag *tag =
8387 				(const struct rte_flow_item_tag *)item->spec;
8388 			uint16_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
8389 
8390 			if (!((1 << (tag->index - REG_C_0)) & regcs))
8391 				return rte_flow_error_set(error, EINVAL,
8392 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8393 							  NULL,
8394 							  "Unsupported internal tag index");
8395 			if (tag_bitmap & (1 << tag->index))
8396 				return rte_flow_error_set(error, EINVAL,
8397 							  RTE_FLOW_ERROR_TYPE_ITEM,
8398 							  NULL,
8399 							  "Duplicated tag index");
8400 			tag_bitmap |= 1 << tag->index;
8401 			break;
8402 		}
8403 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
8404 			if (attr->ingress && priv->sh->config.repr_matching)
8405 				return rte_flow_error_set(error, EINVAL,
8406 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8407 						  "represented port item cannot be used"
8408 						  " when ingress attribute is set");
8409 			if (attr->egress)
8410 				return rte_flow_error_set(error, EINVAL,
8411 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8412 						  "represented port item cannot be used"
8413 						  " when egress attribute is set");
8414 			last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
8415 			break;
8416 		case RTE_FLOW_ITEM_TYPE_META:
8417 			/* ingress + group 0 is not supported */
8418 			*item_flags |= MLX5_FLOW_ITEM_METADATA;
8419 			break;
8420 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
8421 		{
8422 			int reg = flow_hw_get_reg_id(dev,
8423 						     RTE_FLOW_ITEM_TYPE_METER_COLOR,
8424 						     0);
8425 			if (reg == REG_NON)
8426 				return rte_flow_error_set(error, EINVAL,
8427 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8428 							  NULL,
8429 							  "Unsupported meter color register");
8430 			if (*item_flags &
8431 			    (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
8432 				return rte_flow_error_set
8433 					(error, EINVAL,
8434 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8435 			last_item = MLX5_FLOW_ITEM_METER_COLOR;
8436 			break;
8437 		}
8438 		case RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY:
8439 		{
8440 			if (!priv->sh->lag_rx_port_affinity_en)
8441 				return rte_flow_error_set(error, EINVAL,
8442 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8443 							  "Unsupported aggregated affinity with older FW");
8444 			if ((attr->transfer && priv->fdb_def_rule) || attr->egress)
8445 				return rte_flow_error_set(error, EINVAL,
8446 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8447 							  "Aggregated affinity item not supported"
8448 							  " with egress or transfer"
8449 							  " attribute");
8450 			last_item = MLX5_FLOW_ITEM_AGGR_AFFINITY;
8451 			break;
8452 		}
8453 		case RTE_FLOW_ITEM_TYPE_GENEVE:
8454 			last_item = MLX5_FLOW_LAYER_GENEVE;
8455 			break;
8456 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
8457 		{
8458 			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
8459 			ret = mlx5_flow_geneve_tlv_option_validate(priv, item,
8460 								   error);
8461 			if (ret < 0)
8462 				return ret;
8463 			break;
8464 		}
8465 		case RTE_FLOW_ITEM_TYPE_COMPARE:
8466 		{
8467 			last_item = MLX5_FLOW_ITEM_COMPARE;
8468 			ret = flow_hw_validate_item_compare(item, error);
8469 			if (ret)
8470 				return ret;
8471 			break;
8472 		}
8473 		case RTE_FLOW_ITEM_TYPE_ETH:
8474 			ret = mlx5_flow_validate_item_eth(dev, item,
8475 							  *item_flags,
8476 							  true, error);
8477 			if (ret < 0)
8478 				return ret;
8479 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8480 				    MLX5_FLOW_LAYER_OUTER_L2;
8481 			break;
8482 		case RTE_FLOW_ITEM_TYPE_VLAN:
8483 			ret = mlx5_flow_dv_validate_item_vlan(item, *item_flags,
8484 							      dev, error);
8485 			if (ret < 0)
8486 				return ret;
8487 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
8488 				    MLX5_FLOW_LAYER_OUTER_VLAN;
8489 			break;
8490 		case RTE_FLOW_ITEM_TYPE_IPV4:
8491 			tunnel |= mlx5_hw_flow_tunnel_ip_check(last_item,
8492 							       item_flags);
8493 			ret = mlx5_flow_dv_validate_item_ipv4(dev, item,
8494 							      *item_flags,
8495 							      last_item, 0,
8496 							      &hws_nic_ipv4_mask,
8497 							      error);
8498 			if (ret)
8499 				return ret;
8500 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8501 				    MLX5_FLOW_LAYER_OUTER_L3_IPV4;
8502 			break;
8503 		case RTE_FLOW_ITEM_TYPE_IPV6:
8504 			tunnel |= mlx5_hw_flow_tunnel_ip_check(last_item,
8505 							       item_flags);
8506 			ret = mlx5_flow_validate_item_ipv6(dev, item,
8507 							   *item_flags,
8508 							   last_item, 0,
8509 							   &hws_nic_ipv6_mask,
8510 							   error);
8511 			if (ret < 0)
8512 				return ret;
8513 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8514 				    MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8515 			break;
8516 		case RTE_FLOW_ITEM_TYPE_UDP:
8517 			ret = mlx5_flow_validate_item_udp(dev, item,
8518 							  *item_flags,
8519 							  0xff, error);
8520 			if (ret)
8521 				return ret;
8522 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8523 				    MLX5_FLOW_LAYER_OUTER_L4_UDP;
8524 			break;
8525 		case RTE_FLOW_ITEM_TYPE_TCP:
8526 			ret = mlx5_flow_validate_item_tcp
8527 				(dev, item, *item_flags,
8528 				 0xff, &nic_tcp_mask, error);
8529 			if (ret < 0)
8530 				return ret;
8531 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8532 				    MLX5_FLOW_LAYER_OUTER_L4_TCP;
8533 			break;
8534 		case RTE_FLOW_ITEM_TYPE_GTP:
8535 			gtp_item = item;
8536 			ret = mlx5_flow_dv_validate_item_gtp(dev, gtp_item,
8537 							     *item_flags, error);
8538 			if (ret < 0)
8539 				return ret;
8540 			last_item = MLX5_FLOW_LAYER_GTP;
8541 			break;
8542 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
8543 			ret = mlx5_flow_dv_validate_item_gtp_psc(dev, item,
8544 								 last_item,
8545 								 gtp_item,
8546 								 false, error);
8547 			if (ret < 0)
8548 				return ret;
8549 			last_item = MLX5_FLOW_LAYER_GTP_PSC;
8550 			break;
8551 		case RTE_FLOW_ITEM_TYPE_VXLAN:
8552 			ret = mlx5_flow_validate_item_vxlan(dev, 0, item,
8553 							    *item_flags,
8554 							    false, error);
8555 			if (ret < 0)
8556 				return ret;
8557 			last_item = MLX5_FLOW_LAYER_VXLAN;
8558 			break;
8559 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8560 			ret = mlx5_flow_validate_item_vxlan_gpe(item,
8561 								*item_flags,
8562 								dev, error);
8563 			if (ret < 0)
8564 				return ret;
8565 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
8566 			break;
8567 		case RTE_FLOW_ITEM_TYPE_MPLS:
8568 			ret = mlx5_flow_validate_item_mpls(dev, item,
8569 							   *item_flags,
8570 							   last_item, error);
8571 			if (ret < 0)
8572 				return ret;
8573 			last_item = MLX5_FLOW_LAYER_MPLS;
8574 			break;
8575 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
8576 		case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
8577 			last_item = MLX5_FLOW_ITEM_SQ;
8578 			break;
8579 		case RTE_FLOW_ITEM_TYPE_GRE:
8580 			ret = mlx5_flow_validate_item_gre(dev, item,
8581 							  *item_flags,
8582 							  0xff, error);
8583 			if (ret < 0)
8584 				return ret;
8585 			gre_item = item;
8586 			last_item = MLX5_FLOW_LAYER_GRE;
8587 			break;
8588 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
8589 			if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
8590 				return rte_flow_error_set
8591 					(error, EINVAL,
8592 					 RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
8593 			ret = mlx5_flow_validate_item_gre_key
8594 				(dev, item, *item_flags, gre_item, error);
8595 			if (ret < 0)
8596 				return ret;
8597 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
8598 			break;
8599 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
8600 			if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
8601 				return rte_flow_error_set
8602 					(error, EINVAL,
8603 					 RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
8604 			ret = mlx5_flow_validate_item_gre_option(dev, item,
8605 								 *item_flags,
8606 								 &flow_attr,
8607 								 gre_item,
8608 								 error);
8609 			if (ret < 0)
8610 				return ret;
8611 			last_item = MLX5_FLOW_LAYER_GRE;
8612 			break;
8613 		case RTE_FLOW_ITEM_TYPE_NVGRE:
8614 			ret = mlx5_flow_validate_item_nvgre(dev, item,
8615 							    *item_flags, 0xff,
8616 							    error);
8617 			if (ret)
8618 				return ret;
8619 			last_item = MLX5_FLOW_LAYER_NVGRE;
8620 			break;
8621 		case RTE_FLOW_ITEM_TYPE_ICMP:
8622 			ret = mlx5_flow_validate_item_icmp(dev, item,
8623 							   *item_flags, 0xff,
8624 							   error);
8625 			if (ret < 0)
8626 				return ret;
8627 			last_item = MLX5_FLOW_LAYER_ICMP;
8628 			break;
8629 		case RTE_FLOW_ITEM_TYPE_ICMP6:
8630 			ret = mlx5_flow_validate_item_icmp6(dev, item,
8631 							    *item_flags, 0xff,
8632 							    error);
8633 			if (ret < 0)
8634 				return ret;
8635 			last_item = MLX5_FLOW_LAYER_ICMP6;
8636 			break;
8637 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
8638 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
8639 			ret = mlx5_flow_validate_item_icmp6_echo(dev, item,
8640 								 *item_flags,
8641 								 0xff, error);
8642 			if (ret < 0)
8643 				return ret;
8644 			last_item = MLX5_FLOW_LAYER_ICMP6;
8645 			break;
8646 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
8647 			if (*item_flags &
8648 			    (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
8649 				return rte_flow_error_set
8650 					(error, EINVAL,
8651 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8652 			ret = mlx5_flow_dv_validate_item_aso_ct(dev, item,
8653 								item_flags,
8654 								error);
8655 			if (ret < 0)
8656 				return ret;
8657 			break;
8658 		case RTE_FLOW_ITEM_TYPE_QUOTA:
8659 			if (*item_flags &
8660 			    (MLX5_FLOW_ITEM_METER_COLOR |
8661 			     MLX5_FLOW_LAYER_ASO_CT))
8662 				return rte_flow_error_set
8663 					(error, EINVAL,
8664 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8665 			last_item = MLX5_FLOW_ITEM_QUOTA;
8666 			break;
8667 		case RTE_FLOW_ITEM_TYPE_ESP:
8668 			ret = mlx5_flow_os_validate_item_esp(dev, item,
8669 							     *item_flags, 0xff,
8670 							     error);
8671 			if (ret < 0)
8672 				return ret;
8673 			last_item = MLX5_FLOW_ITEM_ESP;
8674 			break;
8675 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
8676 			last_item = tunnel ?
8677 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
8678 				    MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
8679 			break;
8680 		case RTE_FLOW_ITEM_TYPE_FLEX:
8681 			/* match mlx5dr_definer_conv_items_to_hl() */
8682 			last_item = tunnel ?
8683 				    MLX5_FLOW_ITEM_INNER_FLEX :
8684 				    MLX5_FLOW_ITEM_OUTER_FLEX;
8685 			break;
8686 		case RTE_FLOW_ITEM_TYPE_RANDOM:
8687 			last_item = MLX5_FLOW_ITEM_RANDOM;
8688 			break;
8689 		case RTE_FLOW_ITEM_TYPE_NSH:
8690 			last_item = MLX5_FLOW_ITEM_NSH;
8691 			ret = mlx5_hw_validate_item_nsh(dev, item, error);
8692 			if (ret < 0)
8693 				return ret;
8694 			break;
8695 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
8696 			/*
8697 			 * Integrity flow item validation requires access to
8698 			 * both the item mask and spec.
8699 			 * The current HWS model provides the item mask in the pattern
8700 			 * template and the item spec in the flow rule.
8701 			 */
8702 			break;
8703 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
8704 		case RTE_FLOW_ITEM_TYPE_VOID:
8705 		case RTE_FLOW_ITEM_TYPE_END:
8706 			break;
8707 		default:
8708 			return rte_flow_error_set(error, EINVAL,
8709 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8710 						  NULL,
8711 						  "Unsupported item type");
8712 		}
8713 		*item_flags |= last_item;
8714 	}
8715 	return 1 + RTE_PTR_DIFF(item, items) / sizeof(item[0]);
8716 }
8717 
8718 /*
8719  * Verify that the tested flow patterns fit the STE size limit of an HWS group,
8720  * by creating and destroying a temporary template table with the given patterns.
8721  *
8722  * Return values:
8723  * 0       : Tested patterns fit STE size limit
8724  * -EINVAL : Invalid parameters detected
8725  * -E2BIG  : Tested patterns exceed STE size limit
8726  */
8727 static int
8728 pattern_template_validate(struct rte_eth_dev *dev,
8729 			  struct rte_flow_pattern_template *pt[],
8730 			  uint32_t pt_num,
8731 			  struct rte_flow_error *error)
8732 {
8733 	struct mlx5_flow_template_table_cfg tbl_cfg = {
8734 		.attr = {
8735 			.nb_flows = 64,
8736 			.insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN,
8737 			.hash_func = RTE_FLOW_TABLE_HASH_FUNC_DEFAULT,
8738 			.flow_attr = {
8739 				.group = 1,
8740 				.ingress = pt[0]->attr.ingress,
8741 				.egress = pt[0]->attr.egress,
8742 				.transfer = pt[0]->attr.transfer
8743 			}
8744 		}
8745 	};
8746 	struct mlx5_priv *priv = dev->data->dev_private;
8747 	struct rte_flow_actions_template *action_template;
8748 	struct rte_flow_template_table *tmpl_tbl;
8749 	int ret;
8750 
8751 	if (pt[0]->attr.ingress) {
8752 		action_template =
8753 			priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
8754 	} else if (pt[0]->attr.egress) {
8755 		action_template =
8756 			priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
8757 	} else if (pt[0]->attr.transfer) {
8758 		action_template =
8759 			priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
8760 	} else {
8761 		ret = EINVAL;
8762 		goto end;
8763 	}
8764 
8765 	if (pt[0]->item_flags & MLX5_FLOW_ITEM_COMPARE)
8766 		tbl_cfg.attr.nb_flows = 1;
8767 	tmpl_tbl = flow_hw_table_create(dev, &tbl_cfg, pt, pt_num,
8768 					&action_template, 1, error);
8769 	if (tmpl_tbl) {
8770 		ret = 0;
8771 		flow_hw_table_destroy(dev, tmpl_tbl, error);
8772 	} else {
8773 		switch (rte_errno) {
8774 		case E2BIG:
8775 			ret = E2BIG;
8776 			break;
8777 		case ENOTSUP:
8778 			ret = EINVAL;
8779 			break;
8780 		default:
8781 			ret = 0;
8782 			break;
8783 		}
8784 	}
8785 end:
8786 	if (ret)
8787 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8788 				   NULL, "failed to validate pattern template");
8789 	return -ret;
8790 }
8791 
8792 /**
8793  * Create flow item template.
8794  *
8795  * @param[in] dev
8796  *   Pointer to the rte_eth_dev structure.
8797  * @param[in] attr
8798  *   Pointer to the item template attributes.
8799  * @param[in] items
8800  *   The template item pattern.
8801  * @param[out] error
8802  *   Pointer to error structure.
8803  *
8804  * @return
8805  *  Item template pointer on success, NULL otherwise and rte_errno is set.
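 *
 * A minimal usage sketch at the rte_flow API level (this callback backs
 * rte_flow_pattern_template_create() on HWS ports; the port id and the
 * masked fields are hypothetical). Only masked fields take part in matching,
 * the actual values are supplied per flow rule:
 *
 * @code{.c}
 * const struct rte_flow_pattern_template_attr attr = {
 *     .relaxed_matching = 0,
 *     .ingress = 1,
 * };
 * const struct rte_flow_item pattern[] = {
 *     { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *     { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *       .mask = &(struct rte_flow_item_ipv4){
 *           .hdr.dst_addr = RTE_BE32(0xffffffff),
 *       } },
 *     { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow_pattern_template *pt =
 *     rte_flow_pattern_template_create(port_id, &attr, pattern, &err);
 * @endcode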
8806  */
8807 static struct rte_flow_pattern_template *
8808 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
8809 			     const struct rte_flow_pattern_template_attr *attr,
8810 			     const struct rte_flow_item items[],
8811 			     struct rte_flow_error *error)
8812 {
8813 	struct mlx5_priv *priv = dev->data->dev_private;
8814 	struct rte_flow_pattern_template *it;
8815 	struct rte_flow_item *copied_items = NULL;
8816 	const struct rte_flow_item *tmpl_items;
8817 	uint64_t orig_item_nb, item_flags = 0;
8818 	struct rte_flow_item port = {
8819 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
8820 		.mask = &rte_flow_item_ethdev_mask,
8821 	};
8822 	struct rte_flow_item_tag tag_v = {
8823 		.data = 0,
8824 		.index = REG_C_0,
8825 	};
8826 	struct rte_flow_item_tag tag_m = {
8827 		.data = flow_hw_tx_tag_regc_mask(dev),
8828 		.index = 0xff,
8829 	};
8830 	struct rte_flow_item tag = {
8831 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
8832 		.spec = &tag_v,
8833 		.mask = &tag_m,
8834 		.last = NULL
8835 	};
8836 	int it_items_size;
8837 	unsigned int i = 0;
8838 	int rc;
8839 
8840 	/* Validate application items only */
8841 	rc = flow_hw_pattern_validate(dev, attr, items, &item_flags, error);
8842 	if (rc < 0)
8843 		return NULL;
8844 	orig_item_nb = rc;
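	/*
	 * With E-Switch representor matching enabled, an implicit item is
	 * prepended to the application pattern:
	 * - ingress templates get a REPRESENTED_PORT match;
	 * - egress templates get a REG_C_0 tag match, unless the template
	 *   already matches on the SQ.
	 */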
8845 	if (priv->sh->config.dv_esw_en &&
8846 	    priv->sh->config.repr_matching &&
8847 	    attr->ingress && !attr->egress && !attr->transfer) {
8848 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
8849 		if (!copied_items)
8850 			return NULL;
8851 		tmpl_items = copied_items;
8852 	} else if (priv->sh->config.dv_esw_en &&
8853 		   priv->sh->config.repr_matching &&
8854 		   !attr->ingress && attr->egress && !attr->transfer) {
8855 		if (item_flags & MLX5_FLOW_ITEM_SQ) {
8856 			DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
8857 				       "pattern template", dev->data->port_id);
8858 			tmpl_items = items;
8859 			goto setup_pattern_template;
8860 		}
8861 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
8862 		if (!copied_items)
8863 			return NULL;
8864 		tmpl_items = copied_items;
8865 	} else {
8866 		tmpl_items = items;
8867 	}
8868 setup_pattern_template:
8869 	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
8870 	if (!it) {
8871 		rte_flow_error_set(error, ENOMEM,
8872 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8873 				   NULL,
8874 				   "cannot allocate item template");
8875 		goto error;
8876 	}
8877 	it->attr = *attr;
8878 	it->item_flags = item_flags;
8879 	it->orig_item_nb = orig_item_nb;
8880 	it_items_size = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, tmpl_items, error);
8881 	if (it_items_size <= 0) {
8882 		rte_flow_error_set(error, ENOMEM,
8883 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8884 				   NULL,
8885 				   "Failed to determine buffer size for pattern");
8886 		goto error;
8887 	}
8888 	it_items_size = RTE_ALIGN(it_items_size, 16);
8889 	it->items = mlx5_malloc(MLX5_MEM_ZERO, it_items_size, 0, rte_dev_numa_node(dev->device));
8890 	if (it->items == NULL) {
8891 		rte_flow_error_set(error, ENOMEM,
8892 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8893 				   NULL,
8894 				   "Cannot allocate memory for pattern");
8895 		goto error;
8896 	}
8897 	rc = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, it->items, it_items_size, tmpl_items, error);
8898 	if (rc <= 0) {
8899 		rte_flow_error_set(error, ENOMEM,
8900 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8901 				   NULL,
8902 				   "Failed to store pattern");
8903 		goto error;
8904 	}
8905 	it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
8906 	if (!it->mt) {
8907 		rte_flow_error_set(error, rte_errno,
8908 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8909 				   NULL,
8910 				   "cannot create match template");
8911 		goto error;
8912 	}
8913 	if (copied_items) {
8914 		if (attr->ingress)
8915 			it->implicit_port = true;
8916 		else if (attr->egress)
8917 			it->implicit_tag = true;
8918 		mlx5_free(copied_items);
8919 		copied_items = NULL;
8920 	}
8921 	/* Either the inner or the outer IPv6 routing extension can be matched, not both. */
8922 	if (it->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
8923 			      MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
8924 		if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
8925 		     (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) ||
8926 		    (mlx5_alloc_srh_flex_parser(dev))) {
8927 			rte_flow_error_set(error, rte_errno,
8928 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8929 					   "cannot create IPv6 routing extension support");
8930 			goto error;
8931 		}
8932 	}
8933 	if (it->item_flags & MLX5_FLOW_ITEM_FLEX) {
8934 		for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
8935 			const struct rte_flow_item_flex *spec = items[i].spec;
8936 			struct rte_flow_item_flex_handle *handle;
8937 
8938 			if (items[i].type != RTE_FLOW_ITEM_TYPE_FLEX)
8939 				continue;
8940 			handle = spec->handle;
8941 			if (flow_hw_flex_item_acquire(dev, handle,
8942 						      &it->flex_item)) {
8943 				rte_flow_error_set(error, EINVAL,
8944 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8945 						   NULL, "cannot create hw FLEX item");
8946 				goto error;
8947 			}
8948 		}
8949 	}
8950 	if (it->item_flags & MLX5_FLOW_LAYER_GENEVE_OPT) {
8951 		for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
8952 			const struct rte_flow_item_geneve_opt *spec =
8953 				items[i].spec;
8954 
8955 			if (items[i].type != RTE_FLOW_ITEM_TYPE_GENEVE_OPT)
8956 				continue;
8957 			if (mlx5_geneve_tlv_option_register(priv, spec,
8958 							    &it->geneve_opt_mng)) {
8959 				rte_flow_error_set(error, EINVAL,
8960 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8961 						   NULL, "cannot register GENEVE TLV option");
8962 				goto error;
8963 			}
8964 		}
8965 	}
8966 	rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
8967 	rc = pattern_template_validate(dev, &it, 1, error);
8968 	if (rc)
8969 		goto error;
8970 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
8971 	return it;
8972 error:
8973 	if (it) {
8974 		if (it->flex_item)
8975 			flow_hw_flex_item_release(dev, &it->flex_item);
8976 		if (it->geneve_opt_mng.nb_options)
8977 			mlx5_geneve_tlv_options_unregister(priv, &it->geneve_opt_mng);
8978 		if (it->mt)
8979 			claim_zero(mlx5dr_match_template_destroy(it->mt));
8980 		mlx5_free(it->items);
8981 		mlx5_free(it);
8982 	}
8983 	if (copied_items)
8984 		mlx5_free(copied_items);
8985 	return NULL;
8986 }
8987 
8988 /**
8989  * Destroy flow item template.
8990  *
8991  * @param[in] dev
8992  *   Pointer to the rte_eth_dev structure.
8993  * @param[in] template
8994  *   Pointer to the item template to be destroyed.
8995  * @param[out] error
8996  *   Pointer to error structure.
8997  *
8998  * @return
8999  *   0 on success, a negative errno value otherwise and rte_errno is set.
9000  */
9001 static int
9002 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
9003 			      struct rte_flow_pattern_template *template,
9004 			      struct rte_flow_error *error __rte_unused)
9005 {
9006 	struct mlx5_priv *priv = dev->data->dev_private;
9007 
9008 	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
9009 		DRV_LOG(WARNING, "Item template %p is still in use.",
9010 			(void *)template);
9011 		return rte_flow_error_set(error, EBUSY,
9012 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9013 				   NULL,
9014 				   "item template is in use");
9015 	}
9016 	if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
9017 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
9018 		mlx5_free_srh_flex_parser(dev);
9019 	LIST_REMOVE(template, next);
9020 	flow_hw_flex_item_release(dev, &template->flex_item);
9021 	mlx5_geneve_tlv_options_unregister(priv, &template->geneve_opt_mng);
9022 	claim_zero(mlx5dr_match_template_destroy(template->mt));
9023 	mlx5_free(template->items);
9024 	mlx5_free(template);
9025 	return 0;
9026 }
9027 
9028 /*
9029  * Get information about HWS pre-configurable resources.
9030  *
9031  * @param[in] dev
9032  *   Pointer to the rte_eth_dev structure.
9033  * @param[out] port_info
9034  *   Pointer to port information.
9035  * @param[out] queue_info
9036  *   Pointer to queue information.
9037  * @param[out] error
9038  *   Pointer to error structure.
9039  *
9040  * @return
9041  *   0 on success, a negative errno value otherwise and rte_errno is set.
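 *
 * Counterpart of rte_flow_info_get() on HWS ports. A minimal query sketch
 * (the port id is hypothetical):
 *
 * @code{.c}
 * struct rte_flow_port_info port_info;
 * struct rte_flow_queue_info queue_info;
 * struct rte_flow_error err;
 *
 * if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) == 0) {
 *     // port_info.max_nb_counters, max_nb_meters and max_nb_aging_objects,
 *     // together with queue_info.max_size, can be used to size the
 *     // arguments of rte_flow_configure().
 * }
 * @endcode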
9042  */
9043 static int
9044 flow_hw_info_get(struct rte_eth_dev *dev,
9045 		 struct rte_flow_port_info *port_info,
9046 		 struct rte_flow_queue_info *queue_info,
9047 		 struct rte_flow_error *error __rte_unused)
9048 {
9049 	struct mlx5_priv *priv = dev->data->dev_private;
9050 	uint16_t port_id = dev->data->port_id;
9051 	struct rte_mtr_capabilities mtr_cap;
9052 	int ret;
9053 
9054 	memset(port_info, 0, sizeof(*port_info));
9055 	/* Queue size is not limited by the low-level layer. */
9056 	port_info->max_nb_queues = UINT32_MAX;
9057 	queue_info->max_size = UINT32_MAX;
9058 
9059 	memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
9060 	ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
9061 	if (!ret)
9062 		port_info->max_nb_meters = mtr_cap.n_max;
9063 	port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
9064 	port_info->max_nb_aging_objects = port_info->max_nb_counters;
9065 	return 0;
9066 }
9067 
9068 /**
9069  * Create group callback.
9070  *
9071  * @param[in] tool_ctx
9072  *   Pointer to the hash list related context.
9073  * @param[in] cb_ctx
9074  *   Pointer to the group creation context.
9075  *
9076  * @return
9077  *   Group entry on success, NULL otherwise and rte_errno is set.
9078  */
9079 struct mlx5_list_entry *
9080 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
9081 {
9082 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9083 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9084 	struct rte_eth_dev *dev = ctx->dev;
9085 	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
9086 	struct mlx5_priv *priv = dev->data->dev_private;
9087 	struct mlx5dr_table_attr dr_tbl_attr = {0};
9088 	struct rte_flow_error *error = ctx->error;
9089 	struct mlx5_flow_group *grp_data;
9090 	struct mlx5dr_table *tbl = NULL;
9091 	struct mlx5dr_action *jump;
9092 	uint32_t idx = 0;
9093 	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
9094 	      attr->transfer ? "FDB" : "NIC", attr->egress ? "egress" : "ingress",
9095 	      attr->group, idx);
9096 
9097 	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
9098 	if (!grp_data) {
9099 		rte_flow_error_set(error, ENOMEM,
9100 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9101 				   NULL,
9102 				   "cannot allocate flow table data entry");
9103 		return NULL;
9104 	}
9105 	dr_tbl_attr.level = attr->group;
9106 	if (attr->transfer)
9107 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
9108 	else if (attr->egress)
9109 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
9110 	else
9111 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
9112 	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
9113 	if (!tbl)
9114 		goto error;
9115 	grp_data->tbl = tbl;
9116 	if (attr->group) {
9117 		/* Jump action to be used by a non-root table. */
9118 		jump = mlx5dr_action_create_dest_table
9119 			(priv->dr_ctx, tbl,
9120 			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
9121 		if (!jump)
9122 			goto error;
9123 		grp_data->jump.hws_action = jump;
9124 		/* Jump action to be used by the root table. */
9125 		jump = mlx5dr_action_create_dest_table
9126 			(priv->dr_ctx, tbl,
9127 			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
9128 					 [dr_tbl_attr.type]);
9129 		if (!jump)
9130 			goto error;
9131 		grp_data->jump.root_action = jump;
9132 	}
9133 
9134 	grp_data->matchers = mlx5_list_create(matcher_name, sh, true,
9135 					      flow_matcher_create_cb,
9136 					      flow_matcher_match_cb,
9137 					      flow_matcher_remove_cb,
9138 					      flow_matcher_clone_cb,
9139 					      flow_matcher_clone_free_cb);
9140 	grp_data->dev = dev;
9141 	grp_data->idx = idx;
9142 	grp_data->group_id = attr->group;
9143 	grp_data->type = dr_tbl_attr.type;
9144 	return &grp_data->entry;
9145 error:
9146 	if (grp_data->jump.root_action)
9147 		mlx5dr_action_destroy(grp_data->jump.root_action);
9148 	if (grp_data->jump.hws_action)
9149 		mlx5dr_action_destroy(grp_data->jump.hws_action);
9150 	if (tbl)
9151 		mlx5dr_table_destroy(tbl);
9152 	if (idx)
9153 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
9154 	rte_flow_error_set(error, ENOMEM,
9155 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9156 			   NULL,
9157 			   "cannot allocate flow dr table");
9158 	return NULL;
9159 }
9160 
9161 /**
9162  * Remove group callback.
9163  *
9164  * @param[in] tool_ctx
9165  *   Pointer to the hash list related context.
9166  * @param[in] entry
9167  *   Pointer to the entry to be removed.
9168  */
9169 void
9170 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
9171 {
9172 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9173 	struct mlx5_flow_group *grp_data =
9174 		    container_of(entry, struct mlx5_flow_group, entry);
9175 
9176 	MLX5_ASSERT(entry && sh);
9177 	/* To use the wrapper glue functions instead. */
9178 	if (grp_data->jump.hws_action)
9179 		mlx5dr_action_destroy(grp_data->jump.hws_action);
9180 	if (grp_data->jump.root_action)
9181 		mlx5dr_action_destroy(grp_data->jump.root_action);
9182 	mlx5_list_destroy(grp_data->matchers);
9183 	mlx5dr_table_destroy(grp_data->tbl);
9184 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
9185 }
9186 
9187 /**
9188  * Match group callback.
9189  *
9190  * @param[in] tool_ctx
9191  *   Pointer to the hash list related context.
9192  * @param[in] entry
9193  *   Pointer to the group to be matched.
9194  * @param[in] cb_ctx
9195  *   Pointer to the group matching context.
9196  *
9197  * @return
9198  *   0 if matched, 1 if not matched.
9199  */
9200 int
9201 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
9202 		     void *cb_ctx)
9203 {
9204 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9205 	struct mlx5_flow_group *grp_data =
9206 		container_of(entry, struct mlx5_flow_group, entry);
9207 	struct rte_flow_attr *attr =
9208 			(struct rte_flow_attr *)ctx->data;
9209 
9210 	return (grp_data->dev != ctx->dev) ||
9211 		(grp_data->group_id != attr->group) ||
9212 		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
9213 		attr->transfer) ||
9214 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
9215 		attr->egress) ||
9216 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
9217 		attr->ingress);
9218 }
9219 
9220 /**
9221  * Clone group entry callback.
9222  *
9223  * @param[in] tool_ctx
9224  *   Pointer to the hash list related context.
9225  * @param[in] oentry
9226  *   Pointer to the group entry to be cloned.
9227  * @param[in] cb_ctx
9228  *   Pointer to the group clone context.
9229  *
9230  * @return
9231  *   Cloned group entry on success, NULL otherwise and rte_errno is set.
9232  */
9233 struct mlx5_list_entry *
9234 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
9235 		     void *cb_ctx)
9236 {
9237 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9238 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9239 	struct mlx5_flow_group *grp_data;
9240 	struct rte_flow_error *error = ctx->error;
9241 	uint32_t idx = 0;
9242 
9243 	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
9244 	if (!grp_data) {
9245 		rte_flow_error_set(error, ENOMEM,
9246 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9247 				   NULL,
9248 				   "cannot allocate flow table data entry");
9249 		return NULL;
9250 	}
9251 	memcpy(grp_data, oentry, sizeof(*grp_data));
9252 	grp_data->idx = idx;
9253 	return &grp_data->entry;
9254 }
9255 
9256 /**
9257  * Free cloned group entry callback.
9258  *
9259  * @param[in] tool_ctx
9260  *   Pointer to the hash list related context.
9261  * @param[in] entry
9262  *   Pointer to the group to be freed.
9263  */
9264 void
9265 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
9266 {
9267 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9268 	struct mlx5_flow_group *grp_data =
9269 		    container_of(entry, struct mlx5_flow_group, entry);
9270 
9271 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
9272 }
9273 
9274 /**
9275  * Create and cache a vport action for the given @p dev port. The vport action
9276  * cache is used by HWS with FDB flows.
9277  *
9278  * This function does not create any action if the proxy port for @p dev port
9279  * was not configured for HW Steering.
9280  *
9281  * This function assumes that E-Switch is enabled and PMD is running with
9282  * HW Steering configured.
9283  *
9284  * @param dev
9285  *   Pointer to Ethernet device which will be the action destination.
9286  *
9287  * @return
9288  *   0 on success, a negative value otherwise.
9289  */
9290 int
9291 flow_hw_create_vport_action(struct rte_eth_dev *dev)
9292 {
9293 	struct mlx5_priv *priv = dev->data->dev_private;
9294 	struct rte_eth_dev *proxy_dev;
9295 	struct mlx5_priv *proxy_priv;
9296 	uint16_t port_id = dev->data->port_id;
9297 	uint16_t proxy_port_id = port_id;
9298 	int ret;
9299 
9300 	ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
9301 	if (ret)
9302 		return ret;
9303 	proxy_dev = &rte_eth_devices[proxy_port_id];
9304 	proxy_priv = proxy_dev->data->dev_private;
9305 	if (!proxy_priv->hw_vport)
9306 		return 0;
9307 	if (proxy_priv->hw_vport[port_id]) {
9308 		DRV_LOG(ERR, "port %u HWS vport action already created",
9309 			port_id);
9310 		return -EINVAL;
9311 	}
9312 	proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
9313 			(proxy_priv->dr_ctx, priv->dev_port,
9314 			 MLX5DR_ACTION_FLAG_HWS_FDB);
9315 	if (!proxy_priv->hw_vport[port_id]) {
9316 		DRV_LOG(ERR, "port %u unable to create HWS vport action",
9317 			port_id);
9318 		return -EINVAL;
9319 	}
9320 	return 0;
9321 }
9322 
9323 /**
9324  * Removes the vport action associated with @p dev device from the
9325  * vport action cache and destroys it.
9326  *
9327  * This function does not destroy any action if there is no action cached
9328  * for @p dev or the proxy port was not configured for HW Steering.
9329  *
9330  * This function assumes that E-Switch is enabled and PMD is running with
9331  * HW Steering configured.
9332  *
9333  * @param dev
9334  *   Pointer to Ethernet device which will be the action destination.
9335  */
9336 void
9337 flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
9338 {
9339 	struct rte_eth_dev *proxy_dev;
9340 	struct mlx5_priv *proxy_priv;
9341 	uint16_t port_id = dev->data->port_id;
9342 	uint16_t proxy_port_id = port_id;
9343 
9344 	if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
9345 		return;
9346 	proxy_dev = &rte_eth_devices[proxy_port_id];
9347 	proxy_priv = proxy_dev->data->dev_private;
9348 	if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
9349 		return;
9350 	mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
9351 	proxy_priv->hw_vport[port_id] = NULL;
9352 }
9353 
9354 static int
9355 flow_hw_create_vport_actions(struct mlx5_priv *priv)
9356 {
9357 	uint16_t port_id;
9358 
9359 	MLX5_ASSERT(!priv->hw_vport);
9360 	priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
9361 				     sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
9362 				     0, SOCKET_ID_ANY);
9363 	if (!priv->hw_vport)
9364 		return -ENOMEM;
9365 	DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
9366 	DRV_LOG(DEBUG, "port %u ::    domain_id=%u", priv->dev_data->port_id, priv->domain_id);
9367 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
9368 		struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;
9369 
9370 		if (!port_priv ||
9371 		    port_priv->domain_id != priv->domain_id)
9372 			continue;
9373 		DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
9374 			priv->dev_data->port_id, port_id, port_priv->dev_port);
9375 		priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
9376 				(priv->dr_ctx, port_priv->dev_port,
9377 				 MLX5DR_ACTION_FLAG_HWS_FDB);
9378 		DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
9379 			priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
9380 		if (!priv->hw_vport[port_id])
9381 			return -EINVAL;
9382 	}
9383 	return 0;
9384 }
9385 
9386 static void
9387 flow_hw_free_vport_actions(struct mlx5_priv *priv)
9388 {
9389 	uint16_t port_id;
9390 
9391 	if (!priv->hw_vport)
9392 		return;
9393 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
9394 		if (priv->hw_vport[port_id])
9395 			mlx5dr_action_destroy(priv->hw_vport[port_id]);
9396 	mlx5_free(priv->hw_vport);
9397 	priv->hw_vport = NULL;
9398 }
9399 
9400 static void
9401 flow_hw_create_send_to_kernel_actions(struct mlx5_priv *priv __rte_unused)
9402 {
9403 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
9404 	int action_flag;
9405 	int i;
9406 	bool is_vf_sf_dev = priv->sh->dev_cap.vf || priv->sh->dev_cap.sf;
9407 
9408 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9409 		if ((!priv->sh->config.dv_esw_en || is_vf_sf_dev) &&
9410 		     i == MLX5DR_TABLE_TYPE_FDB)
9411 			continue;
9412 		action_flag = mlx5_hw_act_flag[1][i];
9413 		priv->hw_send_to_kernel[i] =
9414 				mlx5dr_action_create_dest_root(priv->dr_ctx,
9415 							MLX5_HW_LOWEST_PRIO_ROOT,
9416 							action_flag);
9417 		if (!priv->hw_send_to_kernel[i]) {
9418 			DRV_LOG(WARNING, "Unable to create HWS send to kernel action");
9419 			return;
9420 		}
9421 	}
9422 #endif
9423 }
9424 
9425 static void
9426 flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv)
9427 {
9428 	int i;

9429 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9430 		if (priv->hw_send_to_kernel[i]) {
9431 			mlx5dr_action_destroy(priv->hw_send_to_kernel[i]);
9432 			priv->hw_send_to_kernel[i] = NULL;
9433 		}
9434 	}
9435 }
9436 
9437 static void
9438 flow_hw_destroy_nat64_actions(struct mlx5_priv *priv)
9439 {
9440 	uint32_t i;
9441 
9442 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9443 		if (priv->action_nat64[i][RTE_FLOW_NAT64_6TO4]) {
9444 			(void)mlx5dr_action_destroy(priv->action_nat64[i][RTE_FLOW_NAT64_6TO4]);
9445 			priv->action_nat64[i][RTE_FLOW_NAT64_6TO4] = NULL;
9446 		}
9447 		if (priv->action_nat64[i][RTE_FLOW_NAT64_4TO6]) {
9448 			(void)mlx5dr_action_destroy(priv->action_nat64[i][RTE_FLOW_NAT64_4TO6]);
9449 			priv->action_nat64[i][RTE_FLOW_NAT64_4TO6] = NULL;
9450 		}
9451 	}
9452 }
9453 
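/**
 * Create shared NAT64 actions (6-to-4 and 4-to-6) for each supported table type,
 * using the registers reserved for NAT64 during device initialization.
 *
 * @param priv
 *   Pointer to the port private data structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */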
9454 static int
9455 flow_hw_create_nat64_actions(struct mlx5_priv *priv, struct rte_flow_error *error)
9456 {
9457 	struct mlx5dr_action_nat64_attr attr;
9458 	uint8_t regs[MLX5_FLOW_NAT64_REGS_MAX];
9459 	uint32_t i;
9460 	const uint32_t flags[MLX5DR_TABLE_TYPE_MAX] = {
9461 		MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED,
9462 		MLX5DR_ACTION_FLAG_HWS_TX | MLX5DR_ACTION_FLAG_SHARED,
9463 		MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED,
9464 	};
9465 	struct mlx5dr_action *act;
9466 
9467 	attr.registers = regs;
9468 	/* Try to use 3 registers by default. */
9469 	attr.num_of_registers = MLX5_FLOW_NAT64_REGS_MAX;
9470 	for (i = 0; i < MLX5_FLOW_NAT64_REGS_MAX; i++) {
9471 		MLX5_ASSERT(priv->sh->registers.nat64_regs[i] != REG_NON);
9472 		regs[i] = mlx5_convert_reg_to_field(priv->sh->registers.nat64_regs[i]);
9473 	}
9474 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9475 		if (i == MLX5DR_TABLE_TYPE_FDB && !priv->sh->config.dv_esw_en)
9476 			continue;
9477 		attr.flags = (enum mlx5dr_action_nat64_flags)
9478 			     (MLX5DR_ACTION_NAT64_V6_TO_V4 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
9479 		act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
9480 		if (!act)
9481 			return rte_flow_error_set(error, rte_errno,
9482 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9483 						  "Failed to create v6 to v4 action.");
9484 		priv->action_nat64[i][RTE_FLOW_NAT64_6TO4] = act;
9485 		attr.flags = (enum mlx5dr_action_nat64_flags)
9486 			     (MLX5DR_ACTION_NAT64_V4_TO_V6 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
9487 		act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
9488 		if (!act)
9489 			return rte_flow_error_set(error, rte_errno,
9490 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9491 						  "Failed to create v4 to v6 action.");
9492 		priv->action_nat64[i][RTE_FLOW_NAT64_4TO6] = act;
9493 	}
9494 	return 0;
9495 }
9496 
9497 /**
9498  * Create an egress pattern template matching on source SQ.
9499  *
9500  * @param dev
9501  *   Pointer to Ethernet device.
9502  * @param[out] error
9503  *   Pointer to error structure.
9504  *
9505  * @return
9506  *   Pointer to pattern template on success. NULL otherwise, and rte_errno is set.
9507  */
9508 static struct rte_flow_pattern_template *
9509 flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev, struct rte_flow_error *error)
9510 {
9511 	struct rte_flow_pattern_template_attr attr = {
9512 		.relaxed_matching = 0,
9513 		.egress = 1,
9514 	};
9515 	struct mlx5_rte_flow_item_sq sq_mask = {
9516 		.queue = UINT32_MAX,
9517 	};
9518 	struct rte_flow_item items[] = {
9519 		{
9520 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9521 			.mask = &sq_mask,
9522 		},
9523 		{
9524 			.type = RTE_FLOW_ITEM_TYPE_END,
9525 		},
9526 	};
9527 
9528 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9529 }
9530 
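/**
 * Returns the mask of REG_C_0 bits available for storing the vport tag
 * in Tx representor matching flow rules.
 */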
9531 static __rte_always_inline uint32_t
9532 flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
9533 {
9534 	struct mlx5_priv *priv = dev->data->dev_private;
9535 	uint32_t mask = priv->sh->dv_regc0_mask;
9536 
9537 	/* Mask is verified during device initialization. Sanity checking here. */
9538 	MLX5_ASSERT(mask != 0);
9539 	/*
9540 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
9541 	 * Sanity checking here.
9542 	 */
9543 	MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask));
9544 	return mask;
9545 }
9546 
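/**
 * Returns the vport tag of the port, shifted down so that it can be placed
 * at the offset of the REG_C_0 bits returned by flow_hw_tx_tag_regc_mask().
 */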
9547 static __rte_always_inline uint32_t
9548 flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
9549 {
9550 	struct mlx5_priv *priv = dev->data->dev_private;
9551 	uint32_t tag;
9552 
9553 	/* Mask is verified during device initialization. Sanity checking here. */
9554 	MLX5_ASSERT(priv->vport_meta_mask != 0);
9555 	tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
9556 	/*
9557 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
9558 	 * Sanity checking here.
9559 	 */
9560 	MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
9561 	return tag;
9562 }
9563 
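/**
 * Helper filling a single action/mask pair when building actions templates
 * for control flow rules.
 */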
9564 static void
9565 flow_hw_update_action_mask(struct rte_flow_action *action,
9566 			   struct rte_flow_action *mask,
9567 			   enum rte_flow_action_type type,
9568 			   void *conf_v,
9569 			   void *conf_m)
9570 {
9571 	action->type = type;
9572 	action->conf = conf_v;
9573 	mask->type = type;
9574 	mask->conf = conf_m;
9575 }
9576 
9577 /**
9578  * Create an egress actions template with MODIFY_FIELD action for setting unused REG_C_0 bits
9579  * to vport tag and JUMP action to group 1.
9580  *
9581  * If extended metadata mode is enabled, then MODIFY_FIELD action for copying software metadata
9582  * to REG_C_1 is added as well.
9583  *
9584  * @param dev
9585  *   Pointer to Ethernet device.
9586  * @param[out] error
9587  *   Pointer to error structure.
9588  *
9589  * @return
9590  *   Pointer to actions template on success. NULL otherwise, and rte_errno is set.
9591  */
9592 static struct rte_flow_actions_template *
9593 flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev,
9594 					  struct rte_flow_error *error)
9595 {
9596 	uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev);
9597 	uint32_t tag_value = flow_hw_tx_tag_regc_value(dev);
9598 	struct rte_flow_actions_template_attr attr = {
9599 		.egress = 1,
9600 	};
9601 	struct rte_flow_action_modify_field set_tag_v = {
9602 		.operation = RTE_FLOW_MODIFY_SET,
9603 		.dst = {
9604 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9605 			.tag_index = REG_C_0,
9606 			.offset = rte_bsf32(tag_mask),
9607 		},
9608 		.src = {
9609 			.field = RTE_FLOW_FIELD_VALUE,
9610 		},
9611 		.width = rte_popcount32(tag_mask),
9612 	};
9613 	struct rte_flow_action_modify_field set_tag_m = {
9614 		.operation = RTE_FLOW_MODIFY_SET,
9615 		.dst = {
9616 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9617 			.level = UINT8_MAX,
9618 			.tag_index = UINT8_MAX,
9619 			.offset = UINT32_MAX,
9620 		},
9621 		.src = {
9622 			.field = RTE_FLOW_FIELD_VALUE,
9623 		},
9624 		.width = UINT32_MAX,
9625 	};
9626 	struct rte_flow_action_modify_field copy_metadata_v = {
9627 		.operation = RTE_FLOW_MODIFY_SET,
9628 		.dst = {
9629 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9630 			.tag_index = REG_C_1,
9631 		},
9632 		.src = {
9633 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9634 			.tag_index = REG_A,
9635 		},
9636 		.width = 32,
9637 	};
9638 	struct rte_flow_action_modify_field copy_metadata_m = {
9639 		.operation = RTE_FLOW_MODIFY_SET,
9640 		.dst = {
9641 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9642 			.level = UINT8_MAX,
9643 			.tag_index = UINT8_MAX,
9644 			.offset = UINT32_MAX,
9645 		},
9646 		.src = {
9647 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9648 			.level = UINT8_MAX,
9649 			.tag_index = UINT8_MAX,
9650 			.offset = UINT32_MAX,
9651 		},
9652 		.width = UINT32_MAX,
9653 	};
9654 	struct rte_flow_action_jump jump_v = {
9655 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
9656 	};
9657 	struct rte_flow_action_jump jump_m = {
9658 		.group = UINT32_MAX,
9659 	};
9660 	struct rte_flow_action actions_v[4] = { { 0 } };
9661 	struct rte_flow_action actions_m[4] = { { 0 } };
9662 	unsigned int idx = 0;
9663 
9664 	rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
9665 	rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
9666 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
9667 				   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
9668 				   &set_tag_v, &set_tag_m);
9669 	idx++;
9670 	if (MLX5_SH(dev)->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
9671 		flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
9672 					   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
9673 					   &copy_metadata_v, &copy_metadata_m);
9674 		idx++;
9675 	}
9676 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_JUMP,
9677 				   &jump_v, &jump_m);
9678 	idx++;
9679 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_END,
9680 				   NULL, NULL);
9681 	idx++;
9682 	MLX5_ASSERT(idx <= RTE_DIM(actions_v));
9683 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
9684 }
9685 
9686 static void
9687 flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev)
9688 {
9689 	struct mlx5_priv *priv = dev->data->dev_private;
9690 
9691 	if (priv->hw_tx_repr_tagging_tbl) {
9692 		flow_hw_table_destroy(dev, priv->hw_tx_repr_tagging_tbl, NULL);
9693 		priv->hw_tx_repr_tagging_tbl = NULL;
9694 	}
9695 	if (priv->hw_tx_repr_tagging_at) {
9696 		flow_hw_actions_template_destroy(dev, priv->hw_tx_repr_tagging_at, NULL);
9697 		priv->hw_tx_repr_tagging_at = NULL;
9698 	}
9699 	if (priv->hw_tx_repr_tagging_pt) {
9700 		flow_hw_pattern_template_destroy(dev, priv->hw_tx_repr_tagging_pt, NULL);
9701 		priv->hw_tx_repr_tagging_pt = NULL;
9702 	}
9703 }
9704 
9705 /**
9706  * Set up templates and a table used to create default Tx flow rules. These default rules
9707  * allow for matching Tx representor traffic using a vport tag placed in unused bits of
9708  * REG_C_0 register.
9709  *
9710  * @param dev
9711  *   Pointer to Ethernet device.
9712  * @param[out] error
9713  *   Pointer to error structure.
9714  *
9715  * @return
9716  *   0 on success, negative errno value otherwise.
9717  */
9718 static int
9719 flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev, struct rte_flow_error *error)
9720 {
9721 	struct mlx5_priv *priv = dev->data->dev_private;
9722 	struct rte_flow_template_table_attr attr = {
9723 		.flow_attr = {
9724 			.group = 0,
9725 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
9726 			.egress = 1,
9727 		},
9728 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
9729 	};
9730 	struct mlx5_flow_template_table_cfg cfg = {
9731 		.attr = attr,
9732 		.external = false,
9733 	};
9734 
9735 	MLX5_ASSERT(priv->sh->config.dv_esw_en);
9736 	MLX5_ASSERT(priv->sh->config.repr_matching);
9737 	priv->hw_tx_repr_tagging_pt =
9738 		flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error);
9739 	if (!priv->hw_tx_repr_tagging_pt)
9740 		goto err;
9741 	priv->hw_tx_repr_tagging_at =
9742 		flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev, error);
9743 	if (!priv->hw_tx_repr_tagging_at)
9744 		goto err;
9745 	priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg,
9746 							    &priv->hw_tx_repr_tagging_pt, 1,
9747 							    &priv->hw_tx_repr_tagging_at, 1,
9748 							    error);
9749 	if (!priv->hw_tx_repr_tagging_tbl)
9750 		goto err;
9751 	return 0;
9752 err:
9753 	flow_hw_cleanup_tx_repr_tagging(dev);
9754 	return -rte_errno;
9755 }
9756 
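/**
 * Returns the mask of REG_C_0 bits used for matching on the E-Switch Manager
 * marker.
 */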
9757 static uint32_t
9758 flow_hw_esw_mgr_regc_marker_mask(struct rte_eth_dev *dev)
9759 {
9760 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
9761 
9762 	/* Mask is verified during device initialization. */
9763 	MLX5_ASSERT(mask != 0);
9764 	return mask;
9765 }
9766 
9767 static uint32_t
9768 flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev)
9769 {
9770 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
9771 
9772 	/* Mask is verified during device initialization. */
9773 	MLX5_ASSERT(mask != 0);
9774 	return RTE_BIT32(rte_bsf32(mask));
9775 }
9776 
9777 /**
9778  * Creates a flow pattern template used to match on E-Switch Manager.
9779  * This template is used to set up a table for SQ miss default flow.
9780  *
9781  * @param dev
9782  *   Pointer to Ethernet device.
9783  * @param error
9784  *   Pointer to error structure.
9785  *
9786  * @return
9787  *   Pointer to flow pattern template on success, NULL otherwise.
9788  */
9789 static struct rte_flow_pattern_template *
9790 flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev,
9791 					     struct rte_flow_error *error)
9792 {
9793 	struct rte_flow_pattern_template_attr attr = {
9794 		.relaxed_matching = 0,
9795 		.transfer = 1,
9796 	};
9797 	struct rte_flow_item_ethdev port_spec = {
9798 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
9799 	};
9800 	struct rte_flow_item_ethdev port_mask = {
9801 		.port_id = UINT16_MAX,
9802 	};
9803 	struct mlx5_rte_flow_item_sq sq_mask = {
9804 		.queue = UINT32_MAX,
9805 	};
9806 	struct rte_flow_item items[] = {
9807 		{
9808 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
9809 			.spec = &port_spec,
9810 			.mask = &port_mask,
9811 		},
9812 		{
9813 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9814 			.mask = &sq_mask,
9815 		},
9816 		{
9817 			.type = RTE_FLOW_ITEM_TYPE_END,
9818 		},
9819 	};
9820 
9821 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9822 }
9823 
9824 /**
9825  * Creates a flow pattern template used to match REG_C_0 and a SQ.
9826  * Matching on REG_C_0 is set up to match on all bits usable by user-space.
9827  * If traffic was sent from E-Switch Manager, then all usable bits will be set to 0,
9828  * except the least significant bit, which will be set to 1.
9829  *
9830  * This template is used to set up a table for SQ miss default flow.
9831  *
9832  * @param dev
9833  *   Pointer to Ethernet device.
9834  * @param error
9835  *   Pointer to error structure.
9836  *
9837  * @return
9838  *   Pointer to flow pattern template on success, NULL otherwise.
9839  */
9840 static struct rte_flow_pattern_template *
9841 flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev,
9842 					     struct rte_flow_error *error)
9843 {
9844 	struct rte_flow_pattern_template_attr attr = {
9845 		.relaxed_matching = 0,
9846 		.transfer = 1,
9847 	};
9848 	struct rte_flow_item_tag reg_c0_spec = {
9849 		.index = (uint8_t)REG_C_0,
9850 	};
9851 	struct rte_flow_item_tag reg_c0_mask = {
9852 		.index = 0xff,
9853 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
9854 	};
9855 	struct mlx5_rte_flow_item_sq queue_mask = {
9856 		.queue = UINT32_MAX,
9857 	};
9858 	struct rte_flow_item items[] = {
9859 		{
9860 			.type = (enum rte_flow_item_type)
9861 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
9862 			.spec = &reg_c0_spec,
9863 			.mask = &reg_c0_mask,
9864 		},
9865 		{
9866 			.type = (enum rte_flow_item_type)
9867 				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9868 			.mask = &queue_mask,
9869 		},
9870 		{
9871 			.type = RTE_FLOW_ITEM_TYPE_END,
9872 		},
9873 	};
9874 
9875 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9876 }
9877 
9878 /**
9879  * Creates a flow pattern template with unmasked represented port matching.
9880  * This template is used to set up a table for default transfer flows
9881  * directing packets to group 1.
9882  *
9883  * @param dev
9884  *   Pointer to Ethernet device.
9885  * @param error
9886  *   Pointer to error structure.
9887  *
9888  * @return
9889  *   Pointer to flow pattern template on success, NULL otherwise.
9890  */
9891 static struct rte_flow_pattern_template *
9892 flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev,
9893 					  struct rte_flow_error *error)
9894 {
9895 	struct rte_flow_pattern_template_attr attr = {
9896 		.relaxed_matching = 0,
9897 		.transfer = 1,
9898 	};
9899 	struct rte_flow_item_ethdev port_mask = {
9900 		.port_id = UINT16_MAX,
9901 	};
9902 	struct rte_flow_item items[] = {
9903 		{
9904 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
9905 			.mask = &port_mask,
9906 		},
9907 		{
9908 			.type = RTE_FLOW_ITEM_TYPE_END,
9909 		},
9910 	};
9911 
9912 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9913 }
9914 
9915 /*
9916  * Creates a flow pattern template matching all Ethernet packets.
9917  * This template is used to set up a table for the default Tx metadata copy
9918  * (Tx metadata to REG_C_1) flow rule.
9919  *
9920  * @param dev
9921  *   Pointer to Ethernet device.
9922  * @param error
9923  *   Pointer to error structure.
9924  *
9925  * @return
9926  *   Pointer to flow pattern template on success, NULL otherwise.
9927  */
9928 static struct rte_flow_pattern_template *
9929 flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev,
9930 						     struct rte_flow_error *error)
9931 {
9932 	struct rte_flow_pattern_template_attr tx_pa_attr = {
9933 		.relaxed_matching = 0,
9934 		.egress = 1,
9935 	};
9936 	struct rte_flow_item_eth promisc = {
9937 		.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
9938 		.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
9939 		.hdr.ether_type = 0,
9940 	};
9941 	struct rte_flow_item eth_all[] = {
9942 		[0] = {
9943 			.type = RTE_FLOW_ITEM_TYPE_ETH,
9944 			.spec = &promisc,
9945 			.mask = &promisc,
9946 		},
9947 		[1] = {
9948 			.type = RTE_FLOW_ITEM_TYPE_END,
9949 		},
9950 	};
9951 
9952 	return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, error);
9953 }
9954 
9955 /*
9956  * Creates a flow pattern template matching all LACP packets, for the NIC
9957  * ingress domain only.
9958  *
9959  * @param dev
9960  *   Pointer to Ethernet device.
9961  * @param error
9962  *   Pointer to error structure.
9963  *
9964  * @return
9965  *   Pointer to flow pattern template on success, NULL otherwise.
9966  */
9967 static struct rte_flow_pattern_template *
9968 flow_hw_create_lacp_rx_pattern_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
9969 {
9970 	struct rte_flow_pattern_template_attr pa_attr = {
9971 		.relaxed_matching = 0,
9972 		.ingress = 1,
9973 	};
9974 	struct rte_flow_item_eth lacp_mask = {
9975 		.dst.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
9976 		.src.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
9977 		.type = 0xFFFF,
9978 	};
9979 	struct rte_flow_item eth_all[] = {
9980 		[0] = {
9981 			.type = RTE_FLOW_ITEM_TYPE_ETH,
9982 			.mask = &lacp_mask,
9983 		},
9984 		[1] = {
9985 			.type = RTE_FLOW_ITEM_TYPE_END,
9986 		},
9987 	};
9988 	return flow_hw_pattern_template_create(dev, &pa_attr, eth_all, error);
9989 }
9990 
9991 /**
9992  * Creates a flow actions template with modify field action and masked jump action.
9993  * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)
9994  * to 1, meaning that packet was originated from E-Switch Manager. Jump action
9995  * transfers steering to group 1.
9996  *
9997  * @param dev
9998  *   Pointer to Ethernet device.
9999  * @param error
10000  *   Pointer to error structure.
10001  *
10002  * @return
10003  *   Pointer to flow actions template on success, NULL otherwise.
10004  */
10005 static struct rte_flow_actions_template *
10006 flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev,
10007 					       struct rte_flow_error *error)
10008 {
10009 	uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev);
10010 	uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev);
10011 	struct rte_flow_actions_template_attr attr = {
10012 		.transfer = 1,
10013 	};
10014 	struct rte_flow_action_modify_field set_reg_v = {
10015 		.operation = RTE_FLOW_MODIFY_SET,
10016 		.dst = {
10017 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10018 			.tag_index = REG_C_0,
10019 		},
10020 		.src = {
10021 			.field = RTE_FLOW_FIELD_VALUE,
10022 		},
10023 		.width = rte_popcount32(marker_mask),
10024 	};
10025 	struct rte_flow_action_modify_field set_reg_m = {
10026 		.operation = RTE_FLOW_MODIFY_SET,
10027 		.dst = {
10028 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10029 			.level = UINT8_MAX,
10030 			.tag_index = UINT8_MAX,
10031 			.offset = UINT32_MAX,
10032 		},
10033 		.src = {
10034 			.field = RTE_FLOW_FIELD_VALUE,
10035 		},
10036 		.width = UINT32_MAX,
10037 	};
10038 	struct rte_flow_action_jump jump_v = {
10039 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
10040 	};
10041 	struct rte_flow_action_jump jump_m = {
10042 		.group = UINT32_MAX,
10043 	};
10044 	struct rte_flow_action actions_v[] = {
10045 		{
10046 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10047 			.conf = &set_reg_v,
10048 		},
10049 		{
10050 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10051 			.conf = &jump_v,
10052 		},
10053 		{
10054 			.type = RTE_FLOW_ACTION_TYPE_END,
10055 		}
10056 	};
10057 	struct rte_flow_action actions_m[] = {
10058 		{
10059 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10060 			.conf = &set_reg_m,
10061 		},
10062 		{
10063 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10064 			.conf = &jump_m,
10065 		},
10066 		{
10067 			.type = RTE_FLOW_ACTION_TYPE_END,
10068 		}
10069 	};
10070 
10071 	set_reg_v.dst.offset = rte_bsf32(marker_mask);
10072 	rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
10073 	rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
10074 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
10075 }
10076 
10077 /**
10078  * Creates a flow actions template with an unmasked JUMP action. Flows
10079  * based on this template will perform a jump to some group. This template
10080  * is used to set up tables for control flows.
10081  *
10082  * @param dev
10083  *   Pointer to Ethernet device.
10084  * @param group
10085  *   Destination group for this action template.
10086  * @param error
10087  *   Pointer to error structure.
10088  *
10089  * @return
10090  *   Pointer to flow actions template on success, NULL otherwise.
10091  */
10092 static struct rte_flow_actions_template *
10093 flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
10094 					  uint32_t group,
10095 					  struct rte_flow_error *error)
10096 {
10097 	struct rte_flow_actions_template_attr attr = {
10098 		.transfer = 1,
10099 	};
10100 	struct rte_flow_action_jump jump_v = {
10101 		.group = group,
10102 	};
10103 	struct rte_flow_action_jump jump_m = {
10104 		.group = UINT32_MAX,
10105 	};
10106 	struct rte_flow_action actions_v[] = {
10107 		{
10108 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10109 			.conf = &jump_v,
10110 		},
10111 		{
10112 			.type = RTE_FLOW_ACTION_TYPE_END,
10113 		}
10114 	};
10115 	struct rte_flow_action actions_m[] = {
10116 		{
10117 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10118 			.conf = &jump_m,
10119 		},
10120 		{
10121 			.type = RTE_FLOW_ACTION_TYPE_END,
10122 		}
10123 	};
10124 
10125 	return flow_hw_actions_template_create(dev, &attr, actions_v,
10126 					       actions_m, error);
10127 }
10128 
10129 /**
10130  * Creates a flow actions template with an unmasked REPRESENTED_PORT action.
10131  * It is used to create control flow tables.
10132  *
10133  * @param dev
10134  *   Pointer to Ethernet device.
10135  * @param error
10136  *   Pointer to error structure.
10137  *
10138  * @return
10139  *   Pointer to flow action template on success, NULL otherwise.
10140  */
10141 static struct rte_flow_actions_template *
10142 flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev,
10143 					  struct rte_flow_error *error)
10144 {
10145 	struct rte_flow_actions_template_attr attr = {
10146 		.transfer = 1,
10147 	};
10148 	struct rte_flow_action_ethdev port_v = {
10149 		.port_id = 0,
10150 	};
10151 	struct rte_flow_action actions_v[] = {
10152 		{
10153 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
10154 			.conf = &port_v,
10155 		},
10156 		{
10157 			.type = RTE_FLOW_ACTION_TYPE_END,
10158 		}
10159 	};
10160 	struct rte_flow_action_ethdev port_m = {
10161 		.port_id = 0,
10162 	};
10163 	struct rte_flow_action actions_m[] = {
10164 		{
10165 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
10166 			.conf = &port_m,
10167 		},
10168 		{
10169 			.type = RTE_FLOW_ACTION_TYPE_END,
10170 		}
10171 	};
10172 
10173 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
10174 }
10175 
10176 /*
10177  * Creates an actions template which uses a modify header action for register
10178  * copying. This template is used to set up a table for the Tx metadata copy flow.
10179  *
10180  * @param dev
10181  *   Pointer to Ethernet device.
10182  * @param error
10183  *   Pointer to error structure.
10184  *
10185  * @return
10186  *   Pointer to flow actions template on success, NULL otherwise.
10187  */
10188 static struct rte_flow_actions_template *
10189 flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev,
10190 						     struct rte_flow_error *error)
10191 {
10192 	struct rte_flow_actions_template_attr tx_act_attr = {
10193 		.egress = 1,
10194 	};
10195 	const struct rte_flow_action_modify_field mreg_action = {
10196 		.operation = RTE_FLOW_MODIFY_SET,
10197 		.dst = {
10198 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10199 			.tag_index = REG_C_1,
10200 		},
10201 		.src = {
10202 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10203 			.tag_index = REG_A,
10204 		},
10205 		.width = 32,
10206 	};
10207 	const struct rte_flow_action_modify_field mreg_mask = {
10208 		.operation = RTE_FLOW_MODIFY_SET,
10209 		.dst = {
10210 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10211 			.level = UINT8_MAX,
10212 			.tag_index = UINT8_MAX,
10213 			.offset = UINT32_MAX,
10214 		},
10215 		.src = {
10216 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10217 			.level = UINT8_MAX,
10218 			.tag_index = UINT8_MAX,
10219 			.offset = UINT32_MAX,
10220 		},
10221 		.width = UINT32_MAX,
10222 	};
10223 	const struct rte_flow_action_jump jump_action = {
10224 		.group = 1,
10225 	};
10226 	const struct rte_flow_action_jump jump_mask = {
10227 		.group = UINT32_MAX,
10228 	};
10229 	const struct rte_flow_action actions[] = {
10230 		[0] = {
10231 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10232 			.conf = &mreg_action,
10233 		},
10234 		[1] = {
10235 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10236 			.conf = &jump_action,
10237 		},
10238 		[2] = {
10239 			.type = RTE_FLOW_ACTION_TYPE_END,
10240 		},
10241 	};
10242 	const struct rte_flow_action masks[] = {
10243 		[0] = {
10244 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10245 			.conf = &mreg_mask,
10246 		},
10247 		[1] = {
10248 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10249 			.conf = &jump_mask,
10250 		},
10251 		[2] = {
10252 			.type = RTE_FLOW_ACTION_TYPE_END,
10253 		},
10254 	};
10255 
10256 	return flow_hw_actions_template_create(dev, &tx_act_attr, actions,
10257 					       masks, error);
10258 }
10259 
10260 /*
10261  * Creates an actions template which uses the default miss action to re-route
10262  * packets to the kernel driver stack.
10263  * On the root table, only the DEFAULT_MISS action can be used.
10264  *
10265  * @param dev
10266  *   Pointer to Ethernet device.
10267  * @param error
10268  *   Pointer to error structure.
10269  *
10270  * @return
10271  *   Pointer to flow actions template on success, NULL otherwise.
10272  */
10273 static struct rte_flow_actions_template *
10274 flow_hw_create_lacp_rx_actions_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
10275 {
10276 	struct rte_flow_actions_template_attr act_attr = {
10277 		.ingress = 1,
10278 	};
10279 	const struct rte_flow_action actions[] = {
10280 		[0] = {
10281 			.type = (enum rte_flow_action_type)
10282 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
10283 		},
10284 		[1] = {
10285 			.type = RTE_FLOW_ACTION_TYPE_END,
10286 		},
10287 	};
10288 
10289 	return flow_hw_actions_template_create(dev, &act_attr, actions, actions, error);
10290 }
10291 
10292 /**
10293  * Creates a control flow table used to transfer traffic from E-Switch Manager
10294  * and TX queues from group 0 to group 1.
10295  *
10296  * @param dev
10297  *   Pointer to Ethernet device.
10298  * @param it
10299  *   Pointer to flow pattern template.
10300  * @param at
10301  *   Pointer to flow actions template.
10302  * @param error
10303  *   Pointer to error structure.
10304  *
10305  * @return
10306  *   Pointer to flow table on success, NULL otherwise.
10307  */
10308 static struct rte_flow_template_table*
10309 flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
10310 				       struct rte_flow_pattern_template *it,
10311 				       struct rte_flow_actions_template *at,
10312 				       struct rte_flow_error *error)
10313 {
10314 	struct rte_flow_template_table_attr attr = {
10315 		.flow_attr = {
10316 			.group = 0,
10317 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
10318 			.ingress = 0,
10319 			.egress = 0,
10320 			.transfer = 1,
10321 		},
10322 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10323 	};
10324 	struct mlx5_flow_template_table_cfg cfg = {
10325 		.attr = attr,
10326 		.external = false,
10327 	};
10328 
10329 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10330 }
10331 
10332 
10333 /**
10334  * Creates the non-root (group 1) control flow table used by the default SQ miss
10335  * rules to forward traffic from E-Switch Manager and Tx queues to the destination port.
10336  *
10337  * @param dev
10338  *   Pointer to Ethernet device.
10339  * @param it
10340  *   Pointer to flow pattern template.
10341  * @param at
10342  *   Pointer to flow actions template.
10343  * @param error
10344  *   Pointer to error structure.
10345  *
10346  * @return
10347  *   Pointer to flow table on success, NULL otherwise.
10348  */
10349 static struct rte_flow_template_table*
10350 flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
10351 				  struct rte_flow_pattern_template *it,
10352 				  struct rte_flow_actions_template *at,
10353 				  struct rte_flow_error *error)
10354 {
10355 	struct rte_flow_template_table_attr attr = {
10356 		.flow_attr = {
10357 			.group = 1,
10358 			.priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,
10359 			.ingress = 0,
10360 			.egress = 0,
10361 			.transfer = 1,
10362 		},
10363 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10364 	};
10365 	struct mlx5_flow_template_table_cfg cfg = {
10366 		.attr = attr,
10367 		.external = false,
10368 	};
10369 
10370 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10371 }
10372 
10373 /*
10374  * Creating the default Tx metadata copy table on NIC Tx group 0.
10375  *
10376  * @param dev
10377  *   Pointer to Ethernet device.
10378  * @param pt
10379  *   Pointer to flow pattern template.
10380  * @param at
10381  *   Pointer to flow actions template.
10382  * @param error
10383  *   Pointer to error structure.
10384  *
10385  * @return
10386  *   Pointer to flow table on success, NULL otherwise.
10387  */
10388 static struct rte_flow_template_table*
10389 flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
10390 					  struct rte_flow_pattern_template *pt,
10391 					  struct rte_flow_actions_template *at,
10392 					  struct rte_flow_error *error)
10393 {
10394 	struct rte_flow_template_table_attr tx_tbl_attr = {
10395 		.flow_attr = {
10396 			.group = 0, /* Root */
10397 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
10398 			.egress = 1,
10399 		},
10400 		.nb_flows = 1, /* One default flow rule for all. */
10401 	};
10402 	struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
10403 		.attr = tx_tbl_attr,
10404 		.external = false,
10405 	};
10406 
10407 	return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, error);
10408 }
10409 
10410 /**
10411  * Creates a control flow table used to transfer traffic
10412  * from group 0 to group 1.
10413  *
10414  * @param dev
10415  *   Pointer to Ethernet device.
10416  * @param it
10417  *   Pointer to flow pattern template.
10418  * @param at
10419  *   Pointer to flow actions template.
10420  * @param error
10421  *   Pointer to error structure.
10422  *
10423  * @return
10424  *   Pointer to flow table on success, NULL otherwise.
10425  */
10426 static struct rte_flow_template_table *
10427 flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
10428 			       struct rte_flow_pattern_template *it,
10429 			       struct rte_flow_actions_template *at,
10430 			       struct rte_flow_error *error)
10431 {
10432 	struct rte_flow_template_table_attr attr = {
10433 		.flow_attr = {
10434 			.group = 0,
10435 			.priority = 0,
10436 			.ingress = 0,
10437 			.egress = 0,
10438 			.transfer = 1,
10439 		},
10440 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10441 	};
10442 	struct mlx5_flow_template_table_cfg cfg = {
10443 		.attr = attr,
10444 		.external = false,
10445 	};
10446 
10447 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10448 }
10449 
10450 /**
10451  * Cleans up all template tables and all pattern and actions templates used for
10452  * FDB control flow rules.
10453  *
10454  * @param dev
10455  *   Pointer to Ethernet device.
10456  */
10457 static void
10458 flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev)
10459 {
10460 	struct mlx5_priv *priv = dev->data->dev_private;
10461 	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
10462 
10463 	if (!priv->hw_ctrl_fdb)
10464 		return;
10465 	hw_ctrl_fdb = priv->hw_ctrl_fdb;
10466 	/* Clean up templates used for LACP default miss table. */
10467 	if (hw_ctrl_fdb->hw_lacp_rx_tbl)
10468 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL));
10469 	if (hw_ctrl_fdb->lacp_rx_actions_tmpl)
10470 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl,
10471 			   NULL));
10472 	if (hw_ctrl_fdb->lacp_rx_items_tmpl)
10473 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
10474 			   NULL));
10475 	/* Clean up templates used for default Tx metadata copy. */
10476 	if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
10477 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL));
10478 	if (hw_ctrl_fdb->tx_meta_actions_tmpl)
10479 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl,
10480 			   NULL));
10481 	if (hw_ctrl_fdb->tx_meta_items_tmpl)
10482 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
10483 			   NULL));
10484 	/* Clean up templates used for default FDB jump rule. */
10485 	if (hw_ctrl_fdb->hw_esw_zero_tbl)
10486 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL));
10487 	if (hw_ctrl_fdb->jump_one_actions_tmpl)
10488 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl,
10489 			   NULL));
10490 	if (hw_ctrl_fdb->port_items_tmpl)
10491 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl,
10492 			   NULL));
10493 	/* Clean up templates used for default SQ miss flow rules - non-root table. */
10494 	if (hw_ctrl_fdb->hw_esw_sq_miss_tbl)
10495 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL));
10496 	if (hw_ctrl_fdb->regc_sq_items_tmpl)
10497 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
10498 			   NULL));
10499 	if (hw_ctrl_fdb->port_actions_tmpl)
10500 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl,
10501 			   NULL));
10502 	/* Clean up templates used for default SQ miss flow rules - root table. */
10503 	if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl)
10504 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL));
10505 	if (hw_ctrl_fdb->regc_jump_actions_tmpl)
10506 		claim_zero(flow_hw_actions_template_destroy(dev,
10507 			   hw_ctrl_fdb->regc_jump_actions_tmpl, NULL));
10508 	if (hw_ctrl_fdb->esw_mgr_items_tmpl)
10509 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
10510 			   NULL));
10511 	/* Clean up templates structure for FDB control flow rules. */
10512 	mlx5_free(hw_ctrl_fdb);
10513 	priv->hw_ctrl_fdb = NULL;
10514 }
10515 
10516 /*
10517  * Creates a table on the root group for redirecting LACP traffic.
10518  *
10519  * @param dev
10520  *   Pointer to Ethernet device.
10521  * @param it
10522  *   Pointer to flow pattern template.
10523  * @param at
10524  *   Pointer to flow actions template.
10525  *
10526  * @return
10527  *   Pointer to flow table on success, NULL otherwise.
10528  */
10529 static struct rte_flow_template_table *
10530 flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev,
10531 			     struct rte_flow_pattern_template *it,
10532 			     struct rte_flow_actions_template *at,
10533 			     struct rte_flow_error *error)
10534 {
10535 	struct rte_flow_template_table_attr attr = {
10536 		.flow_attr = {
10537 			.group = 0,
10538 			.priority = 0,
10539 			.ingress = 1,
10540 			.egress = 0,
10541 			.transfer = 0,
10542 		},
10543 		.nb_flows = 1,
10544 	};
10545 	struct mlx5_flow_template_table_cfg cfg = {
10546 		.attr = attr,
10547 		.external = false,
10548 	};
10549 
10550 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10551 }
10552 
10553 /**
10554  * Creates a set of flow tables used to create control flows
10555  * when E-Switch is engaged.
10556  *
10557  * @param dev
10558  *   Pointer to Ethernet device.
10559  * @param error
10560  *   Pointer to error structure.
10561  *
10562  * @return
10563  *   0 on success, negative values otherwise
10564  */
10565 static int
10566 flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
10567 {
10568 	struct mlx5_priv *priv = dev->data->dev_private;
10569 	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
10570 	uint32_t xmeta = priv->sh->config.dv_xmeta_en;
10571 	uint32_t repr_matching = priv->sh->config.repr_matching;
10572 	uint32_t fdb_def_rule = priv->sh->config.fdb_def_rule;
10573 
10574 	MLX5_ASSERT(priv->hw_ctrl_fdb == NULL);
10575 	hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY);
10576 	if (!hw_ctrl_fdb) {
10577 		DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates",
10578 			dev->data->port_id);
10579 		rte_errno = ENOMEM;
10580 		goto err;
10581 	}
10582 	priv->hw_ctrl_fdb = hw_ctrl_fdb;
10583 	if (fdb_def_rule) {
10584 		/* Create templates and table for default SQ miss flow rules - root table. */
10585 		hw_ctrl_fdb->esw_mgr_items_tmpl =
10586 				flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
10587 		if (!hw_ctrl_fdb->esw_mgr_items_tmpl) {
10588 			DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
10589 				" template for control flows", dev->data->port_id);
10590 			goto err;
10591 		}
10592 		hw_ctrl_fdb->regc_jump_actions_tmpl =
10593 				flow_hw_create_ctrl_regc_jump_actions_template(dev, error);
10594 		if (!hw_ctrl_fdb->regc_jump_actions_tmpl) {
10595 			DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
10596 				" for control flows", dev->data->port_id);
10597 			goto err;
10598 		}
10599 		hw_ctrl_fdb->hw_esw_sq_miss_root_tbl =
10600 				flow_hw_create_ctrl_sq_miss_root_table
10601 					(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
10602 					 hw_ctrl_fdb->regc_jump_actions_tmpl, error);
10603 		if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) {
10604 			DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
10605 				" for control flows", dev->data->port_id);
10606 			goto err;
10607 		}
10608 		/* Create templates and table for default SQ miss flow rules - non-root table. */
10609 		hw_ctrl_fdb->regc_sq_items_tmpl =
10610 				flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
10611 		if (!hw_ctrl_fdb->regc_sq_items_tmpl) {
10612 			DRV_LOG(ERR, "port %u failed to create SQ item template for"
10613 				" control flows", dev->data->port_id);
10614 			goto err;
10615 		}
10616 		hw_ctrl_fdb->port_actions_tmpl =
10617 				flow_hw_create_ctrl_port_actions_template(dev, error);
10618 		if (!hw_ctrl_fdb->port_actions_tmpl) {
10619 			DRV_LOG(ERR, "port %u failed to create port action template"
10620 				" for control flows", dev->data->port_id);
10621 			goto err;
10622 		}
10623 		hw_ctrl_fdb->hw_esw_sq_miss_tbl =
10624 				flow_hw_create_ctrl_sq_miss_table
10625 					(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
10626 					 hw_ctrl_fdb->port_actions_tmpl, error);
10627 		if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
10628 			DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
10629 				" for control flows", dev->data->port_id);
10630 			goto err;
10631 		}
10632 		/* Create templates and table for default FDB jump flow rules. */
10633 		hw_ctrl_fdb->port_items_tmpl =
10634 				flow_hw_create_ctrl_port_pattern_template(dev, error);
10635 		if (!hw_ctrl_fdb->port_items_tmpl) {
10636 			DRV_LOG(ERR, "port %u failed to create represented port item template for"
10637 				" control flows", dev->data->port_id);
10638 			goto err;
10639 		}
10640 		hw_ctrl_fdb->jump_one_actions_tmpl =
10641 				flow_hw_create_ctrl_jump_actions_template
10642 					(dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
10643 		if (!hw_ctrl_fdb->jump_one_actions_tmpl) {
10644 			DRV_LOG(ERR, "port %u failed to create jump action template"
10645 				" for control flows", dev->data->port_id);
10646 			goto err;
10647 		}
10648 		hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table
10649 				(dev, hw_ctrl_fdb->port_items_tmpl,
10650 				 hw_ctrl_fdb->jump_one_actions_tmpl, error);
10651 		if (!hw_ctrl_fdb->hw_esw_zero_tbl) {
10652 			DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
10653 				" for control flows", dev->data->port_id);
10654 			goto err;
10655 		}
10656 	}
10657 	/* Create templates and table for default Tx metadata copy flow rule. */
10658 	if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) {
10659 		hw_ctrl_fdb->tx_meta_items_tmpl =
10660 			flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error);
10661 		if (!hw_ctrl_fdb->tx_meta_items_tmpl) {
10662 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy pattern"
10663 				" template for control flows", dev->data->port_id);
10664 			goto err;
10665 		}
10666 		hw_ctrl_fdb->tx_meta_actions_tmpl =
10667 			flow_hw_create_tx_default_mreg_copy_actions_template(dev, error);
10668 		if (!hw_ctrl_fdb->tx_meta_actions_tmpl) {
10669 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy actions"
10670 				" template for control flows", dev->data->port_id);
10671 			goto err;
10672 		}
10673 		hw_ctrl_fdb->hw_tx_meta_cpy_tbl =
10674 			flow_hw_create_tx_default_mreg_copy_table
10675 				(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
10676 				 hw_ctrl_fdb->tx_meta_actions_tmpl, error);
10677 		if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) {
10678 			DRV_LOG(ERR, "port %u failed to create table for default"
10679 				" Tx metadata copy flow rule", dev->data->port_id);
10680 			goto err;
10681 		}
10682 	}
10683 	/* Create LACP default miss table. */
10684 	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) {
10685 		hw_ctrl_fdb->lacp_rx_items_tmpl =
10686 				flow_hw_create_lacp_rx_pattern_template(dev, error);
10687 		if (!hw_ctrl_fdb->lacp_rx_items_tmpl) {
10688 			DRV_LOG(ERR, "port %u failed to create pattern template"
10689 				" for LACP Rx traffic", dev->data->port_id);
10690 			goto err;
10691 		}
10692 		hw_ctrl_fdb->lacp_rx_actions_tmpl =
10693 				flow_hw_create_lacp_rx_actions_template(dev, error);
10694 		if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) {
10695 			DRV_LOG(ERR, "port %u failed to create actions template"
10696 				" for LACP Rx traffic", dev->data->port_id);
10697 			goto err;
10698 		}
10699 		hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table
10700 				(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
10701 				 hw_ctrl_fdb->lacp_rx_actions_tmpl, error);
10702 		if (!hw_ctrl_fdb->hw_lacp_rx_tbl) {
10703 			DRV_LOG(ERR, "port %u failed to create template table for"
10704 				" LACP Rx traffic", dev->data->port_id);
10705 			goto err;
10706 		}
10707 	}
10708 	return 0;
10709 
10710 err:
10711 	flow_hw_cleanup_ctrl_fdb_tables(dev);
10712 	return -EINVAL;
10713 }
10714 
10715 static void
10716 flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,
10717 		       struct mlx5_aso_ct_pools_mng *ct_mng)
10718 {
10719 	struct mlx5_priv *priv = dev->data->dev_private;
10720 
10721 	mlx5_aso_ct_queue_uninit(priv->sh, ct_mng);
10722 	mlx5_free(ct_mng);
10723 }
10724 
10725 static void
10726 flow_hw_ct_pool_destroy(struct rte_eth_dev *dev,
10727 			struct mlx5_aso_ct_pool *pool)
10728 {
10729 	struct mlx5_priv *priv = dev->data->dev_private;
10730 
10731 	if (pool->dr_action)
10732 		mlx5dr_action_destroy(pool->dr_action);
10733 	if (!priv->shared_host) {
10734 		if (pool->devx_obj)
10735 			claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
10736 		if (pool->cts)
10737 			mlx5_ipool_destroy(pool->cts);
10738 	}
10739 	mlx5_free(pool);
10740 }
10741 
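/**
 * Create the ASO connection tracking pool and its DR action.
 * When resources are shared with a host port, the host's DevX object and
 * indexed pool are reused instead of allocating new ones.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param nb_conn_tracks
 *   Number of connection tracking objects to support.
 *
 * @return
 *   Pointer to the CT pool on success, NULL otherwise.
 */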
10742 static struct mlx5_aso_ct_pool *
10743 flow_hw_ct_pool_create(struct rte_eth_dev *dev,
10744 		       uint32_t nb_conn_tracks)
10745 {
10746 	struct mlx5_priv *priv = dev->data->dev_private;
10747 	struct mlx5_aso_ct_pool *pool;
10748 	struct mlx5_devx_obj *obj;
10749 	uint32_t nb_cts = rte_align32pow2(nb_conn_tracks);
10750 	uint32_t log_obj_size = rte_log2_u32(nb_cts);
10751 	struct mlx5_indexed_pool_config cfg = {
10752 		.size = sizeof(struct mlx5_aso_ct_action),
10753 		.trunk_size = 1 << 12,
10754 		.per_core_cache = 1 << 13,
10755 		.need_lock = 1,
10756 		.release_mem_en = !!priv->sh->config.reclaim_mode,
10757 		.malloc = mlx5_malloc,
10758 		.free = mlx5_free,
10759 		.type = "mlx5_hw_ct_action",
10760 	};
10761 	int reg_id;
10762 	uint32_t flags = 0;
10763 
10764 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
10765 	if (!pool) {
10766 		rte_errno = ENOMEM;
10767 		return NULL;
10768 	}
10769 	if (!priv->shared_host) {
10770 		/*
10771 		 * No need for a local cache if the CT number is small, since the
10772 		 * flow insertion rate will be very limited in that case. Set the
10773 		 * trunk size to less than the default 4K.
10774 		 */
10775 		if (nb_cts <= cfg.trunk_size) {
10776 			cfg.per_core_cache = 0;
10777 			cfg.trunk_size = nb_cts;
10778 		} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
10779 			cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
10780 		}
10781 		cfg.max_idx = nb_cts;
10782 		pool->cts = mlx5_ipool_create(&cfg);
10783 		if (!pool->cts)
10784 			goto err;
10785 		obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
10786 								  priv->sh->cdev->pdn,
10787 								  log_obj_size);
10788 		if (!obj) {
10789 			rte_errno = ENODATA;
10790 			DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
10791 			goto err;
10792 		}
10793 		pool->devx_obj = obj;
10794 	} else {
10795 		struct rte_eth_dev *host_dev = priv->shared_host;
10796 		struct mlx5_priv *host_priv = host_dev->data->dev_private;
10797 
10798 		pool->devx_obj = host_priv->hws_ctpool->devx_obj;
10799 		pool->cts = host_priv->hws_ctpool->cts;
10800 		MLX5_ASSERT(pool->cts);
10801 		MLX5_ASSERT(!nb_conn_tracks);
10802 	}
10803 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
10804 	flags |= MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
10805 	if (priv->sh->config.dv_esw_en && priv->master)
10806 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
10807 	pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
10808 						      (struct mlx5dr_devx_obj *)pool->devx_obj,
10809 						      reg_id - REG_C_0, flags);
10810 	if (!pool->dr_action)
10811 		goto err;
10812 	pool->sq = priv->ct_mng->aso_sqs;
10813 	/* Assign the last extra ASO SQ as public SQ. */
10814 	pool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];
10815 	return pool;
10816 err:
10817 	flow_hw_ct_pool_destroy(dev, pool);
10818 	return NULL;
10819 }
10820 
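/**
 * Initialize connection tracking support: ASO SQ management (unless resources
 * are shared with a host port) and the CT pool.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param nb_conn_tracks
 *   Number of connection tracking objects to support.
 * @param nb_queue
 *   Number of flow queues, used for the ASO SQs.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */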
10821 static int
10822 mlx5_flow_ct_init(struct rte_eth_dev *dev,
10823 		  uint32_t nb_conn_tracks,
10824 		  uint16_t nb_queue)
10825 {
10826 	struct mlx5_priv *priv = dev->data->dev_private;
10827 	uint32_t mem_size;
10828 	int ret = -ENOMEM;
10829 
10830 	if (!priv->shared_host) {
10831 		mem_size = sizeof(struct mlx5_aso_sq) * nb_queue +
10832 				sizeof(*priv->ct_mng);
10833 		priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
10834 						RTE_CACHE_LINE_SIZE,
10835 						SOCKET_ID_ANY);
10836 		if (!priv->ct_mng)
10837 			goto err;
10838 		ret = mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng,
10839 						nb_queue);
10840 		if (ret)
10841 			goto err;
10842 	}
10843 	priv->hws_ctpool = flow_hw_ct_pool_create(dev, nb_conn_tracks);
10844 	if (!priv->hws_ctpool)
10845 		goto err;
10846 	priv->sh->ct_aso_en = 1;
10847 	return 0;
10848 
10849 err:
10850 	if (priv->hws_ctpool) {
10851 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
10852 		priv->hws_ctpool = NULL;
10853 	}
10854 	if (priv->ct_mng) {
10855 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
10856 		priv->ct_mng = NULL;
10857 	}
10858 	return ret;
10859 }
10860 
10861 static void
10862 flow_hw_destroy_vlan(struct rte_eth_dev *dev)
10863 {
10864 	struct mlx5_priv *priv = dev->data->dev_private;
10865 	enum mlx5dr_table_type i;
10866 
10867 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
10868 		if (priv->hw_pop_vlan[i]) {
10869 			mlx5dr_action_destroy(priv->hw_pop_vlan[i]);
10870 			priv->hw_pop_vlan[i] = NULL;
10871 		}
10872 		if (priv->hw_push_vlan[i]) {
10873 			mlx5dr_action_destroy(priv->hw_push_vlan[i]);
10874 			priv->hw_push_vlan[i] = NULL;
10875 		}
10876 	}
10877 }
10878 
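/**
 * Create shared VLAN push/pop actions for the NIC Rx and NIC Tx domains and,
 * on the E-Switch master port, for the FDB domain.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */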
10879 static int
10880 flow_hw_create_vlan(struct rte_eth_dev *dev)
10881 {
10882 	struct mlx5_priv *priv = dev->data->dev_private;
10883 	enum mlx5dr_table_type i;
10884 	const enum mlx5dr_action_flags flags[MLX5DR_TABLE_TYPE_MAX] = {
10885 		MLX5DR_ACTION_FLAG_HWS_RX,
10886 		MLX5DR_ACTION_FLAG_HWS_TX,
10887 		MLX5DR_ACTION_FLAG_HWS_FDB
10888 	};
10889 
10890 	/* rte_errno is set in the mlx5dr_action* functions. */
10891 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++) {
10892 		priv->hw_pop_vlan[i] =
10893 			mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]);
10894 		if (!priv->hw_pop_vlan[i])
10895 			return -rte_errno;
10896 		priv->hw_push_vlan[i] =
10897 			mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]);
10898 		if (!priv->hw_push_vlan[i])
10899 			return -rte_errno;
10900 	}
10901 	if (priv->sh->config.dv_esw_en && priv->master) {
10902 		priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB] =
10903 			mlx5dr_action_create_pop_vlan
10904 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
10905 		if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB])
10906 			return -rte_errno;
10907 		priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB] =
10908 			mlx5dr_action_create_push_vlan
10909 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
10910 		if (!priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB])
10911 			return -rte_errno;
10912 	}
10913 	return 0;
10914 }
10915 
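/**
 * Destroy all tables, pattern templates and actions templates used for control
 * Rx flow rules and release the hw_ctrl_rx structure.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */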
10916 static void
10917 flow_hw_cleanup_ctrl_rx_tables(struct rte_eth_dev *dev)
10918 {
10919 	struct mlx5_priv *priv = dev->data->dev_private;
10920 	unsigned int i;
10921 	unsigned int j;
10922 
10923 	if (!priv->hw_ctrl_rx)
10924 		return;
10925 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
10926 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
10927 			struct rte_flow_template_table *tbl = priv->hw_ctrl_rx->tables[i][j].tbl;
10928 			struct rte_flow_pattern_template *pt = priv->hw_ctrl_rx->tables[i][j].pt;
10929 
10930 			if (tbl)
10931 				claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
10932 			if (pt)
10933 				claim_zero(flow_hw_pattern_template_destroy(dev, pt, NULL));
10934 		}
10935 	}
10936 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++i) {
10937 		struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[i];
10938 
10939 		if (at)
10940 			claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
10941 	}
10942 	mlx5_free(priv->hw_ctrl_rx);
10943 	priv->hw_ctrl_rx = NULL;
10944 }
10945 
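/**
 * Map an expanded control flow RSS type to the corresponding RTE_ETH_RSS_*
 * hash types.
 */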
10946 static uint64_t
10947 flow_hw_ctrl_rx_rss_type_hash_types(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
10948 {
10949 	switch (rss_type) {
10950 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP:
10951 		return 0;
10952 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
10953 		return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
10954 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
10955 		return RTE_ETH_RSS_NONFRAG_IPV4_UDP;
10956 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
10957 		return RTE_ETH_RSS_NONFRAG_IPV4_TCP;
10958 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
10959 		return RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
10960 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
10961 		return RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX;
10962 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
10963 		return RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX;
10964 	default:
10965 		/* Should not reach here. */
10966 		MLX5_ASSERT(false);
10967 		return 0;
10968 	}
10969 }
10970 
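/**
 * Create an ingress actions template with a single RSS action spreading traffic
 * over the configured Rx queues, with hash types selected according to rss_type.
 */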
10971 static struct rte_flow_actions_template *
10972 flow_hw_create_ctrl_rx_rss_template(struct rte_eth_dev *dev,
10973 				    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
10974 {
10975 	struct mlx5_priv *priv = dev->data->dev_private;
10976 	struct rte_flow_actions_template_attr attr = {
10977 		.ingress = 1,
10978 	};
10979 	uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
10980 	struct rte_flow_action_rss rss_conf = {
10981 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
10982 		.level = 0,
10983 		.types = 0,
10984 		.key_len = priv->rss_conf.rss_key_len,
10985 		.key = priv->rss_conf.rss_key,
10986 		.queue_num = priv->reta_idx_n,
10987 		.queue = queue,
10988 	};
10989 	struct rte_flow_action actions[] = {
10990 		{
10991 			.type = RTE_FLOW_ACTION_TYPE_RSS,
10992 			.conf = &rss_conf,
10993 		},
10994 		{
10995 			.type = RTE_FLOW_ACTION_TYPE_END,
10996 		}
10997 	};
10998 	struct rte_flow_action masks[] = {
10999 		{
11000 			.type = RTE_FLOW_ACTION_TYPE_RSS,
11001 			.conf = &rss_conf,
11002 		},
11003 		{
11004 			.type = RTE_FLOW_ACTION_TYPE_END,
11005 		}
11006 	};
11007 	struct rte_flow_actions_template *at;
11008 	struct rte_flow_error error;
11009 	unsigned int i;
11010 
11011 	MLX5_ASSERT(priv->reta_idx_n > 0 && priv->reta_idx);
11012 	/* Select proper RSS hash types and based on that configure the actions template. */
11013 	rss_conf.types = flow_hw_ctrl_rx_rss_type_hash_types(rss_type);
11014 	if (rss_conf.types) {
11015 		for (i = 0; i < priv->reta_idx_n; ++i)
11016 			queue[i] = (*priv->reta_idx)[i];
11017 	} else {
11018 		rss_conf.queue_num = 1;
11019 		queue[0] = (*priv->reta_idx)[0];
11020 	}
11021 	at = flow_hw_actions_template_create(dev, &attr, actions, masks, &error);
11022 	if (!at)
11023 		DRV_LOG(ERR,
11024 			"Failed to create ctrl flow actions template: rte_errno(%d), type(%d): %s",
11025 			rte_errno, error.type,
11026 			error.message ? error.message : "(no stated reason)");
11027 	return at;
11028 }
11029 
11030 static uint32_t ctrl_rx_rss_priority_map[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX] = {
11031 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP] = MLX5_HW_CTRL_RX_PRIO_L2,
11032 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4] = MLX5_HW_CTRL_RX_PRIO_L3,
11033 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
11034 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
11035 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6] = MLX5_HW_CTRL_RX_PRIO_L3,
11036 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
11037 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
11038 };
11039 
11040 static uint32_t ctrl_rx_nb_flows_map[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] = {
11041 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL] = 1,
11042 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST] = 1,
11043 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST] = 1,
11044 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11045 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST] = 1,
11046 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11047 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST] = 1,
11048 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11049 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC] = MLX5_MAX_UC_MAC_ADDRESSES,
11050 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN] =
11051 			MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
11052 };
11053 
11054 static struct rte_flow_template_table_attr
11055 flow_hw_get_ctrl_rx_table_attr(enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
11056 			       const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11057 {
11058 	return (struct rte_flow_template_table_attr){
11059 		.flow_attr = {
11060 			.group = 0,
11061 			.priority = ctrl_rx_rss_priority_map[rss_type],
11062 			.ingress = 1,
11063 		},
11064 		.nb_flows = ctrl_rx_nb_flows_map[eth_pattern_type],
11065 	};
11066 }
11067 
11068 static struct rte_flow_item
11069 flow_hw_get_ctrl_rx_eth_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
11070 {
11071 	struct rte_flow_item item = {
11072 		.type = RTE_FLOW_ITEM_TYPE_ETH,
11073 		.mask = NULL,
11074 	};
11075 
11076 	switch (eth_pattern_type) {
11077 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
11078 		item.mask = &ctrl_rx_eth_promisc_mask;
11079 		break;
11080 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
11081 		item.mask = &ctrl_rx_eth_mcast_mask;
11082 		break;
11083 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
11084 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
11085 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
11086 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
11087 		item.mask = &ctrl_rx_eth_dmac_mask;
11088 		break;
11089 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
11090 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
11091 		item.mask = &ctrl_rx_eth_ipv4_mcast_mask;
11092 		break;
11093 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
11094 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
11095 		item.mask = &ctrl_rx_eth_ipv6_mcast_mask;
11096 		break;
11097 	default:
11098 		/* Should not reach here - ETH mask must be present. */
11099 		item.type = RTE_FLOW_ITEM_TYPE_END;
11100 		MLX5_ASSERT(false);
11101 		break;
11102 	}
11103 	return item;
11104 }
11105 
11106 static struct rte_flow_item
11107 flow_hw_get_ctrl_rx_vlan_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
11108 {
11109 	struct rte_flow_item item = {
11110 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11111 		.mask = NULL,
11112 	};
11113 
11114 	switch (eth_pattern_type) {
11115 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
11116 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
11117 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
11118 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
11119 		item.type = RTE_FLOW_ITEM_TYPE_VLAN;
11120 		item.mask = &rte_flow_item_vlan_mask;
11121 		break;
11122 	default:
11123 		/* Nothing to update. */
11124 		break;
11125 	}
11126 	return item;
11127 }
11128 
11129 static struct rte_flow_item
11130 flow_hw_get_ctrl_rx_l3_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11131 {
11132 	struct rte_flow_item item = {
11133 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11134 		.mask = NULL,
11135 	};
11136 
11137 	switch (rss_type) {
11138 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
11139 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11140 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11141 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
11142 		break;
11143 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
11144 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11145 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11146 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
11147 		break;
11148 	default:
11149 		/* Nothing to update. */
11150 		break;
11151 	}
11152 	return item;
11153 }
11154 
11155 static struct rte_flow_item
11156 flow_hw_get_ctrl_rx_l4_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11157 {
11158 	struct rte_flow_item item = {
11159 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11160 		.mask = NULL,
11161 	};
11162 
11163 	switch (rss_type) {
11164 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11165 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11166 		item.type = RTE_FLOW_ITEM_TYPE_UDP;
11167 		break;
11168 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11169 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11170 		item.type = RTE_FLOW_ITEM_TYPE_TCP;
11171 		break;
11172 	default:
11173 		/* Nothing to update. */
11174 		break;
11175 	}
11176 	return item;
11177 }
11178 
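/**
 * Create a pattern template used by the control Rx flow tables.
 *
 * The item array is derived from the Ethernet pattern type and the expanded
 * RSS type; for example, DMAC_VLAN combined with IPV4_UDP yields
 * ETH (DMAC mask) / VLAN / IPV4 / UDP / END, while non-applicable slots are
 * filled with VOID items.
 */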
11179 static struct rte_flow_pattern_template *
11180 flow_hw_create_ctrl_rx_pattern_template
11181 		(struct rte_eth_dev *dev,
11182 		 const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
11183 		 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11184 {
11185 	const struct rte_flow_pattern_template_attr attr = {
11186 		.relaxed_matching = 0,
11187 		.ingress = 1,
11188 	};
11189 	struct rte_flow_item items[] = {
11190 		/* Matching patterns */
11191 		flow_hw_get_ctrl_rx_eth_item(eth_pattern_type),
11192 		flow_hw_get_ctrl_rx_vlan_item(eth_pattern_type),
11193 		flow_hw_get_ctrl_rx_l3_item(rss_type),
11194 		flow_hw_get_ctrl_rx_l4_item(rss_type),
11195 		/* Terminate pattern */
11196 		{ .type = RTE_FLOW_ITEM_TYPE_END }
11197 	};
11198 
11199 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
11200 }
11201 
11202 static int
11203 flow_hw_create_ctrl_rx_tables(struct rte_eth_dev *dev)
11204 {
11205 	struct mlx5_priv *priv = dev->data->dev_private;
11206 	unsigned int i;
11207 	unsigned int j;
11208 	int ret;
11209 
11210 	MLX5_ASSERT(!priv->hw_ctrl_rx);
11211 	priv->hw_ctrl_rx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*priv->hw_ctrl_rx),
11212 				       RTE_CACHE_LINE_SIZE, rte_socket_id());
11213 	if (!priv->hw_ctrl_rx) {
11214 		DRV_LOG(ERR, "Failed to allocate memory for Rx control flow tables");
11215 		rte_errno = ENOMEM;
11216 		return -rte_errno;
11217 	}
11218 	/* Create all pattern template variants. */
11219 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11220 		enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
11221 
11222 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11223 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
11224 			struct rte_flow_template_table_attr attr;
11225 			struct rte_flow_pattern_template *pt;
11226 
11227 			attr = flow_hw_get_ctrl_rx_table_attr(eth_pattern_type, rss_type);
11228 			pt = flow_hw_create_ctrl_rx_pattern_template(dev, eth_pattern_type,
11229 								     rss_type);
11230 			if (!pt)
11231 				goto err;
11232 			priv->hw_ctrl_rx->tables[i][j].attr = attr;
11233 			priv->hw_ctrl_rx->tables[i][j].pt = pt;
11234 		}
11235 	}
11236 	return 0;
11237 err:
11238 	ret = rte_errno;
11239 	flow_hw_cleanup_ctrl_rx_tables(dev);
11240 	rte_errno = ret;
11241 	return -ret;
11242 }
11243 
11244 void
11245 mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev)
11246 {
11247 	struct mlx5_priv *priv = dev->data->dev_private;
11248 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
11249 	unsigned int i;
11250 	unsigned int j;
11251 
11252 	if (!priv->dr_ctx)
11253 		return;
11254 	if (!priv->hw_ctrl_rx)
11255 		return;
11256 	hw_ctrl_rx = priv->hw_ctrl_rx;
11257 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11258 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11259 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
11260 
11261 			if (tmpls->tbl) {
11262 				claim_zero(flow_hw_table_destroy(dev, tmpls->tbl, NULL));
11263 				tmpls->tbl = NULL;
11264 			}
11265 		}
11266 	}
11267 	for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11268 		if (hw_ctrl_rx->rss[j]) {
11269 			claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_rx->rss[j], NULL));
11270 			hw_ctrl_rx->rss[j] = NULL;
11271 		}
11272 	}
11273 }
11274 
11275 /**
11276  * Copy the provided HWS configuration to a newly allocated buffer.
11277  *
11278  * @param[in] port_attr
11279  *   Port configuration attributes.
11280  * @param[in] nb_queue
11281  *   Number of queues.
11282  * @param[in] queue_attr
11283  *   Array that holds attributes for each flow queue.
11284  * @param[in] nt_mode
11285  *   Non-template mode.
 * @param[out] error
 *   Pointer to error structure.
11286  *
11287  * @return
11288  *   Pointer to copied HWS configuration is returned on success.
11289  *   Otherwise, NULL is returned and rte_errno is set.
11290  */
11291 static struct mlx5_flow_hw_attr *
11292 flow_hw_alloc_copy_config(const struct rte_flow_port_attr *port_attr,
11293 			  const uint16_t nb_queue,
11294 			  const struct rte_flow_queue_attr *queue_attr[],
11295 			  bool nt_mode,
11296 			  struct rte_flow_error *error)
11297 {
11298 	struct mlx5_flow_hw_attr *hw_attr;
11299 	size_t hw_attr_size;
11300 	unsigned int i;
11301 
11302 	hw_attr_size = sizeof(*hw_attr) + nb_queue * sizeof(*hw_attr->queue_attr);
11303 	hw_attr = mlx5_malloc(MLX5_MEM_ZERO, hw_attr_size, 0, SOCKET_ID_ANY);
11304 	if (!hw_attr) {
11305 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11306 				   "Not enough memory to store configuration");
11307 		return NULL;
11308 	}
11309 	memcpy(&hw_attr->port_attr, port_attr, sizeof(*port_attr));
11310 	hw_attr->nb_queue = nb_queue;
11311 	/* Queue attributes are placed after the mlx5_flow_hw_attr. */
11312 	hw_attr->queue_attr = (struct rte_flow_queue_attr *)(hw_attr + 1);
11313 	for (i = 0; i < nb_queue; ++i)
11314 		memcpy(&hw_attr->queue_attr[i], queue_attr[i], sizeof(hw_attr->queue_attr[i]));
11315 	hw_attr->nt_mode = nt_mode;
11316 	return hw_attr;
11317 }
11318 
11319 /**
11320  * Compares the preserved HWS configuration with the provided one.
11321  *
11322  * @param[in] hw_attr
11323  *   Pointer to preserved HWS configuration.
11324  * @param[in] new_pa
11325  *   Port configuration attributes to compare.
11326  * @param[in] new_nbq
11327  *   Number of queues to compare.
11328  * @param[in] new_qa
11329  *   Array that holds attributes for each flow queue.
11330  *
11331  * @return
11332  *   True if configurations are the same, false otherwise.
11333  */
11334 static bool
11335 flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
11336 		       const struct rte_flow_port_attr *new_pa,
11337 		       const uint16_t new_nbq,
11338 		       const struct rte_flow_queue_attr *new_qa[])
11339 {
11340 	const struct rte_flow_port_attr *old_pa = &hw_attr->port_attr;
11341 	const uint16_t old_nbq = hw_attr->nb_queue;
11342 	const struct rte_flow_queue_attr *old_qa = hw_attr->queue_attr;
11343 	unsigned int i;
11344 
11345 	if (old_pa->nb_counters != new_pa->nb_counters ||
11346 	    old_pa->nb_aging_objects != new_pa->nb_aging_objects ||
11347 	    old_pa->nb_meters != new_pa->nb_meters ||
11348 	    old_pa->nb_conn_tracks != new_pa->nb_conn_tracks ||
11349 	    old_pa->flags != new_pa->flags)
11350 		return false;
11351 	if (old_nbq != new_nbq)
11352 		return false;
11353 	for (i = 0; i < old_nbq; ++i)
11354 		if (old_qa[i].size != new_qa[i]->size)
11355 			return false;
11356 	return true;
11357 }
11358 
11359 /*
11360  * No need to explicitly release drop action templates on port stop.
11361  * Drop action templates are released together with other action templates during
11362  * mlx5_dev_close() -> flow_hw_resource_release() -> flow_hw_actions_template_destroy().
11363  */
11364 static void
11365 flow_hw_action_template_drop_release(struct rte_eth_dev *dev)
11366 {
11367 	int i;
11368 	struct mlx5_priv *priv = dev->data->dev_private;
11369 
11370 	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
11371 		if (!priv->action_template_drop[i])
11372 			continue;
11373 		flow_hw_actions_template_destroy(dev,
11374 						 priv->action_template_drop[i],
11375 						 NULL);
11376 		priv->action_template_drop[i] = NULL;
11377 	}
11378 }
11379 
11380 static int
11381 flow_hw_action_template_drop_init(struct rte_eth_dev *dev,
11382 			  struct rte_flow_error *error)
11383 {
11384 	const struct rte_flow_action drop[2] = {
11385 		[0] = { .type = RTE_FLOW_ACTION_TYPE_DROP },
11386 		[1] = { .type = RTE_FLOW_ACTION_TYPE_END },
11387 	};
11388 	const struct rte_flow_action *actions = drop;
11389 	const struct rte_flow_action *masks = drop;
11390 	const struct rte_flow_actions_template_attr attr[MLX5DR_TABLE_TYPE_MAX] = {
11391 		[MLX5DR_TABLE_TYPE_NIC_RX] = { .ingress = 1 },
11392 		[MLX5DR_TABLE_TYPE_NIC_TX] = { .egress = 1 },
11393 		[MLX5DR_TABLE_TYPE_FDB] = { .transfer = 1 }
11394 	};
11395 	struct mlx5_priv *priv = dev->data->dev_private;
11396 
11397 	priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX] =
11398 		flow_hw_actions_template_create(dev,
11399 						&attr[MLX5DR_TABLE_TYPE_NIC_RX],
11400 						actions, masks, error);
11401 	if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX])
11402 		return -1;
11403 	priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX] =
11404 		flow_hw_actions_template_create(dev,
11405 						&attr[MLX5DR_TABLE_TYPE_NIC_TX],
11406 						actions, masks, error);
11407 	if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX])
11408 		return -1;
11409 	if (priv->sh->config.dv_esw_en && priv->master) {
11410 		priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB] =
11411 			flow_hw_actions_template_create(dev,
11412 							&attr[MLX5DR_TABLE_TYPE_FDB],
11413 							actions, masks, error);
11414 		if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB])
11415 			return -1;
11416 	}
11417 	return 0;
11418 }
11419 
11420 static void
11421 __flow_hw_resource_release(struct rte_eth_dev *dev, bool ctx_close)
11422 {
11423 	struct mlx5_priv *priv = dev->data->dev_private;
11424 	struct rte_flow_template_table *tbl, *temp_tbl;
11425 	struct rte_flow_pattern_template *it, *temp_it;
11426 	struct rte_flow_actions_template *at, *temp_at;
11427 	struct mlx5_flow_group *grp, *temp_grp;
11428 	uint32_t i;
11429 
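	/*
	 * Release order matters: control flows and control tables first,
	 * then user template objects, global HWS actions, object pools,
	 * flow queues and finally (optionally) the HWS context itself.
	 */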
11430 	flow_hw_rxq_flag_set(dev, false);
11431 	flow_hw_flush_all_ctrl_flows(dev);
11432 	flow_hw_cleanup_ctrl_fdb_tables(dev);
11433 	flow_hw_cleanup_tx_repr_tagging(dev);
11434 	flow_hw_cleanup_ctrl_rx_tables(dev);
11435 	flow_hw_action_template_drop_release(dev);
11436 	grp = LIST_FIRST(&priv->flow_hw_grp);
11437 	while (grp) {
11438 		temp_grp = LIST_NEXT(grp, next);
11439 		claim_zero(flow_hw_group_unset_miss_group(dev, grp, NULL));
11440 		grp = temp_grp;
11441 	}
11442 	tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
11443 	while (tbl) {
11444 		temp_tbl = LIST_NEXT(tbl, next);
11445 		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11446 		tbl = temp_tbl;
11447 	}
11448 	tbl = LIST_FIRST(&priv->flow_hw_tbl);
11449 	while (tbl) {
11450 		temp_tbl = LIST_NEXT(tbl, next);
11451 		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11452 		tbl = temp_tbl;
11453 	}
11454 	it = LIST_FIRST(&priv->flow_hw_itt);
11455 	while (it) {
11456 		temp_it = LIST_NEXT(it, next);
11457 		claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL));
11458 		it = temp_it;
11459 	}
11460 	at = LIST_FIRST(&priv->flow_hw_at);
11461 	while (at) {
11462 		temp_at = LIST_NEXT(at, next);
11463 		claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
11464 		at = temp_at;
11465 	}
11466 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
11467 		if (priv->hw_drop[i])
11468 			mlx5dr_action_destroy(priv->hw_drop[i]);
11469 		if (priv->hw_tag[i])
11470 			mlx5dr_action_destroy(priv->hw_tag[i]);
11471 	}
11472 	if (priv->hw_def_miss)
11473 		mlx5dr_action_destroy(priv->hw_def_miss);
11474 	flow_hw_destroy_nat64_actions(priv);
11475 	flow_hw_destroy_vlan(dev);
11476 	flow_hw_destroy_send_to_kernel_action(priv);
11477 	flow_hw_free_vport_actions(priv);
11478 	if (priv->acts_ipool) {
11479 		mlx5_ipool_destroy(priv->acts_ipool);
11480 		priv->acts_ipool = NULL;
11481 	}
11482 	if (priv->hws_age_req)
11483 		mlx5_hws_age_pool_destroy(priv);
11484 	if (!priv->shared_host && priv->hws_cpool) {
11485 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
11486 		priv->hws_cpool = NULL;
11487 	}
11488 	if (priv->hws_ctpool) {
11489 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
11490 		priv->hws_ctpool = NULL;
11491 	}
11492 	if (priv->ct_mng) {
11493 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
11494 		priv->ct_mng = NULL;
11495 	}
11496 	mlx5_flow_quota_destroy(dev);
11497 	if (priv->hw_q) {
11498 		for (i = 0; i < priv->nb_queue; i++) {
11499 			struct mlx5_hw_q *hwq = &priv->hw_q[i];
11500 			rte_ring_free(hwq->indir_iq);
11501 			rte_ring_free(hwq->indir_cq);
11502 			rte_ring_free(hwq->flow_transfer_pending);
11503 			rte_ring_free(hwq->flow_transfer_completed);
11504 		}
11505 		mlx5_free(priv->hw_q);
11506 		priv->hw_q = NULL;
11507 	}
11508 	if (ctx_close) {
11509 		if (priv->dr_ctx) {
11510 			claim_zero(mlx5dr_context_close(priv->dr_ctx));
11511 			priv->dr_ctx = NULL;
11512 		}
11513 	}
11514 	if (priv->shared_host) {
11515 		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
11516 		rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
11517 				rte_memory_order_relaxed);
11518 		priv->shared_host = NULL;
11519 	}
11520 	if (priv->hw_attr) {
11521 		mlx5_free(priv->hw_attr);
11522 		priv->hw_attr = NULL;
11523 	}
11524 	priv->nb_queue = 0;
11525 }
11526 
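/*
 * Create a single-producer/single-consumer ring used by a flow queue.
 * The ring is named "port_<port_id>_<name>_<queue>" and its usable size is
 * exactly the requested one (RING_F_EXACT_SZ).
 */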
11527 static __rte_always_inline struct rte_ring *
11528 mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char *str)
11529 {
11530 	char mz_name[RTE_MEMZONE_NAMESIZE];
11531 
11532 	snprintf(mz_name, sizeof(mz_name), "port_%u_%s_%u", port_id, str, queue);
11533 	return rte_ring_create(mz_name, size, SOCKET_ID_ANY,
11534 			       RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
11535 }
11536 
11537 static int
11538 flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr,
11539 			    uint16_t nb_queue,
11540 			    const struct rte_flow_queue_attr *queue_attr[],
11541 			    bool nt_mode, struct rte_flow_error *error)
11542 {
11543 	uint32_t size;
11544 	unsigned int i;
11545 
11546 	if (port_attr == NULL)
11547 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11548 					  "Port attributes must be non-NULL");
11549 
11550 	if (nb_queue == 0 && !nt_mode)
11551 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11552 					  "At least one flow queue is required");
11553 
11554 	if (queue_attr == NULL)
11555 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11556 					  "Queue attributes must be non-NULL");
11557 
11558 	size = queue_attr[0]->size;
11559 	for (i = 1; i < nb_queue; ++i) {
11560 		if (queue_attr[i]->size != size)
11561 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11562 						  NULL,
11563 						  "All flow queues must have the same size");
11564 	}
11565 
11566 	return 0;
11567 }
11568 
11569 /**
11570  * Configure port HWS resources.
11571  *
11572  * @param[in] dev
11573  *   Pointer to the rte_eth_dev structure.
11574  * @param[in] port_attr
11575  *   Port configuration attributes.
11576  * @param[in] nb_queue
11577  *   Number of queues.
11578  * @param[in] queue_attr
11579  *   Array that holds attributes for each flow queue.
11580  * @param[in] nt_mode
11581  *   Non-template mode.
11582  * @param[out] error
11583  *   Pointer to error structure.
11584  *
11585  * @return
11586  *   0 on success, a negative errno value otherwise and rte_errno is set.
11587  */
11588 static int
11589 __flow_hw_configure(struct rte_eth_dev *dev,
11590 		  const struct rte_flow_port_attr *port_attr,
11591 		  uint16_t nb_queue,
11592 		  const struct rte_flow_queue_attr *queue_attr[],
11593 		  bool nt_mode,
11594 		  struct rte_flow_error *error)
11595 {
11596 	struct mlx5_priv *priv = dev->data->dev_private;
11597 	struct mlx5_priv *host_priv = NULL;
11598 	struct mlx5dr_context_attr dr_ctx_attr = {0};
11599 	struct mlx5_hw_q *hw_q;
11600 	struct mlx5_hw_q_job *job = NULL;
11601 	uint32_t mem_size, i, j;
11602 	struct mlx5_indexed_pool_config cfg = {
11603 		.size = sizeof(struct mlx5_action_construct_data),
11604 		.trunk_size = 4096,
11605 		.need_lock = 1,
11606 		.release_mem_en = !!priv->sh->config.reclaim_mode,
11607 		.malloc = mlx5_malloc,
11608 		.free = mlx5_free,
11609 		.type = "mlx5_hw_action_construct_data",
11610 	};
11611 	/*
11612 	 * Add an extra queue to be used internally by the PMD.
11613 	 * The last queue (at index nb_queue) is the control queue used by the PMD.
11614 	 */
11615 	uint16_t nb_q_updated = 0;
11616 	struct rte_flow_queue_attr **_queue_attr = NULL;
11617 	struct rte_flow_queue_attr ctrl_queue_attr = {0};
11618 	bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
11619 	int ret = 0;
11620 	uint32_t action_flags;
11621 	bool strict_queue = false;
11622 
11623 	if (mlx5dr_rule_get_handle_size() != MLX5_DR_RULE_SIZE) {
11624 		rte_errno = EINVAL;
11625 		goto err;
11626 	}
11627 	if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, nt_mode, error))
11628 		return -rte_errno;
11629 	/*
11630 	 * Calling rte_flow_configure() again is allowed if the provided
11631 	 * configuration matches the initially provided one, or if the
11632 	 * previous configuration was the default non-template one.
11633 	 */
11634 	if (priv->dr_ctx) {
11635 		MLX5_ASSERT(priv->hw_attr != NULL);
11636 		for (i = 0; i < priv->nb_queue; i++) {
11637 			hw_q = &priv->hw_q[i];
11638 			/* Make sure all queues are empty. */
11639 			if (hw_q->size != hw_q->job_idx) {
11640 				rte_errno = EBUSY;
11641 				goto err;
11642 			}
11643 		}
11644 		/* If the previous configuration was not the default non-template mode config. */
11645 		if (!priv->hw_attr->nt_mode) {
11646 			if (flow_hw_compare_config(priv->hw_attr, port_attr, nb_queue, queue_attr))
11647 				return 0;
11648 			else
11649 				return rte_flow_error_set(error, ENOTSUP,
11650 							RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11651 							"Changing HWS configuration attributes "
11652 							"is not supported");
11653 		}
11654 		/* Reconfiguration, need to release all resources from previous allocation. */
11655 		__flow_hw_resource_release(dev, true);
11656 	}
11657 	priv->hw_attr = flow_hw_alloc_copy_config(port_attr, nb_queue, queue_attr, nt_mode, error);
11658 	if (!priv->hw_attr) {
11659 		ret = -rte_errno;
11660 		goto err;
11661 	}
11662 	ctrl_queue_attr.size = queue_attr[0]->size;
11663 	nb_q_updated = nb_queue + 1;
11664 	_queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
11665 				  nb_q_updated *
11666 				  sizeof(struct rte_flow_queue_attr *),
11667 				  64, SOCKET_ID_ANY);
11668 	if (!_queue_attr) {
11669 		rte_errno = ENOMEM;
11670 		goto err;
11671 	}
11672 
11673 	memcpy(_queue_attr, queue_attr, sizeof(void *) * nb_queue);
11674 	_queue_attr[nb_queue] = &ctrl_queue_attr;
11675 	priv->acts_ipool = mlx5_ipool_create(&cfg);
11676 	if (!priv->acts_ipool)
11677 		goto err;
11678 	/* Allocate the queue job descriptor LIFO. */
11679 	mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
11680 	for (i = 0; i < nb_q_updated; i++) {
11681 		mem_size += (sizeof(struct mlx5_hw_q_job *) +
11682 			     sizeof(struct mlx5_hw_q_job)) * _queue_attr[i]->size;
11683 	}
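	/*
	 * Single allocation layout:
	 *   [ hw_q[0] .. hw_q[nb_q_updated - 1] ]
	 *   [ queue 0 job pointer array ][ queue 0 job descriptors ]
	 *   [ queue 1 job pointer array ][ queue 1 job descriptors ] ...
	 * The per-queue pointers below are carved out of this buffer.
	 */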
11684 	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
11685 				 64, SOCKET_ID_ANY);
11686 	if (!priv->hw_q) {
11687 		rte_errno = ENOMEM;
11688 		goto err;
11689 	}
11690 	for (i = 0; i < nb_q_updated; i++) {
11691 		priv->hw_q[i].job_idx = _queue_attr[i]->size;
11692 		priv->hw_q[i].size = _queue_attr[i]->size;
11693 		priv->hw_q[i].ongoing_flow_ops = 0;
11694 		if (i == 0)
11695 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
11696 					    &priv->hw_q[nb_q_updated];
11697 		else
11698 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)&job[_queue_attr[i - 1]->size];
11699 		job = (struct mlx5_hw_q_job *)
11700 		      &priv->hw_q[i].job[_queue_attr[i]->size];
11701 		for (j = 0; j < _queue_attr[i]->size; j++)
11702 			priv->hw_q[i].job[j] = &job[j];
11703 		/* Note that the ring name length is limited. */
11704 		priv->hw_q[i].indir_cq = mlx5_hwq_ring_create
11705 			(dev->data->port_id, i, _queue_attr[i]->size, "indir_act_cq");
11706 		if (!priv->hw_q[i].indir_cq)
11707 			goto err;
11708 		priv->hw_q[i].indir_iq = mlx5_hwq_ring_create
11709 			(dev->data->port_id, i, _queue_attr[i]->size, "indir_act_iq");
11710 		if (!priv->hw_q[i].indir_iq)
11711 			goto err;
11712 		priv->hw_q[i].flow_transfer_pending = mlx5_hwq_ring_create
11713 			(dev->data->port_id, i, _queue_attr[i]->size, "tx_pending");
11714 		if (!priv->hw_q[i].flow_transfer_pending)
11715 			goto err;
11716 		priv->hw_q[i].flow_transfer_completed = mlx5_hwq_ring_create
11717 			(dev->data->port_id, i, _queue_attr[i]->size, "tx_done");
11718 		if (!priv->hw_q[i].flow_transfer_completed)
11719 			goto err;
11720 	}
11721 	dr_ctx_attr.pd = priv->sh->cdev->pd;
11722 	dr_ctx_attr.queues = nb_q_updated;
11723 	/* Queue sizes should all be the same. Take the first one. */
11724 	dr_ctx_attr.queue_size = _queue_attr[0]->size;
11725 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
11726 		struct rte_eth_dev *host_dev = NULL;
11727 		uint16_t port_id;
11728 
11729 		MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
11730 		if (is_proxy) {
11731 			DRV_LOG(ERR, "cross vHCA shared mode not supported "
11732 				"for E-Switch confgiurations");
11733 			rte_errno = ENOTSUP;
11734 			goto err;
11735 		}
11736 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
11737 			if (port_id == port_attr->host_port_id) {
11738 				host_dev = &rte_eth_devices[port_id];
11739 				break;
11740 			}
11741 		}
11742 		if (!host_dev || host_dev == dev ||
11743 		    !host_dev->data || !host_dev->data->dev_private) {
11744 			DRV_LOG(ERR, "Invalid cross vHCA host port %u",
11745 				port_attr->host_port_id);
11746 			rte_errno = EINVAL;
11747 			goto err;
11748 		}
11749 		host_priv = host_dev->data->dev_private;
11750 		if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
11751 			DRV_LOG(ERR, "Sibling ports %u and %u do not "
11752 				     "require cross vHCA sharing mode",
11753 				dev->data->port_id, port_attr->host_port_id);
11754 			rte_errno = EINVAL;
11755 			goto err;
11756 		}
11757 		if (host_priv->shared_host) {
11758 			DRV_LOG(ERR, "Host port %u is not the sharing base",
11759 				port_attr->host_port_id);
11760 			rte_errno = EINVAL;
11761 			goto err;
11762 		}
11763 		if (port_attr->nb_counters ||
11764 		    port_attr->nb_aging_objects ||
11765 		    port_attr->nb_meters ||
11766 		    port_attr->nb_conn_tracks) {
11767 			DRV_LOG(ERR,
11768 				"Object numbers on guest port must be zeros");
11769 			rte_errno = EINVAL;
11770 			goto err;
11771 		}
11772 		dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
11773 		priv->shared_host = host_dev;
11774 		rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
11775 				rte_memory_order_relaxed);
11776 	}
11777 	/* Set backward compatible mode to support the non-template RTE flow API. */
11778 	dr_ctx_attr.bwc = true;
11779 	priv->dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
11780 	/* rte_errno has been updated by HWS layer. */
11781 	if (!priv->dr_ctx)
11782 		goto err;
11783 	priv->nb_queue = nb_q_updated;
11784 	rte_spinlock_init(&priv->hw_ctrl_lock);
11785 	LIST_INIT(&priv->hw_ctrl_flows);
11786 	LIST_INIT(&priv->hw_ext_ctrl_flows);
11787 	ret = flow_hw_action_template_drop_init(dev, error);
11788 	if (ret)
11789 		goto err;
11790 	ret = flow_hw_create_ctrl_rx_tables(dev);
11791 	if (ret) {
11792 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11793 				   "Failed to set up Rx control flow templates");
11794 		goto err;
11795 	}
11796 	/* Initialize quotas */
11797 	if (port_attr->nb_quotas || (host_priv && host_priv->quota_ctx.devx_obj)) {
11798 		ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas);
11799 		if (ret) {
11800 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11801 					   "Failed to initialize quota.");
11802 			goto err;
11803 		}
11804 	}
11805 	/* Initialize meter library. */
11806 	if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
11807 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 0, 0, nb_q_updated))
11808 			goto err;
11809 	/* Add global actions. */
11810 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
11811 		uint32_t act_flags = 0;
11812 
11813 		act_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];
11814 		if (is_proxy)
11815 			act_flags |= mlx5_hw_act_flag[i][2];
11816 		priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
11817 		if (!priv->hw_drop[i])
11818 			goto err;
11819 		priv->hw_tag[i] = mlx5dr_action_create_tag
11820 			(priv->dr_ctx, mlx5_hw_act_flag[i][0]);
11821 		if (!priv->hw_tag[i])
11822 			goto err;
11823 	}
11824 	if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) {
11825 		ret = flow_hw_setup_tx_repr_tagging(dev, error);
11826 		if (ret)
11827 			goto err;
11828 	}
11829 	/*
11830 	 * The DEFAULT_MISS action has different behaviors in different domains.
11831 	 * In FDB, it steers packets to the E-Switch manager.
11832 	 * In the NIC Rx root table, it steers packets to the kernel driver stack.
11833 	 * An action with all domain flags set can be created and the HWS
11834 	 * layer will translate it properly when it is used in different rules.
11835 	 */
11836 	action_flags = MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX |
11837 		       MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX;
11838 	if (is_proxy)
11839 		action_flags |= (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB);
11840 	priv->hw_def_miss = mlx5dr_action_create_default_miss(priv->dr_ctx, action_flags);
11841 	if (!priv->hw_def_miss)
11842 		goto err;
11843 	if (is_proxy) {
11844 		ret = flow_hw_create_vport_actions(priv);
11845 		if (ret) {
11846 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11847 					   NULL, "Failed to create vport actions.");
11848 			goto err;
11849 		}
11850 		ret = flow_hw_create_ctrl_tables(dev, error);
11851 		if (ret) {
11852 			rte_errno = -ret;
11853 			goto err;
11854 		}
11855 	}
11856 	if (!priv->shared_host)
11857 		flow_hw_create_send_to_kernel_actions(priv);
11858 	if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
11859 		if (mlx5_flow_ct_init(dev, port_attr->nb_conn_tracks, nb_q_updated))
11860 			goto err;
11861 	}
11862 	if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
11863 		if (mlx5_hws_cnt_pool_create(dev, port_attr->nb_counters,
11864 						nb_queue,
11865 						(host_priv ? host_priv->hws_cpool : NULL)))
11866 			goto err;
11867 	}
11868 	if (port_attr->nb_aging_objects) {
11869 		if (port_attr->nb_counters == 0) {
11870 			/*
11871 			 * Aging management uses counters. The requested number of
11872 			 * counters should account for one counter per flow rule
11873 			 * that contains an AGE action without a COUNT action.
11874 			 */
11875 			DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
11876 				"without counters requesting.",
11877 				dev->data->port_id,
11878 				port_attr->nb_aging_objects);
11879 			rte_errno = EINVAL;
11880 			goto err;
11881 		}
11882 		if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
11883 			DRV_LOG(ERR, "Aging is not supported "
11884 				"in cross vHCA sharing mode");
11885 			ret = -ENOTSUP;
11886 			goto err;
11887 		}
11888 		strict_queue = !!(port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
11889 		ret = mlx5_hws_age_pool_init(dev, port_attr->nb_aging_objects,
11890 						nb_queue, strict_queue);
11891 		if (ret < 0)
11892 			goto err;
11893 	}
11894 	ret = flow_hw_create_vlan(dev);
11895 	if (ret) {
11896 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11897 				   NULL, "Failed to VLAN actions.");
11898 		goto err;
11899 	}
11900 	if (flow_hw_create_nat64_actions(priv, error))
11901 		DRV_LOG(WARNING, "Cannot create NAT64 action on port %u, "
11902 			"please check the FW version", dev->data->port_id);
11903 	if (_queue_attr)
11904 		mlx5_free(_queue_attr);
11905 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
11906 		priv->hws_strict_queue = 1;
11907 	dev->flow_fp_ops = &mlx5_flow_hw_fp_ops;
11908 	return 0;
11909 err:
11910 	__flow_hw_resource_release(dev, true);
11911 	if (_queue_attr)
11912 		mlx5_free(_queue_attr);
11913 	/* Do not overwrite the internal errno information. */
11914 	if (ret)
11915 		return ret;
11916 	return rte_flow_error_set(error, rte_errno,
11917 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11918 				  "fail to configure port");
11919 }
11920 
11921 /**
11922  * Configure port HWS resources.
11923  *
11924  * @param[in] dev
11925  *   Pointer to the rte_eth_dev structure.
11926  * @param[in] port_attr
11927  *   Port configuration attributes.
11928  * @param[in] nb_queue
11929  *   Number of queues.
11930  * @param[in] queue_attr
11931  *   Array that holds attributes for each flow queue.
11932  * @param[out] error
11933  *   Pointer to error structure.
11934  *
11935  * @return
11936  *   0 on success, a negative errno value otherwise and rte_errno is set.
11937  */
11938 static int
11939 flow_hw_configure(struct rte_eth_dev *dev,
11940 		  const struct rte_flow_port_attr *port_attr,
11941 		  uint16_t nb_queue,
11942 		  const struct rte_flow_queue_attr *queue_attr[],
11943 		  struct rte_flow_error *error)
11944 {
11945 	return __flow_hw_configure(dev, port_attr, nb_queue, queue_attr, false, error);
11946 }
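/*
 * Illustrative usage sketch (not part of the driver): applications reach this
 * callback through the generic rte_flow API, for example (values are arbitrary):
 *
 * @code{.c}
 * struct rte_flow_port_attr port_attr = { .nb_counters = 1 << 12 };
 * struct rte_flow_queue_attr queue_attr = { .size = 256 };
 * const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };
 * struct rte_flow_error error;
 *
 * if (rte_flow_configure(port_id, &port_attr, 1, queue_attrs, &error) != 0)
 *     rte_exit(EXIT_FAILURE, "configure failed: %s\n",
 *              error.message ? error.message : "(no stated reason)");
 * @endcode
 */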
11947 
11948 /**
11949  * Release HWS resources.
11950  *
11951  * @param[in] dev
11952  *   Pointer to the rte_eth_dev structure.
11953  */
11954 void
11955 flow_hw_resource_release(struct rte_eth_dev *dev)
11956 {
11957 	struct mlx5_priv *priv = dev->data->dev_private;
11958 
11959 	if (!priv->dr_ctx)
11960 		return;
11961 	__flow_hw_resource_release(dev, false);
11962 }
11963 
11964 /* Sets the vport tag and mask used in HWS rules for the given port. */
11965 void
11966 flow_hw_set_port_info(struct rte_eth_dev *dev)
11967 {
11968 	struct mlx5_priv *priv = dev->data->dev_private;
11969 	uint16_t port_id = dev->data->port_id;
11970 	struct flow_hw_port_info *info;
11971 
11972 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
11973 	info = &mlx5_flow_hw_port_infos[port_id];
11974 	info->regc_mask = priv->vport_meta_mask;
11975 	info->regc_value = priv->vport_meta_tag;
11976 	info->is_wire = mlx5_is_port_on_mpesw_device(priv) ? priv->mpesw_uplink : priv->master;
11977 }
11978 
11979 /* Clears vport tag and mask used for HWS rules. */
11980 void
11981 flow_hw_clear_port_info(struct rte_eth_dev *dev)
11982 {
11983 	uint16_t port_id = dev->data->port_id;
11984 	struct flow_hw_port_info *info;
11985 
11986 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
11987 	info = &mlx5_flow_hw_port_infos[port_id];
11988 	info->regc_mask = 0;
11989 	info->regc_value = 0;
11990 	info->is_wire = 0;
11991 }
11992 
11993 static int
11994 flow_hw_conntrack_destroy(struct rte_eth_dev *dev,
11995 			  uint32_t idx,
11996 			  struct rte_flow_error *error)
11997 {
11998 	struct mlx5_priv *priv = dev->data->dev_private;
11999 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12000 	struct mlx5_aso_ct_action *ct;
12001 
12002 	if (priv->shared_host)
12003 		return rte_flow_error_set(error, ENOTSUP,
12004 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12005 				NULL,
12006 				"CT destruction is not allowed to guest port");
12007 	ct = mlx5_ipool_get(pool->cts, idx);
12008 	if (!ct) {
12009 		return rte_flow_error_set(error, EINVAL,
12010 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12011 				NULL,
12012 				"Invalid CT destruction index");
12013 	}
12014 	rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
12015 				 rte_memory_order_relaxed);
12016 	mlx5_ipool_free(pool->cts, idx);
12017 	return 0;
12018 }
12019 
12020 static int
12021 flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,
12022 			struct rte_flow_action_conntrack *profile,
12023 			void *user_data, bool push,
12024 			struct rte_flow_error *error)
12025 {
12026 	struct mlx5_priv *priv = dev->data->dev_private;
12027 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12028 	struct mlx5_aso_ct_action *ct;
12029 
12030 	if (priv->shared_host)
12031 		return rte_flow_error_set(error, ENOTSUP,
12032 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12033 				NULL,
12034 				"CT query is not allowed to guest port");
12035 	ct = mlx5_ipool_get(pool->cts, idx);
12036 	if (!ct) {
12037 		return rte_flow_error_set(error, EINVAL,
12038 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12039 				NULL,
12040 				"Invalid CT query index");
12041 	}
12042 	profile->peer_port = ct->peer;
12043 	profile->is_original_dir = ct->is_original;
12044 	if (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))
12045 		return rte_flow_error_set(error, EIO,
12046 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12047 				NULL,
12048 				"Failed to query CT context");
12049 	return 0;
12050 }
12051 
12052 
12053 static int
12054 flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
12055 			 const struct rte_flow_modify_conntrack *action_conf,
12056 			 uint32_t idx, void *user_data, bool push,
12057 			 struct rte_flow_error *error)
12058 {
12059 	struct mlx5_priv *priv = dev->data->dev_private;
12060 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12061 	struct mlx5_aso_ct_action *ct;
12062 	const struct rte_flow_action_conntrack *new_prf;
12063 	int ret = 0;
12064 
12065 	if (priv->shared_host)
12066 		return rte_flow_error_set(error, ENOTSUP,
12067 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12068 				NULL,
12069 				"CT update is not allowed to guest port");
12070 	ct = mlx5_ipool_get(pool->cts, idx);
12071 	if (!ct) {
12072 		return rte_flow_error_set(error, EINVAL,
12073 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12074 				NULL,
12075 				"Invalid CT update index");
12076 	}
12077 	new_prf = &action_conf->new_ct;
12078 	if (action_conf->direction)
12079 		ct->is_original = !!new_prf->is_original_dir;
12080 	if (action_conf->state) {
12081 		/* Only validate the profile when it needs to be updated. */
12082 		ret = mlx5_validate_action_ct(dev, new_prf, error);
12083 		if (ret)
12084 			return ret;
12085 		ret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,
12086 						user_data, push);
12087 		if (ret)
12088 			return rte_flow_error_set(error, EIO,
12089 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12090 					NULL,
12091 					"Failed to send CT context update WQE");
12092 		if (queue != MLX5_HW_INV_QUEUE)
12093 			return 0;
12094 		/* Block until ready or a failure in synchronous mode. */
12095 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
12096 		if (ret)
12097 			rte_flow_error_set(error, rte_errno,
12098 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12099 					   NULL,
12100 					   "Timeout to get the CT update");
12101 	}
12102 	return ret;
12103 }
12104 
12105 static struct rte_flow_action_handle *
12106 flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
12107 			 const struct rte_flow_action_conntrack *pro,
12108 			 void *user_data, bool push,
12109 			 struct rte_flow_error *error)
12110 {
12111 	struct mlx5_priv *priv = dev->data->dev_private;
12112 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12113 	struct mlx5_aso_ct_action *ct;
12114 	uint32_t ct_idx = 0;
12115 	int ret;
12116 	bool async = !!(queue != MLX5_HW_INV_QUEUE);
12117 
12118 	if (priv->shared_host) {
12119 		rte_flow_error_set(error, ENOTSUP,
12120 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12121 				NULL,
12122 				"CT create is not allowed to guest port");
12123 		return NULL;
12124 	}
12125 	if (!pool) {
12126 		rte_flow_error_set(error, EINVAL,
12127 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12128 				   "CT is not enabled");
12129 		return 0;
12130 	}
12131 	ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
12132 	if (!ct) {
12133 		rte_flow_error_set(error, rte_errno,
12134 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12135 				   "Failed to allocate CT object");
12136 		return 0;
12137 	}
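	/* The ipool index is 1-based while the ASO CT offset is 0-based, hence the -1. */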
12138 	ct->offset = ct_idx - 1;
12139 	ct->is_original = !!pro->is_original_dir;
12140 	ct->peer = pro->peer_port;
12141 	ct->pool = pool;
12142 	if (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {
12143 		mlx5_ipool_free(pool->cts, ct_idx);
12144 		rte_flow_error_set(error, EBUSY,
12145 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12146 				   "Failed to update CT");
12147 		return 0;
12148 	}
12149 	if (!async) {
12150 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
12151 		if (ret) {
12152 			mlx5_ipool_free(pool->cts, ct_idx);
12153 			rte_flow_error_set(error, rte_errno,
12154 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12155 					   NULL,
12156 					   "Timeout to get the CT update");
12157 			return 0;
12158 		}
12159 	}
12160 	return MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(ct_idx);
12161 }
12162 
12163 /**
12164  * Validate shared action.
12165  *
12166  * @param[in] dev
12167  *   Pointer to the rte_eth_dev structure.
12168  * @param[in] queue
12169  *   Which queue to be used.
12170  * @param[in] attr
12171  *   Operation attribute.
12172  * @param[in] conf
12173  *   Indirect action configuration.
12174  * @param[in] action
12175  *   rte_flow action detail.
12176  * @param[in] user_data
12177  *   Pointer to the user_data.
12178  * @param[out] error
12179  *   Pointer to error structure.
12180  *
12181  * @return
12182  *   0 on success, otherwise negative errno value.
12183  */
12184 static int
12185 flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
12186 			       const struct rte_flow_op_attr *attr,
12187 			       const struct rte_flow_indir_action_conf *conf,
12188 			       const struct rte_flow_action *action,
12189 			       void *user_data,
12190 			       struct rte_flow_error *error)
12191 {
12192 	struct mlx5_priv *priv = dev->data->dev_private;
12193 
12194 	RTE_SET_USED(attr);
12195 	RTE_SET_USED(queue);
12196 	RTE_SET_USED(user_data);
12197 	switch (action->type) {
12198 	case RTE_FLOW_ACTION_TYPE_AGE:
12199 		if (!priv->hws_age_req) {
12200 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_AGE,
12201 						     error))
12202 				return rte_flow_error_set
12203 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12204 					 NULL, "aging pool not initialized");
12205 		}
12206 		break;
12207 	case RTE_FLOW_ACTION_TYPE_COUNT:
12208 		if (!priv->hws_cpool) {
12209 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_COUNT,
12210 						     error))
12211 				return rte_flow_error_set
12212 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12213 					 NULL, "counters pool not initialized");
12214 		}
12215 		break;
12216 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12217 		if (priv->hws_ctpool == NULL) {
12218 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_CT,
12219 						     error))
12220 				return rte_flow_error_set
12221 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12222 					 NULL, "CT pool not initialized");
12223 		}
12224 		return mlx5_validate_action_ct(dev, action->conf, error);
12225 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
12226 		return flow_hw_validate_action_meter_mark(dev, action, true, error);
12227 	case RTE_FLOW_ACTION_TYPE_RSS:
12228 		return flow_dv_action_validate(dev, conf, action, error);
12229 	case RTE_FLOW_ACTION_TYPE_QUOTA:
12230 		return 0;
12231 	default:
12232 		return rte_flow_error_set(error, ENOTSUP,
12233 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12234 					  "action type not supported");
12235 	}
12236 	return 0;
12237 }
12238 
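/*
 * An indirect action operation is pushed to HW immediately unless the caller
 * explicitly asked to postpone it; a NULL attribute means a synchronous call.
 */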
12239 static __rte_always_inline bool
12240 flow_hw_action_push(const struct rte_flow_op_attr *attr)
12241 {
12242 	return attr ? !attr->postpone : true;
12243 }
12244 
12245 static __rte_always_inline struct mlx5_hw_q_job *
12246 flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
12247 			const struct rte_flow_action_handle *handle,
12248 			void *user_data, void *query_data,
12249 			enum mlx5_hw_job_type type,
12250 			enum mlx5_hw_indirect_type indirect_type,
12251 			struct rte_flow_error *error)
12252 {
12253 	struct mlx5_hw_q_job *job;
12254 
12255 	if (queue == MLX5_HW_INV_QUEUE)
12256 		queue = CTRL_QUEUE_ID(priv);
12257 	job = flow_hw_job_get(priv, queue);
12258 	if (!job) {
12259 		rte_flow_error_set(error, ENOMEM,
12260 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
12261 				   "Action destroy failed due to queue full.");
12262 		return NULL;
12263 	}
12264 	job->type = type;
12265 	job->action = handle;
12266 	job->user_data = user_data;
12267 	job->query.user = query_data;
12268 	job->indirect_type = indirect_type;
12269 	return job;
12270 }
12271 
12272 struct mlx5_hw_q_job *
12273 mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
12274 			  const struct rte_flow_action_handle *handle,
12275 			  void *user_data, void *query_data,
12276 			  enum mlx5_hw_job_type type,
12277 			  struct rte_flow_error *error)
12278 {
12279 	return flow_hw_action_job_init(priv, queue, handle, user_data, query_data,
12280 				       type, MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12281 }
12282 
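/*
 * Complete an indirect action job. On success, a non-ASO job is queued either
 * to the completion ring (pushed) or to the internal pending ring (postponed),
 * and pending work is flushed when a push was requested. On failure, the job
 * is returned to the per-queue free list.
 */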
12283 static __rte_always_inline void
12284 flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
12285 			struct mlx5_hw_q_job *job,
12286 			bool push, bool aso, bool status)
12287 {
12288 	struct mlx5_priv *priv = dev->data->dev_private;
12289 
12290 	if (queue == MLX5_HW_INV_QUEUE)
12291 		queue = CTRL_QUEUE_ID(priv);
12292 	if (likely(status)) {
12293 		/* 1. add new job to a queue */
12294 		if (!aso)
12295 			rte_ring_enqueue(push ?
12296 					 priv->hw_q[queue].indir_cq :
12297 					 priv->hw_q[queue].indir_iq,
12298 					 job);
12299 		/* 2. send pending jobs */
12300 		if (push)
12301 			__flow_hw_push_action(dev, queue);
12302 	} else {
12303 		flow_hw_job_put(priv, job, queue);
12304 	}
12305 }
12306 
12307 /**
12308  * Create shared action.
12309  *
12310  * @param[in] dev
12311  *   Pointer to the rte_eth_dev structure.
12312  * @param[in] queue
12313  *   Which queue to be used.
12314  * @param[in] attr
12315  *   Operation attribute.
12316  * @param[in] conf
12317  *   Indirect action configuration.
12318  * @param[in] action
12319  *   rte_flow action detail.
12320  * @param[in] user_data
12321  *   Pointer to the user_data.
12322  * @param[out] error
12323  *   Pointer to error structure.
12324  *
12325  * @return
12326  *   Action handle on success, NULL otherwise and rte_errno is set.
12327  */
12328 static struct rte_flow_action_handle *
12329 flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
12330 			     const struct rte_flow_op_attr *attr,
12331 			     const struct rte_flow_indir_action_conf *conf,
12332 			     const struct rte_flow_action *action,
12333 			     void *user_data,
12334 			     struct rte_flow_error *error)
12335 {
12336 	struct rte_flow_action_handle *handle = NULL;
12337 	struct mlx5_hw_q_job *job = NULL;
12338 	struct mlx5_priv *priv = dev->data->dev_private;
12339 	const struct rte_flow_action_age *age;
12340 	struct mlx5_aso_mtr *aso_mtr;
12341 	cnt_id_t cnt_id;
12342 	uint32_t age_idx;
12343 	bool push = flow_hw_action_push(attr);
12344 	bool aso = false;
12345 	bool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK;
12346 
12347 	if (!mlx5_hw_ctx_validate(dev, error))
12348 		return NULL;
12349 	if (attr || force_job) {
12350 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
12351 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
12352 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12353 		if (!job)
12354 			return NULL;
12355 	}
12356 	switch (action->type) {
12357 	case RTE_FLOW_ACTION_TYPE_AGE:
12358 		if (priv->hws_strict_queue) {
12359 			struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
12360 
12361 			if (queue >= info->hw_q_age->nb_rings) {
12362 				rte_flow_error_set(error, EINVAL,
12363 						   RTE_FLOW_ERROR_TYPE_ACTION,
12364 						   NULL,
12365 						   "Invalid queue ID for indirect AGE.");
12366 				rte_errno = EINVAL;
12367 				return NULL;
12368 			}
12369 		}
12370 		age = action->conf;
12371 		age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
12372 						     0, error);
12373 		if (age_idx == 0) {
12374 			rte_flow_error_set(error, ENODEV,
12375 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12376 					   "AGE are not configured!");
12377 		} else {
12378 			age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
12379 				   MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
12380 			handle =
12381 			    (struct rte_flow_action_handle *)(uintptr_t)age_idx;
12382 		}
12383 		break;
12384 	case RTE_FLOW_ACTION_TYPE_COUNT:
12385 		if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
12386 			rte_flow_error_set(error, ENODEV,
12387 					RTE_FLOW_ERROR_TYPE_ACTION,
12388 					NULL,
12389 					"counter are not configured!");
12390 		else
12391 			handle = (struct rte_flow_action_handle *)
12392 				 (uintptr_t)cnt_id;
12393 		break;
12394 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12395 		aso = true;
12396 		handle = flow_hw_conntrack_create(dev, queue, action->conf, job,
12397 						  push, error);
12398 		break;
12399 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
12400 		aso = true;
12401 		aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push, error);
12402 		if (!aso_mtr)
12403 			break;
12404 		handle = (void *)(uintptr_t)job->action;
12405 		break;
12406 	case RTE_FLOW_ACTION_TYPE_RSS:
12407 		handle = flow_dv_action_create(dev, conf, action, error);
12408 		break;
12409 	case RTE_FLOW_ACTION_TYPE_QUOTA:
12410 		aso = true;
12411 		handle = mlx5_quota_alloc(dev, queue, action->conf,
12412 					  job, push, error);
12413 		break;
12414 	default:
12415 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12416 				   NULL, "action type not supported");
12417 		break;
12418 	}
12419 	if (job && !force_job) {
12420 		job->action = handle;
12421 		flow_hw_action_finalize(dev, queue, job, push, aso,
12422 					handle != NULL);
12423 	}
12424 	return handle;
12425 }
12426 
12427 static int
12428 mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,
12429 			    const struct rte_flow_update_meter_mark *upd_meter_mark,
12430 			    uint32_t idx, bool push,
12431 			    struct mlx5_hw_q_job *job, struct rte_flow_error *error)
12432 {
12433 	struct mlx5_priv *priv = dev->data->dev_private;
12434 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
12435 	const struct rte_flow_action_meter_mark *meter_mark = &upd_meter_mark->meter_mark;
12436 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
12437 	struct mlx5_flow_meter_info *fm;
12438 
12439 	if (!aso_mtr)
12440 		return rte_flow_error_set(error, EINVAL,
12441 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12442 					  NULL, "Invalid meter_mark update index");
12443 	fm = &aso_mtr->fm;
12444 	if (upd_meter_mark->profile_valid)
12445 		fm->profile = (struct mlx5_flow_meter_profile *)
12446 			(meter_mark->profile);
12447 	if (upd_meter_mark->color_mode_valid)
12448 		fm->color_aware = meter_mark->color_mode;
12449 	if (upd_meter_mark->state_valid)
12450 		fm->is_enable = meter_mark->state;
12451 	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
12452 			 ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
12453 	/* Update ASO flow meter by wqe. */
12454 	if (mlx5_aso_meter_update_by_wqe(priv, queue,
12455 					 aso_mtr, &priv->mtr_bulk, job, push))
12456 		return rte_flow_error_set(error, EINVAL,
12457 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12458 					  NULL, "Unable to update ASO meter WQE");
12459 	/* Wait for ASO object completion. */
12460 	if (queue == MLX5_HW_INV_QUEUE &&
12461 	    mlx5_aso_mtr_wait(priv, aso_mtr, true))
12462 		return rte_flow_error_set(error, EINVAL,
12463 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12464 					  NULL, "Unable to wait for ASO meter CQE");
12465 	return 0;
12466 }
12467 
12468 /**
12469  * Update shared action.
12470  *
12471  * @param[in] dev
12472  *   Pointer to the rte_eth_dev structure.
12473  * @param[in] queue
12474  *   Which queue to be used.
12475  * @param[in] attr
12476  *   Operation attribute.
12477  * @param[in] handle
12478  *   Action handle to be updated.
12479  * @param[in] update
12480  *   Update value.
12481  * @param[in] user_data
12482  *   Pointer to the user_data.
12483  * @param[out] error
12484  *   Pointer to error structure.
12485  *
12486  * @return
12487  *   0 on success, negative value otherwise and rte_errno is set.
12488  */
12489 static int
12490 flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
12491 			     const struct rte_flow_op_attr *attr,
12492 			     struct rte_flow_action_handle *handle,
12493 			     const void *update,
12494 			     void *user_data,
12495 			     struct rte_flow_error *error)
12496 {
12497 	struct mlx5_priv *priv = dev->data->dev_private;
12498 	const struct rte_flow_modify_conntrack *ct_conf =
12499 		(const struct rte_flow_modify_conntrack *)update;
12500 	struct mlx5_hw_q_job *job = NULL;
12501 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12502 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12503 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
12504 	int ret = 0;
12505 	bool push = flow_hw_action_push(attr);
12506 	bool aso = false;
12507 	bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
12508 
12509 	if (attr || force_job) {
12510 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
12511 					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
12512 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12513 		if (!job)
12514 			return -rte_errno;
12515 	}
12516 	switch (type) {
12517 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
12518 		ret = mlx5_hws_age_action_update(priv, idx, update, error);
12519 		break;
12520 	case MLX5_INDIRECT_ACTION_TYPE_CT:
12521 		if (ct_conf->state)
12522 			aso = true;
12523 		ret = flow_hw_conntrack_update(dev, queue, update, idx,
12524 					       job, push, error);
12525 		break;
12526 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
12527 		aso = true;
12528 		ret = mlx5_flow_update_meter_mark(dev, queue, update, idx, push,
12529 						  job, error);
12530 		break;
12531 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
12532 		ret = flow_dv_action_update(dev, handle, update, error);
12533 		break;
12534 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12535 		aso = true;
12536 		ret = mlx5_quota_query_update(dev, queue, handle, update, NULL,
12537 					      job, push, error);
12538 		break;
12539 	default:
12540 		ret = -ENOTSUP;
12541 		rte_flow_error_set(error, ENOTSUP,
12542 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12543 					  "action type not supported");
12544 		break;
12545 	}
12546 	if (job && !force_job)
12547 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12548 	return ret;
12549 }
12550 
12551 /**
12552  * Destroy shared action.
12553  *
12554  * @param[in] dev
12555  *   Pointer to the rte_eth_dev structure.
12556  * @param[in] queue
12557  *   Which queue to be used.
12558  * @param[in] attr
12559  *   Operation attribute.
12560  * @param[in] handle
12561  *   Action handle to be destroyed.
12562  * @param[in] user_data
12563  *   Pointer to the user_data.
12564  * @param[out] error
12565  *   Pointer to error structure.
12566  *
12567  * @return
12568  *   0 on success, negative value otherwise and rte_errno is set.
12569  */
12570 static int
12571 flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
12572 			      const struct rte_flow_op_attr *attr,
12573 			      struct rte_flow_action_handle *handle,
12574 			      void *user_data,
12575 			      struct rte_flow_error *error)
12576 {
12577 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12578 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12579 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
12580 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
12581 	struct mlx5_priv *priv = dev->data->dev_private;
12582 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
12583 	struct mlx5_hw_q_job *job = NULL;
12584 	struct mlx5_aso_mtr *aso_mtr;
12585 	struct mlx5_flow_meter_info *fm;
12586 	bool push = flow_hw_action_push(attr);
12587 	bool aso = false;
12588 	int ret = 0;
12589 	bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
12590 
12591 	if (attr || force_job) {
12592 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
12593 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
12594 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12595 		if (!job)
12596 			return -rte_errno;
12597 	}
12598 	switch (type) {
12599 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
12600 		ret = mlx5_hws_age_action_destroy(priv, age_idx, error);
12601 		break;
12602 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
12603 		age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
12604 		if (age_idx != 0)
12605 			/*
12606 			 * If this counter belongs to an indirect AGE action,
12607 			 * update the AGE information now.
12608 			 */
12609 			mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
12610 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
12611 		break;
12612 	case MLX5_INDIRECT_ACTION_TYPE_CT:
12613 		ret = flow_hw_conntrack_destroy(dev, idx, error);
12614 		break;
12615 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
12616 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
12617 		if (!aso_mtr) {
12618 			ret = -EINVAL;
12619 			rte_flow_error_set(error, EINVAL,
12620 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12621 				NULL, "Invalid meter_mark destroy index");
12622 			break;
12623 		}
12624 		fm = &aso_mtr->fm;
12625 		fm->is_enable = 0;
12626 		/* Update ASO flow meter by wqe. */
12627 		if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
12628 						 &priv->mtr_bulk, job, push)) {
12629 			ret = -EINVAL;
12630 			rte_flow_error_set(error, EINVAL,
12631 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12632 				NULL, "Unable to update ASO meter WQE");
12633 			break;
12634 		}
12635 		/* Wait for ASO object completion. */
12636 		if (queue == MLX5_HW_INV_QUEUE &&
12637 		    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
12638 			ret = -EINVAL;
12639 			rte_flow_error_set(error, EINVAL,
12640 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12641 				NULL, "Unable to wait for ASO meter CQE");
12642 			break;
12643 		}
12644 		aso = true;
12645 		break;
12646 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
12647 		ret = flow_dv_action_destroy(dev, handle, error);
12648 		break;
12649 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12650 		break;
12651 	default:
12652 		ret = -ENOTSUP;
12653 		rte_flow_error_set(error, ENOTSUP,
12654 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12655 					  "action type not supported");
12656 		break;
12657 	}
12658 	if (job && !force_job)
12659 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12660 	return ret;
12661 }
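
/**
 * Illustrative usage sketch, not part of the driver: an application reaches
 * the dispatcher above through the generic asynchronous indirect action API.
 * The port_id, queue_id and handle values below are placeholders.
 *
 * @code{.c}
 * struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * struct rte_flow_error error;
 *
 * // Enqueue destruction of a previously created indirect action.
 * if (rte_flow_async_action_handle_destroy(port_id, queue_id, &op_attr,
 *                                          handle, NULL, &error))
 *         printf("destroy failed\n");
 * // The completion is reported later through rte_flow_pull().
 * @endcode
 */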
12662 
12663 static int
12664 flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
12665 		      void *data, struct rte_flow_error *error)
12666 {
12667 	struct mlx5_hws_cnt_pool *hpool;
12668 	struct mlx5_priv *priv = dev->data->dev_private;
12669 	struct mlx5_hws_cnt *cnt;
12670 	struct rte_flow_query_count *qc = data;
12671 	uint32_t iidx;
12672 	uint64_t pkts, bytes;
12673 
12674 	if (!mlx5_hws_cnt_id_valid(counter))
12675 		return rte_flow_error_set(error, EINVAL,
12676 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12677 				"counters are not available");
12678 	hpool = mlx5_hws_cnt_host_pool(priv->hws_cpool);
12679 	iidx = mlx5_hws_cnt_iidx(hpool, counter);
12680 	cnt = &hpool->pool[iidx];
12681 	__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
12682 	qc->hits_set = 1;
12683 	qc->bytes_set = 1;
12684 	qc->hits = pkts - cnt->reset.hits;
12685 	qc->bytes = bytes - cnt->reset.bytes;
12686 	if (qc->reset) {
12687 		cnt->reset.bytes = bytes;
12688 		cnt->reset.hits = pkts;
12689 	}
12690 	return 0;
12691 }
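
/**
 * Illustrative sketch, not part of the driver: flow_hw_query_counter() above
 * backs the synchronous rte_flow_query() path for the COUNT action. The
 * port_id and flow values are placeholders for a rule created with COUNT.
 *
 * @code{.c}
 * struct rte_flow_query_count qc = { .reset = 1 };
 * const struct rte_flow_action count_action = {
 *         .type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * struct rte_flow_error error;
 *
 * if (rte_flow_query(port_id, flow, &count_action, &qc, &error) == 0 &&
 *     qc.hits_set && qc.bytes_set)
 *         printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", qc.hits, qc.bytes);
 * @endcode
 */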
12692 
12693 /**
12694  * Query a flow rule AGE action for aging information.
12695  *
12696  * @param[in] dev
12697  *   Pointer to Ethernet device.
12698  * @param[in] age_idx
12699  *   Index of AGE action parameter.
12700  * @param[out] data
12701  *   Data retrieved by the query.
12702  * @param[out] error
12703  *   Perform verbose error reporting if not NULL.
12704  *
12705  * @return
12706  *   0 on success, a negative errno value otherwise and rte_errno is set.
12707  */
12708 static int
12709 flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
12710 		  struct rte_flow_error *error)
12711 {
12712 	struct mlx5_priv *priv = dev->data->dev_private;
12713 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
12714 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
12715 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
12716 	struct rte_flow_query_age *resp = data;
12717 
12718 	if (!param || !param->timeout)
12719 		return rte_flow_error_set(error, EINVAL,
12720 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12721 					  NULL, "age data not available");
12722 	switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
12723 	case HWS_AGE_AGED_OUT_REPORTED:
12724 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
12725 		resp->aged = 1;
12726 		break;
12727 	case HWS_AGE_CANDIDATE:
12728 	case HWS_AGE_CANDIDATE_INSIDE_RING:
12729 		resp->aged = 0;
12730 		break;
12731 	case HWS_AGE_FREE:
12732 		/*
12733 		 * When state is FREE the flow itself should be invalid.
12734 		 * Fall-through.
12735 		 */
12736 	default:
12737 		MLX5_ASSERT(0);
12738 		break;
12739 	}
12740 	resp->sec_since_last_hit_valid = !resp->aged;
12741 	if (resp->sec_since_last_hit_valid)
12742 		resp->sec_since_last_hit = rte_atomic_load_explicit
12743 				 (&param->sec_since_last_hit, rte_memory_order_relaxed);
12744 	return 0;
12745 }
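
/**
 * Illustrative sketch, not part of the driver: the AGE state decoded above can
 * also be retrieved by an application through an indirect AGE action handle.
 * The port_id and age_handle values are placeholders.
 *
 * @code{.c}
 * struct rte_flow_query_age age = { 0 };
 * struct rte_flow_error error;
 *
 * if (rte_flow_action_handle_query(port_id, age_handle, &age, &error) == 0) {
 *         if (age.aged)
 *                 printf("flow aged out\n");
 *         else if (age.sec_since_last_hit_valid)
 *                 printf("last hit %u seconds ago\n", age.sec_since_last_hit);
 * }
 * @endcode
 */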
12746 
12747 static int
12748 flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
12749 	      const struct rte_flow_action *actions, void *data,
12750 	      struct rte_flow_error *error)
12751 {
12752 	int ret = -EINVAL;
12753 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
12754 	struct rte_flow_hw_aux *aux;
12755 
12756 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
12757 		switch (actions->type) {
12758 		case RTE_FLOW_ACTION_TYPE_VOID:
12759 			break;
12760 		case RTE_FLOW_ACTION_TYPE_COUNT:
12761 			if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID))
12762 				return rte_flow_error_set(error, EINVAL,
12763 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12764 							  "counter not defined in the rule");
12765 			ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
12766 						    error);
12767 			break;
12768 		case RTE_FLOW_ACTION_TYPE_AGE:
12769 			if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX))
12770 				return rte_flow_error_set(error, EINVAL,
12771 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12772 							  "age data not available");
12773 			aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
12774 			ret = flow_hw_query_age(dev, mlx5_flow_hw_aux_get_age_idx(hw_flow, aux),
12775 						data, error);
12776 			break;
12777 		default:
12778 			return rte_flow_error_set(error, ENOTSUP,
12779 						  RTE_FLOW_ERROR_TYPE_ACTION,
12780 						  actions,
12781 						  "action not supported");
12782 		}
12783 	}
12784 	return ret;
12785 }
12786 
12787 /**
12788  * Validate indirect action.
12789  *
12790  * @param[in] dev
12791  *   Pointer to the Ethernet device structure.
12792  * @param[in] conf
12793  *   Shared action configuration.
12794  * @param[in] action
12795  *   Action specification used to create indirect action.
12796  * @param[out] error
12797  *   Perform verbose error reporting if not NULL. Initialized in case of
12798  *   error only.
12799  *
12800  * @return
12801  *   0 on success, otherwise negative errno value.
12802  */
12803 static int
12804 flow_hw_action_validate(struct rte_eth_dev *dev,
12805 			const struct rte_flow_indir_action_conf *conf,
12806 			const struct rte_flow_action *action,
12807 			struct rte_flow_error *err)
12808 {
12809 	return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
12810 					      conf, action, NULL, err);
12811 }
12812 
12813 /**
12814  * Create indirect action.
12815  *
12816  * @param[in] dev
12817  *   Pointer to the Ethernet device structure.
12818  * @param[in] conf
12819  *   Shared action configuration.
12820  * @param[in] action
12821  *   Action specification used to create indirect action.
12822  * @param[out] error
12823  *   Perform verbose error reporting if not NULL. Initialized in case of
12824  *   error only.
12825  *
12826  * @return
12827  *   A valid shared action handle in case of success, NULL otherwise and
12828  *   rte_errno is set.
12829  */
12830 static struct rte_flow_action_handle *
12831 flow_hw_action_create(struct rte_eth_dev *dev,
12832 		       const struct rte_flow_indir_action_conf *conf,
12833 		       const struct rte_flow_action *action,
12834 		       struct rte_flow_error *err)
12835 {
12836 	return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
12837 					    NULL, conf, action, NULL, err);
12838 }
12839 
12840 /**
12841  * Destroy the indirect action.
12842  * Release action related resources on the NIC and the memory.
12843  * Lock free (the mutex should be acquired by the caller).
12844  * Dispatcher for action type specific call.
12845  *
12846  * @param[in] dev
12847  *   Pointer to the Ethernet device structure.
12848  * @param[in] handle
12849  *   The indirect action object handle to be removed.
12850  * @param[out] error
12851  *   Perform verbose error reporting if not NULL. Initialized in case of
12852  *   error only.
12853  *
12854  * @return
12855  *   0 on success, otherwise negative errno value.
12856  */
12857 static int
12858 flow_hw_action_destroy(struct rte_eth_dev *dev,
12859 		       struct rte_flow_action_handle *handle,
12860 		       struct rte_flow_error *error)
12861 {
12862 	return flow_hw_action_handle_destroy(dev, MLX5_HW_INV_QUEUE,
12863 			NULL, handle, NULL, error);
12864 }
12865 
12866 /**
12867  * Updates in place shared action configuration.
12868  *
12869  * @param[in] dev
12870  *   Pointer to the Ethernet device structure.
12871  * @param[in] handle
12872  *   The indirect action object handle to be updated.
12873  * @param[in] update
12874  *   Action specification used to modify the action pointed to by *handle*.
12875  *   *update* can be of the same type as the action pointed to by the *handle*
12876  *   argument, or some other structure such as a wrapper, depending on
12877  *   the indirect action type.
12878  * @param[out] error
12879  *   Perform verbose error reporting if not NULL. Initialized in case of
12880  *   error only.
12881  *
12882  * @return
12883  *   0 on success, otherwise negative errno value.
12884  */
12885 static int
12886 flow_hw_action_update(struct rte_eth_dev *dev,
12887 		      struct rte_flow_action_handle *handle,
12888 		      const void *update,
12889 		      struct rte_flow_error *err)
12890 {
12891 	return flow_hw_action_handle_update(dev, MLX5_HW_INV_QUEUE,
12892 			NULL, handle, update, NULL, err);
12893 }
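
/**
 * Illustrative sketch, not part of the driver: updating an indirect conntrack
 * action through the synchronous wrapper above. The port_id and ct_handle
 * values are placeholders; the wrapper structure comes from rte_flow.h.
 *
 * @code{.c}
 * struct rte_flow_modify_conntrack mod = {
 *         // new_ct carries the new conntrack parameters (omitted here).
 *         .state = 1, // update the state fields, keep the direction
 * };
 * struct rte_flow_error error;
 *
 * if (rte_flow_action_handle_update(port_id, ct_handle, &mod, &error))
 *         printf("CT update failed\n");
 * @endcode
 */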
12894 
12895 static int
12896 flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
12897 			    const struct rte_flow_op_attr *attr,
12898 			    const struct rte_flow_action_handle *handle,
12899 			    void *data, void *user_data,
12900 			    struct rte_flow_error *error)
12901 {
12902 	struct mlx5_priv *priv = dev->data->dev_private;
12903 	struct mlx5_hw_q_job *job = NULL;
12904 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12905 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12906 	uint32_t idx = MLX5_INDIRECT_ACTION_IDX_GET(handle);
12907 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
12908 	int ret;
12909 	bool push = flow_hw_action_push(attr);
12910 	bool aso = false;
12911 
12912 	if (attr) {
12913 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
12914 					      data, MLX5_HW_Q_JOB_TYPE_QUERY,
12915 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12916 		if (!job)
12917 			return -rte_errno;
12918 	}
12919 	switch (type) {
12920 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
12921 		ret = flow_hw_query_age(dev, age_idx, data, error);
12922 		break;
12923 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
12924 		ret = flow_hw_query_counter(dev, act_idx, data, error);
12925 		break;
12926 	case MLX5_INDIRECT_ACTION_TYPE_CT:
12927 		aso = true;
12928 		if (job)
12929 			job->query.user = data;
12930 		ret = flow_hw_conntrack_query(dev, queue, idx, data,
12931 					      job, push, error);
12932 		break;
12933 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12934 		aso = true;
12935 		ret = mlx5_quota_query(dev, queue, handle, data,
12936 				       job, push, error);
12937 		break;
12938 	default:
12939 		ret = -ENOTSUP;
12940 		rte_flow_error_set(error, ENOTSUP,
12941 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12942 					  "action type not supported");
12943 		break;
12944 	}
12945 	if (job)
12946 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12947 	return ret;
12948 }
12949 
12950 static int
12951 flow_hw_async_action_handle_query_update
12952 			(struct rte_eth_dev *dev, uint32_t queue,
12953 			 const struct rte_flow_op_attr *attr,
12954 			 struct rte_flow_action_handle *handle,
12955 			 const void *update, void *query,
12956 			 enum rte_flow_query_update_mode qu_mode,
12957 			 void *user_data, struct rte_flow_error *error)
12958 {
12959 	struct mlx5_priv *priv = dev->data->dev_private;
12960 	bool push = flow_hw_action_push(attr);
12961 	bool aso = false;
12962 	struct mlx5_hw_q_job *job = NULL;
12963 	int ret = 0;
12964 
12965 	if (attr) {
12966 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
12967 					      query,
12968 					      MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
12969 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12970 		if (!job)
12971 			return -rte_errno;
12972 	}
12973 	switch (MLX5_INDIRECT_ACTION_TYPE_GET(handle)) {
12974 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12975 		if (qu_mode != RTE_FLOW_QU_QUERY_FIRST) {
12976 			ret = rte_flow_error_set
12977 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
12978 				 NULL, "quota action must query before update");
12979 			break;
12980 		}
12981 		aso = true;
12982 		ret = mlx5_quota_query_update(dev, queue, handle,
12983 					      update, query, job, push, error);
12984 		break;
12985 	default:
12986 		ret = rte_flow_error_set(error, ENOTSUP,
12987 					 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "update and query not supported");
12988 	}
12989 	if (job)
12990 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12991 	return ret;
12992 }
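
/**
 * Illustrative sketch, not part of the driver: the quota branch above requires
 * RTE_FLOW_QU_QUERY_FIRST, i.e. the remaining quota is read before the update
 * is applied. The port_id, queue_id and quota_handle values are placeholders,
 * and the quota structures are only zero-initialized here.
 *
 * @code{.c}
 * struct rte_flow_update_quota upd = { 0 };  // new quota setting, filled by the application
 * struct rte_flow_query_quota last = { 0 };  // receives the quota left before the update
 * struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * struct rte_flow_error error;
 *
 * rte_flow_async_action_handle_query_update(port_id, queue_id, &op_attr,
 *                                           quota_handle, &upd, &last,
 *                                           RTE_FLOW_QU_QUERY_FIRST,
 *                                           NULL, &error);
 * @endcode
 */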
12993 
12994 static int
12995 flow_hw_action_query(struct rte_eth_dev *dev,
12996 		     const struct rte_flow_action_handle *handle, void *data,
12997 		     struct rte_flow_error *error)
12998 {
12999 	return flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,
13000 			handle, data, NULL, error);
13001 }
13002 
13003 static int
13004 flow_hw_action_query_update(struct rte_eth_dev *dev,
13005 			    struct rte_flow_action_handle *handle,
13006 			    const void *update, void *query,
13007 			    enum rte_flow_query_update_mode qu_mode,
13008 			    struct rte_flow_error *error)
13009 {
13010 	return flow_hw_async_action_handle_query_update(dev, MLX5_HW_INV_QUEUE,
13011 							NULL, handle, update,
13012 							query, qu_mode, NULL,
13013 							error);
13014 }
13015 
13016 /**
13017  * Get aged-out flows of a given port on the given HWS flow queue.
13018  *
13019  * @param[in] dev
13020  *   Pointer to the Ethernet device structure.
13021  * @param[in] queue_id
13022  *   Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is not set.
13023  * @param[in, out] contexts
13024  *   The address of an array of pointers to the aged-out flows contexts.
13025  * @param[in] nb_contexts
13026  *   The length of context array pointers.
13027  * @param[out] error
13028  *   Perform verbose error reporting if not NULL. Initialized in case of
13029  *   error only.
13030  *
13031  * @return
13032  *   If nb_contexts is 0, the number of all aged contexts.
13033  *   If nb_contexts is not 0, the number of aged flows reported in the
13034  *   context array, otherwise a negative errno value.
13035  */
13036 static int
13037 flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
13038 			 void **contexts, uint32_t nb_contexts,
13039 			 struct rte_flow_error *error)
13040 {
13041 	struct mlx5_priv *priv = dev->data->dev_private;
13042 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
13043 	struct rte_ring *r;
13044 	int nb_flows = 0;
13045 
13046 	if (nb_contexts && !contexts)
13047 		return rte_flow_error_set(error, EINVAL,
13048 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13049 					  NULL, "empty context");
13050 	if (!priv->hws_age_req)
13051 		return rte_flow_error_set(error, ENOENT,
13052 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13053 					  NULL, "No aging initialized");
13054 	if (priv->hws_strict_queue) {
13055 		if (queue_id >= age_info->hw_q_age->nb_rings)
13056 			return rte_flow_error_set(error, EINVAL,
13057 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13058 						NULL, "invalid queue id");
13059 		r = age_info->hw_q_age->aged_lists[queue_id];
13060 	} else {
13061 		r = age_info->hw_age.aged_list;
13062 		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13063 	}
13064 	if (nb_contexts == 0)
13065 		return rte_ring_count(r);
13066 	while ((uint32_t)nb_flows < nb_contexts) {
13067 		uint32_t age_idx;
13068 
13069 		if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
13070 			break;
13071 		/* get the AGE context if the aged-out index is still valid. */
13072 		contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
13073 		if (!contexts[nb_flows])
13074 			continue;
13075 		nb_flows++;
13076 	}
13077 	return nb_flows;
13078 }
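
/**
 * Illustrative sketch, not part of the driver: polling aged-out flows per
 * queue when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is set. The port_id, queue_id
 * and handle_aged_flow() names are placeholders.
 *
 * @code{.c}
 * void *contexts[64];
 * struct rte_flow_error error;
 * int i, n;
 *
 * n = rte_flow_get_q_aged_flows(port_id, queue_id, contexts,
 *                               RTE_DIM(contexts), &error);
 * for (i = 0; i < n; i++)
 *         // Each entry is the aging context associated with the aged rule.
 *         handle_aged_flow(contexts[i]);
 * @endcode
 */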
13079 
13080 /**
13081  * Get aged-out flows.
13082  *
13083  * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
13084  *
13085  * @param[in] dev
13086  *   Pointer to the Ethernet device structure.
13087  * @param[in] contexts
13088  *   The address of an array of pointers to the aged-out flows contexts.
13089  * @param[in] nb_contexts
13090  *   The length of context array pointers.
13091  * @param[out] error
13092  *   Perform verbose error reporting if not NULL. Initialized in case of
13093  *   error only.
13094  *
13095  * @return
13096  *   The number of contexts retrieved on success, otherwise a negative errno value.
13097  *   If nb_contexts is 0, the number of all aged contexts.
13098  *   If nb_contexts is not 0, the number of aged flows reported in the
13099  *   context array.
13100  */
13101 static int
13102 flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
13103 		       uint32_t nb_contexts, struct rte_flow_error *error)
13104 {
13105 	struct mlx5_priv *priv = dev->data->dev_private;
13106 
13107 	if (priv->hws_strict_queue)
13108 		DRV_LOG(WARNING,
13109 			"port %u get aged flows called in strict queue mode.",
13110 			dev->data->port_id);
13111 	return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
13112 }
13113 /**
13114  * Initialization function for the non-template API which calls
13115  * flow_hw_configure with default values.
13116  * No queues are requested here because one queue is configured by default for internal usage.
13117  *
13118  * @param[in] dev
13119  *   Pointer to the Ethernet device structure.
13120  * @param[out] error
13121  *   Pointer to the error structure.
13122  *
13123  * @return
13124  *   0 on success, a negative errno value otherwise and rte_errno is set.
13125  */
13126 int
13127 flow_hw_init(struct rte_eth_dev *dev,
13128 	     struct rte_flow_error *error)
13129 {
13130 	const struct rte_flow_port_attr port_attr = {0};
13131 	const struct rte_flow_queue_attr queue_attr = {.size = MLX5_NT_DEFAULT_QUEUE_SIZE};
13132 	const struct rte_flow_queue_attr *attr_list = &queue_attr;
13133 
13134 	/**
13135 	 * If the user uses both the template and non-template APIs:
13136 	 * the user calls flow_hw_configure and the non-template
13137 	 * API uses the allocated actions.
13138 	 * This init function will not call flow_hw_configure.
13139 	 *
13140 	 * If the user uses only the non-template API:
13141 	 * this init function calls flow_hw_configure.
13142 	 * It does not allocate memory for actions.
13143 	 * When allocation is needed, it is handled the same way as for SWS today,
13144 	 * i.e. using bulk allocations and resizing as needed.
13145 	 */
13146 	/* Configure hws with default values. */
13147 	DRV_LOG(DEBUG, "Applying default configuration: zero user queues, internal control queue size is %u",
13148 		MLX5_NT_DEFAULT_QUEUE_SIZE);
13149 	return __flow_hw_configure(dev, &port_attr, 0, &attr_list, true, error);
13150 }
13151 
13152 static int flow_hw_prepare(struct rte_eth_dev *dev,
13153 			   const struct rte_flow_action actions[] __rte_unused,
13154 			   enum mlx5_flow_type type,
13155 			   struct rte_flow_hw **flow,
13156 			   struct rte_flow_error *error)
13157 {
13158 	struct mlx5_priv *priv = dev->data->dev_private;
13159 	uint32_t idx = 0;
13160 
13161 	/*
13162 	 * Note that in HWS mode the pool entry size is
13163 	 * sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws).
13164 	 */
13165 	*flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
13166 	if (!(*flow))
13167 		return rte_flow_error_set(error, ENOMEM,
13168 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13169 			"cannot allocate flow memory");
13170 	/* Two structures are allocated in one pool slot; update the nt2hws pointer. */
13171 	(*flow)->nt2hws = (struct rte_flow_nt2hws *)
13172 				((uintptr_t)(*flow) + sizeof(struct rte_flow_hw));
13173 	(*flow)->idx = idx;
13174 	(*flow)->nt2hws->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux),
13175 				    RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
13176 	if (!(*flow)->nt2hws->flow_aux)
13177 		return rte_flow_error_set(error, ENOMEM,
13178 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13179 				"cannot allocate flow aux memory");
13180 	return 0;
13181 }
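
/**
 * The pool entry allocated by flow_hw_prepare() above is laid out roughly as
 * below (illustrative only); flow_aux is allocated separately:
 *
 * @code{.c}
 * struct {
 *         struct rte_flow_hw flow;       // pool entry, its index kept in flow.idx
 *         struct rte_flow_nt2hws nt2hws; // non-template part, flow.nt2hws points here
 * };                                     // plus a separate struct rte_flow_hw_aux
 * @endcode
 */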
13182 
13183 #define FLOW_HW_SET_DV_FIELDS(flow_attr, root, dv_resource) {					\
13184 	typeof(flow_attr) _flow_attr = (flow_attr);						\
13185 	if (_flow_attr->transfer)								\
13186 		dv_resource.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;				\
13187 	else											\
13188 		dv_resource.ft_type = _flow_attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :	\
13189 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;			\
13190 	root = _flow_attr->group ? 0 : 1;							\
13191 	dv_resource.flags =									\
13192 		mlx5_hw_act_flag[!!_flow_attr->group][get_mlx5dr_table_type(_flow_attr)];	\
13193 }
13194 
13195 static int
13196 flow_hw_modify_hdr_resource_register
13197 			(struct rte_eth_dev *dev,
13198 			 struct rte_flow_template_table *table,
13199 			 struct mlx5_hw_actions *hw_acts,
13200 			 struct rte_flow_hw *dev_flow,
13201 			 struct rte_flow_error *error)
13202 {
13203 	struct rte_flow_attr *attr = &table->cfg.attr.flow_attr;
13204 	struct mlx5_flow_dv_modify_hdr_resource *dv_resource_ptr = NULL;
13205 	union {
13206 		struct mlx5_flow_dv_modify_hdr_resource dv_resource;
13207 		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13208 			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
13209 	} dummy;
13210 	int ret;
13211 
13212 	if (hw_acts->mhdr) {
13213 		dummy.dv_resource.actions_num = hw_acts->mhdr->mhdr_cmds_num;
13214 		memcpy(dummy.dv_resource.actions, hw_acts->mhdr->mhdr_cmds,
13215 			sizeof(struct mlx5_modification_cmd) * dummy.dv_resource.actions_num);
13216 	} else {
13217 		return 0;
13218 	}
13219 	FLOW_HW_SET_DV_FIELDS(attr, dummy.dv_resource.root, dummy.dv_resource);
13220 	dummy.dv_resource.flags |= MLX5DR_ACTION_FLAG_SHARED;
13221 	ret = __flow_modify_hdr_resource_register(dev, &dummy.dv_resource,
13222 		&dv_resource_ptr, error);
13223 	if (ret)
13224 		return ret;
13225 	MLX5_ASSERT(dv_resource_ptr);
13226 	dev_flow->nt2hws->modify_hdr = dv_resource_ptr;
13227 	/* keep action for the rule construction. */
13228 	hw_acts->rule_acts[hw_acts->mhdr->pos].action = dv_resource_ptr->action;
13229 	/* Bulk size is 1, so index is 1. */
13230 	dev_flow->res_idx = 1;
13231 	return 0;
13232 }
13233 
13234 static int
13235 flow_hw_encap_decap_resource_register
13236 			(struct rte_eth_dev *dev,
13237 			 struct rte_flow_template_table *table,
13238 			 struct mlx5_hw_actions *hw_acts,
13239 			 struct rte_flow_hw *dev_flow,
13240 			 struct rte_flow_error *error)
13241 {
13242 	struct rte_flow_attr *attr = &table->cfg.attr.flow_attr;
13243 	struct mlx5_flow_dv_encap_decap_resource *dv_resource_ptr = NULL;
13244 	struct mlx5_flow_dv_encap_decap_resource dv_resource;
13245 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
13246 	int ret;
13247 	bool is_root;
13248 	int ix;
13249 
13250 	if (hw_acts->encap_decap)
13251 		dv_resource.reformat_type = hw_acts->encap_decap->action_type;
13252 	else
13253 		return 0;
13254 	FLOW_HW_SET_DV_FIELDS(attr, is_root, dv_resource);
13255 	ix = mlx5_bwc_multi_pattern_reformat_to_index((enum mlx5dr_action_type)
13256 			dv_resource.reformat_type);
13257 	if (ix < 0)
13258 		return ix;
13259 	if (hw_acts->encap_decap->shared) {
13260 		dv_resource.size = hw_acts->encap_decap->data_size;
13261 		MLX5_ASSERT(dv_resource.size <= MLX5_ENCAP_MAX_LEN);
13262 		memcpy(&dv_resource.buf, hw_acts->encap_decap->data, dv_resource.size);
13263 		dv_resource.flags |= MLX5DR_ACTION_FLAG_SHARED;
13264 	} else {
13265 		typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + ix;
13266 		if (!reformat->elements_num)
13267 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
13268 					NULL, "No reformat action exists in the table.");
13269 		dv_resource.size = reformat->reformat_hdr->sz;
13270 		MLX5_ASSERT(dv_resource.size <= MLX5_ENCAP_MAX_LEN);
13271 		memcpy(&dv_resource.buf, reformat->reformat_hdr->data, dv_resource.size);
13272 	}
13273 	ret = __flow_encap_decap_resource_register(dev, &dv_resource, is_root,
13274 		&dv_resource_ptr, error);
13275 	if (ret)
13276 		return ret;
13277 	MLX5_ASSERT(dv_resource_ptr);
13278 	dev_flow->nt2hws->rix_encap_decap = dv_resource_ptr->idx;
13279 	/* keep action for the rule construction. */
13280 	if (hw_acts->encap_decap->shared)
13281 		hw_acts->rule_acts[hw_acts->encap_decap_pos].action = dv_resource_ptr->action;
13282 	else
13283 		mpctx->segments[0].reformat_action[ix] = dv_resource_ptr->action;
13284 	/* Bulk size is 1, so index is 1. */
13285 	dev_flow->res_idx = 1;
13286 	return 0;
13287 }
13288 
13289 static enum rte_flow_action_type
13290 flow_nta_get_indirect_action_type(const struct rte_flow_action *action)
13291 {
13292 	switch (MLX5_INDIRECT_ACTION_TYPE_GET(action->conf)) {
13293 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
13294 		return RTE_FLOW_ACTION_TYPE_RSS;
13295 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
13296 		return RTE_FLOW_ACTION_TYPE_AGE;
13297 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13298 		return RTE_FLOW_ACTION_TYPE_COUNT;
13299 	case MLX5_INDIRECT_ACTION_TYPE_CT:
13300 		return RTE_FLOW_ACTION_TYPE_CONNTRACK;
13301 	default:
13302 		break;
13303 	}
13304 	return RTE_FLOW_ACTION_TYPE_END;
13305 }
13306 
13307 static void
13308 flow_nta_set_mh_mask_conf(const struct rte_flow_action_modify_field *action_conf,
13309 			  struct rte_flow_action_modify_field *mask_conf)
13310 {
13311 	memset(mask_conf, 0xff, sizeof(*mask_conf));
13312 	mask_conf->operation = action_conf->operation;
13313 	mask_conf->dst.field = action_conf->dst.field;
13314 	mask_conf->src.field = action_conf->src.field;
13315 }
13316 
13317 union actions_conf {
13318 	struct rte_flow_action_modify_field modify_field;
13319 	struct rte_flow_action_raw_encap raw_encap;
13320 	struct rte_flow_action_vxlan_encap vxlan_encap;
13321 	struct rte_flow_action_nvgre_encap nvgre_encap;
13322 };
13323 
13324 static int
13325 flow_nta_build_template_mask(const struct rte_flow_action actions[],
13326 			     struct rte_flow_action masks[MLX5_HW_MAX_ACTS],
13327 			     union actions_conf mask_conf[MLX5_HW_MAX_ACTS])
13328 {
13329 	int i;
13330 
13331 	for (i = 0; i == 0 || actions[i - 1].type != RTE_FLOW_ACTION_TYPE_END; i++) {
13332 		const struct rte_flow_action *action = &actions[i];
13333 		struct rte_flow_action *mask = &masks[i];
13334 		union actions_conf *conf = &mask_conf[i];
13335 
13336 		mask->type = action->type;
13337 		switch (action->type) {
13338 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
13339 			mask->type = flow_nta_get_indirect_action_type(action);
13340 			if (!mask->type)
13341 				return -EINVAL;
13342 			break;
13343 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13344 			flow_nta_set_mh_mask_conf(action->conf, (void *)conf);
13345 			mask->conf = conf;
13346 			break;
13347 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13348 			/* This mask will set this action as shared. */
13349 			memset(conf, 0xff, sizeof(struct rte_flow_action_raw_encap));
13350 			mask->conf = conf;
13351 			break;
13352 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13353 			/* This mask will set this action as shared. */
13354 			conf->vxlan_encap.definition =
13355 				((const struct rte_flow_action_vxlan_encap *)
13356 					action->conf)->definition;
13357 			mask->conf = conf;
13358 			break;
13359 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13360 			/* This mask will set this action as shared. */
13361 			conf->nvgre_encap.definition =
13362 				((const struct rte_flow_action_nvgre_encap *)
13363 					action->conf)->definition;
13364 			mask->conf = conf;
13365 			break;
13366 		default:
13367 			break;
13368 		}
13369 	}
13370 	return 0;
13371 #undef NTA_CHECK_CONF_BUF_SIZE
13372 }
13373 
13374 static int
13375 flow_hw_translate_flow_actions(struct rte_eth_dev *dev,
13376 			  const struct rte_flow_attr *attr,
13377 			  const struct rte_flow_action actions[],
13378 			  struct rte_flow_hw *flow,
13379 			  struct mlx5_flow_hw_action_params *ap,
13380 			  struct mlx5_hw_actions *hw_acts,
13381 			  uint64_t item_flags, uint64_t action_flags,
13382 			  bool external,
13383 			  struct rte_flow_error *error)
13384 {
13385 	int ret = 0;
13386 	uint32_t src_group = 0;
13387 	enum mlx5dr_table_type table_type;
13388 	struct rte_flow_template_table *table = NULL;
13389 	struct mlx5_flow_group grp;
13390 	struct rte_flow_actions_template *at = NULL;
13391 	struct rte_flow_actions_template_attr template_attr = {
13392 		.egress = attr->egress,
13393 		.ingress = attr->ingress,
13394 		.transfer = attr->transfer,
13395 	};
13396 	struct rte_flow_action masks[MLX5_HW_MAX_ACTS];
13397 	union actions_conf mask_conf[MLX5_HW_MAX_ACTS];
13398 
13399 	RTE_SET_USED(action_flags);
13400 	memset(masks, 0, sizeof(masks));
13401 	memset(mask_conf, 0, sizeof(mask_conf));
13402 	/*
13403 	 * Note that all direct actions will be unmasked,
13404 	 * except for modify header and encap,
13405 	 * and therefore will be parsed as part of action construction.
13406 	 * Modify header is always shared in HWS;
13407 	 * encap is masked such that it will be treated as shared.
13408 	 * Shared actions will be parsed as part of template translation
13409 	 * and not during action construction.
13410 	 */
13411 	flow_nta_build_template_mask(actions, masks, mask_conf);
13412 	/* The group in the attribute translation was done in advance. */
13413 	ret = __translate_group(dev, attr, external, attr->group, &src_group, error);
13414 	if (ret)
13415 		return ret;
13416 	if (attr->transfer)
13417 		table_type = MLX5DR_TABLE_TYPE_FDB;
13418 	else if (attr->egress)
13419 		table_type = MLX5DR_TABLE_TYPE_NIC_TX;
13420 	else
13421 		table_type = MLX5DR_TABLE_TYPE_NIC_RX;
13422 	/* TODO: consider reusing the workspace per thread. */
13423 	table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*table), 0, SOCKET_ID_ANY);
13424 	if (!table)
13425 		return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
13426 				   actions, "Failed to allocate dummy table");
13427 	at = __flow_hw_actions_template_create(dev, &template_attr, actions, masks, true, error);
13428 	if (!at) {
13429 		ret = -rte_errno;
13430 		goto end;
13431 	}
13432 	grp.group_id = src_group;
13433 	table->grp = &grp;
13434 	table->type = table_type;
13435 	table->cfg.external = external;
13436 	table->nb_action_templates = 1;
13437 	memcpy(&table->cfg.attr.flow_attr, attr, sizeof(*attr));
13438 	table->ats[0].action_template = at;
13439 	ret = __flow_hw_translate_actions_template(dev, &table->cfg, hw_acts, at,
13440 		&table->mpctx, true, error);
13441 	if (ret)
13442 		goto end;
13443 	/* handle bulk actions register. */
13444 	ret = flow_hw_encap_decap_resource_register(dev, table, hw_acts, flow, error);
13445 	if (ret)
13446 		goto clean_up;
13447 	ret = flow_hw_modify_hdr_resource_register(dev, table, hw_acts, flow, error);
13448 	if (ret)
13449 		goto clean_up;
13450 	table->ats[0].acts = *hw_acts;
13451 	ret = flow_hw_actions_construct(dev, flow, ap,
13452 		&table->ats[0], item_flags, table,
13453 		actions, hw_acts->rule_acts, 0, error);
13454 	if (ret)
13455 		goto clean_up;
13456 	goto end;
13457 clean_up:
13458 	/* Make sure that there is no garbage in the actions. */
13459 	__flow_hw_action_template_destroy(dev, hw_acts);
13460 end:
13461 	if (table)
13462 		mlx5_free(table);
13463 	if (at)
13464 		mlx5_free(at);
13465 	return ret;
13466 }
13467 
13468 static int
13469 flow_hw_unregister_matcher(struct rte_eth_dev *dev,
13470 			   struct mlx5_flow_dv_matcher *matcher)
13471 {
13472 	int ret;
13473 	struct mlx5_priv *priv = dev->data->dev_private;
13474 
13475 	if (matcher->matcher_object) {
13476 		ret = mlx5_hlist_unregister(priv->sh->groups, &matcher->group->entry);
13477 		if (ret)
13478 			goto error;
13479 		if (matcher->group) {
13480 			ret = mlx5_list_unregister(matcher->group->matchers, &matcher->entry);
13481 			if (ret)
13482 				goto error;
13483 		}
13484 	}
13485 	return 0;
13486 error:
13487 	return -EINVAL;
13488 }
13489 
13490 static int flow_hw_register_matcher(struct rte_eth_dev *dev,
13491 				    const struct rte_flow_attr *attr,
13492 				    const struct rte_flow_item items[],
13493 				    bool external,
13494 				    struct rte_flow_hw *flow,
13495 				    struct mlx5_flow_dv_matcher *matcher,
13496 				    struct rte_flow_error *error)
13497 {
13498 	struct mlx5_priv *priv = dev->data->dev_private;
13499 	struct rte_flow_error sub_error = {
13500 		.type = RTE_FLOW_ERROR_TYPE_NONE,
13501 		.cause = NULL,
13502 		.message = NULL,
13503 	};
13504 	struct rte_flow_attr flow_attr = *attr;
13505 	struct mlx5_flow_cb_ctx ctx = {
13506 		.dev = dev,
13507 		.error = &sub_error,
13508 		.data = &flow_attr,
13509 	};
13510 	void *items_ptr = &items;
13511 	struct mlx5_flow_cb_ctx matcher_ctx = {
13512 		.error = &sub_error,
13513 		.data = matcher,
13514 		.data2 = items_ptr,
13515 	};
13516 	struct mlx5_list_entry *group_entry = NULL;
13517 	struct mlx5_list_entry *matcher_entry = NULL;
13518 	struct mlx5_flow_dv_matcher *resource;
13519 	struct mlx5_list *matchers_list;
13520 	struct mlx5_flow_group *flow_group;
13521 	int ret;
13522 
13524 	matcher->crc = rte_raw_cksum((const void *)matcher->mask.buf,
13525 				    matcher->mask.size);
13526 	matcher->priority = attr->priority;
13527 	ret = __translate_group(dev, attr, external, attr->group, &flow_attr.group, error);
13528 	if (ret)
13529 		return ret;
13530 
13531 	/* Register the flow group. */
13532 	group_entry = mlx5_hlist_register(priv->sh->groups, flow_attr.group, &ctx);
13533 	if (!group_entry)
13534 		goto error;
13535 	flow_group = container_of(group_entry, struct mlx5_flow_group, entry);
13536 
13537 	matchers_list = flow_group->matchers;
13538 	matcher->group = flow_group;
13539 	matcher_entry = mlx5_list_register(matchers_list, &matcher_ctx);
13540 	if (!matcher_entry)
13541 		goto error;
13542 	resource = container_of(matcher_entry, typeof(*resource), entry);
13543 	flow->nt2hws->matcher = resource;
13544 	return 0;
13545 
13546 error:
13547 	if (group_entry)
13548 		mlx5_hlist_unregister(priv->sh->groups, group_entry);
13549 	if (error) {
13550 		if (sub_error.type != RTE_FLOW_ERROR_TYPE_NONE)
13551 			rte_memcpy(error, &sub_error, sizeof(sub_error));
13552 	}
13553 	return rte_flow_error_set(error, ENOMEM,
13554 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13555 					NULL, "fail to register matcher");
13556 }
13557 
13558 static int
13559 flow_hw_allocate_actions(struct rte_eth_dev *dev,
13560 			 uint64_t action_flags,
13561 			 struct rte_flow_error *error)
13562 {
13563 	struct mlx5_priv *priv = dev->data->dev_private;
13564 	int ret;
13565 	uint obj_num;
13566 
13567 	if (action_flags & MLX5_FLOW_ACTION_AGE) {
13568 		/* If no age objects were previously allocated. */
13569 		if (!priv->hws_age_req) {
13570 			/* If no counters were previously allocated. */
13571 			if (!priv->hws_cpool) {
13572 				obj_num = MLX5_CNT_NT_MAX(priv);
13573 				ret = mlx5_hws_cnt_pool_create(dev, obj_num,
13574 							       priv->nb_queue, NULL);
13575 				if (ret)
13576 					goto err;
13577 			}
13578 			/* Allocate same number of counters. */
13579 			ret = mlx5_hws_age_pool_init(dev, priv->hws_cpool->cfg.request_num,
13580 						     priv->nb_queue, false);
13581 			if (ret)
13582 				goto err;
13583 		}
13584 	}
13585 	if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13586 		/* If no counters were previously allocated. */
13587 		if (!priv->hws_cpool) {
13588 			obj_num = MLX5_CNT_NT_MAX(priv);
13589 			ret = mlx5_hws_cnt_pool_create(dev, obj_num,
13590 						       priv->nb_queue, NULL);
13591 			if (ret)
13592 				goto err;
13593 		}
13594 	}
13595 	if (action_flags & MLX5_FLOW_ACTION_CT) {
13596 		/* If no CT were previously allocated. */
13597 		if (!priv->hws_ctpool) {
13598 			obj_num = MLX5_CT_NT_MAX(priv);
13599 			ret = mlx5_flow_ct_init(dev, obj_num, priv->nb_queue);
13600 			if (ret)
13601 				goto err;
13602 		}
13603 	}
13604 	if (action_flags & MLX5_FLOW_ACTION_METER) {
13605 		/* If no meters were previously allocated. */
13606 		if (!priv->hws_mpool) {
13607 			obj_num = MLX5_MTR_NT_MAX(priv);
13608 			ret = mlx5_flow_meter_init(dev, obj_num, 0, 0,
13609 						   priv->nb_queue);
13610 			if (ret)
13611 				goto err;
13612 		}
13613 	}
13614 	return 0;
13615 err:
13616 	return rte_flow_error_set(error, ret,
13617 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13618 				  NULL, "fail to allocate actions");
13619 }
13620 
13621 static int flow_hw_apply(const struct rte_flow_item items[],
13622 			 struct mlx5dr_rule_action rule_actions[],
13623 			 struct rte_flow_hw *flow,
13624 			 struct rte_flow_error *error)
13625 {
13626 	struct mlx5dr_bwc_rule *rule = NULL;
13627 
13628 	rule = mlx5dr_bwc_rule_create((struct mlx5dr_bwc_matcher *)
13629 		flow->nt2hws->matcher->matcher_object,
13630 		items, rule_actions);
13631 	flow->nt2hws->nt_rule = rule;
13632 	if (!rule) {
13633 		return rte_flow_error_set(error, EINVAL,
13634 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13635 			NULL, "fail to create rte flow");
13636 	}
13637 	return 0;
13638 }
13639 
13640 #ifdef HAVE_MLX5_HWS_SUPPORT
13641 /**
13642  * Create a flow.
13643  *
13644  * @param[in] dev
13645  *   Pointer to Ethernet device.
13646  * @param[in] type
13647  *   Flow type.
13648  * @param[in] attr
13649  *   Flow rule attributes.
13650  * @param[in] items
13651  *   Pattern specification (list terminated by the END pattern item).
13652  * @param[in] actions
13653  *   Associated actions (list terminated by the END action).
13654  * @param[in] external
13655  *   This flow rule is created by a request external to the PMD.
13656  * @param[out] flow
13657  *   Flow pointer
13658  * @param[out] error
13659  *   Perform verbose error reporting if not NULL.
13660  *
13661  * @return
13662  *   0 on success, negative errno value otherwise and rte_errno set.
13663  */
13664 int
13665 flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type,
13666 		    const struct rte_flow_attr *attr,
13667 		    const struct rte_flow_item items[],
13668 		    const struct rte_flow_action actions[],
13669 		    uint64_t item_flags, uint64_t action_flags, bool external,
13670 		    struct rte_flow_hw **flow, struct rte_flow_error *error)
13671 {
13672 	int ret;
13673 	struct mlx5_hw_actions hw_act;
13674 	struct mlx5_flow_hw_action_params ap;
13675 	struct mlx5_flow_dv_matcher matcher = {
13676 		.mask = {
13677 			.size = sizeof(matcher.mask.buf),
13678 		},
13679 	};
13680 	uint32_t tbl_type;
13681 
13682 	struct mlx5_flow_attr flow_attr = {
13683 		.port_id = dev->data->port_id,
13684 		.group = attr->group,
13685 		.priority = attr->priority,
13686 		.rss_level = 0,
13687 		.act_flags = action_flags,
13688 		.tbl_type = 0,
13689 	};
13690 
13691 	memset(&hw_act, 0, sizeof(hw_act));
13692 	if (attr->transfer)
13693 		tbl_type = MLX5DR_TABLE_TYPE_FDB;
13694 	else if (attr->egress)
13695 		tbl_type = MLX5DR_TABLE_TYPE_NIC_TX;
13696 	else
13697 		tbl_type = MLX5DR_TABLE_TYPE_NIC_RX;
13698 	flow_attr.tbl_type = tbl_type;
13699 
13700 	/* Allocate needed memory. */
13701 	ret = flow_hw_prepare(dev, actions, type, flow, error);
13702 	if (ret)
13703 		goto error;
13704 
13705 	/* TODO TBD flow_hw_handle_tunnel_offload(). */
13706 	(*flow)->nt_rule = true;
13707 	(*flow)->nt2hws->matcher = &matcher;
13708 	ret = flow_dv_translate_items_hws(items, &flow_attr, &matcher.mask.buf,
13709 					MLX5_SET_MATCHER_HS_M, &item_flags,
13710 					NULL, error);
13711 
13712 	if (ret)
13713 		goto error;
13714 
13715 	ret = flow_hw_register_matcher(dev, attr, items, external, *flow, &matcher, error);
13716 	if (ret)
13717 		goto error;
13718 
13719 	/*
13720 	 * ASO allocation: iterate over the actions list to allocate missing resources.
13721 	 * In the future, when a validate function is added to HWS,
13722 	 * its output actions bit mask can be used instead of
13723 	 * looping over the actions array twice.
13724 	 */
13725 	ret = flow_hw_allocate_actions(dev, action_flags, error);
13726 	if (ret)
13727 		goto error;
13728 
13729 	/* Note: the actions should be saved in the sub-flow rule itself for reference. */
13730 	ret = flow_hw_translate_flow_actions(dev, attr, actions, *flow, &ap, &hw_act,
13731 					item_flags, action_flags, external, error);
13732 	if (ret)
13733 		goto error;
13734 
13735 	/*
13736 	 * If the flow is external (from application) OR device is started,
13737 	 * OR mreg discover, then apply immediately.
13738 	 */
13739 	if (external || dev->data->dev_started ||
13740 	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
13741 	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
13742 		ret = flow_hw_apply(items, hw_act.rule_acts, *flow, error);
13743 		if (ret)
13744 			goto error;
13745 	}
13746 	ret = 0;
13747 error:
13748 	/*
13749 	 * Release the allocated memory.
13750 	 * Cannot use __flow_hw_actions_release(dev, &hw_act);
13751 	 * since it destroys the actions as well.
13752 	 */
13753 	if (hw_act.encap_decap)
13754 		mlx5_free(hw_act.encap_decap);
13755 	if (hw_act.push_remove)
13756 		mlx5_free(hw_act.push_remove);
13757 	if (hw_act.mhdr)
13758 		mlx5_free(hw_act.mhdr);
13759 	if (ret) {
13760 		/* release after actual error */
13761 		if ((*flow)->nt2hws && (*flow)->nt2hws->matcher)
13762 			flow_hw_unregister_matcher(dev,
13763 						   (*flow)->nt2hws->matcher);
13764 	}
13765 	return ret;
13766 }
13767 #endif
13768 
13769 void
13770 flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow)
13771 {
13772 	int ret;
13773 	struct mlx5_priv *priv = dev->data->dev_private;
13774 
13775 	if (!flow || !flow->nt2hws)
13776 		return;
13777 
13778 	if (flow->nt2hws->nt_rule) {
13779 		ret = mlx5dr_bwc_rule_destroy(flow->nt2hws->nt_rule);
13780 		if (ret)
13781 			DRV_LOG(ERR, "bwc rule destroy failed");
13782 	}
13783 	flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY;
13784 	/* Notice this function does not handle shared/static actions. */
13785 	hw_cmpl_flow_update_or_destroy(dev, flow, 0, NULL);
13786 
13787 	/*
13788 	 * TODO: release tunnel-related memory allocations (mlx5_flow_tunnel_free).
13789 	 * Needed only if tunnel offloads are supported; note the RX queue flags update in SWS.
13790 	 */
13791 
13792 	/*
13793 	 * Note that the matcher is destroyed when the matcher list is destroyed,
13794 	 * the same as for DV.
13795 	 */
13796 	if (flow->nt2hws->flow_aux)
13797 		mlx5_free(flow->nt2hws->flow_aux);
13798 
13799 	if (flow->nt2hws->rix_encap_decap) {
13800 		ret = flow_encap_decap_resource_release(dev, flow->nt2hws->rix_encap_decap);
13801 		if (ret)
13802 			DRV_LOG(ERR, "failed to release encap decap.");
13803 	}
13804 	if (flow->nt2hws->modify_hdr) {
13805 		MLX5_ASSERT(flow->nt2hws->modify_hdr->action);
13806 		ret = mlx5_hlist_unregister(priv->sh->modify_cmds,
13807 			&flow->nt2hws->modify_hdr->entry);
13808 		if (ret)
13809 			DRV_LOG(ERR, "failed to release modify action.");
13810 	}
13811 	if (flow->nt2hws->matcher)
13812 		flow_hw_unregister_matcher(dev, flow->nt2hws->matcher);
13813 }
13814 
13815 #ifdef HAVE_MLX5_HWS_SUPPORT
13816 /**
13817  * Destroy a flow.
13818  *
13819  * @param[in] dev
13820  *   Pointer to Ethernet device.
13821  * @param[in] type
13822  *   Flow type.
13823  * @param[in] flow_addr
13824  *   Address of flow to destroy.
13825  */
13826 void
13827 flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
13828 		     uintptr_t flow_addr)
13829 {
13830 	struct mlx5_priv *priv = dev->data->dev_private;
13831 	struct rte_flow_hw *flow = (struct rte_flow_hw *)flow_addr;
13832 	struct mlx5_nta_rss_flow_head head = { .slh_first = flow };
13833 
13834 	if (flow->nt2hws->chaned_flow)
13835 		return;
13836 	while (!SLIST_EMPTY(&head)) {
13837 		flow = SLIST_FIRST(&head);
13838 		SLIST_REMOVE_HEAD(&head, nt2hws->next);
13839 		flow_hw_destroy(dev, flow);
13840 		/* Release flow memory by idx */
13841 		mlx5_ipool_free(priv->flows[type], flow->idx);
13842 	}
13843 }
13844 #endif
13845 
13846 /**
13847  * Create a flow.
13848  *
13849  * @param[in] dev
13850  *   Pointer to Ethernet device.
13851  * @param[in] type
13852  *   Flow type.
13853  * @param[in] attr
13854  *   Flow rule attributes.
13855  * @param[in] items
13856  *   Pattern specification (list terminated by the END pattern item).
13857  * @param[in] actions
13858  *   Associated actions (list terminated by the END action).
13859  * @param[in] external
13860  *   This flow rule is created by a request external to the PMD.
13861  * @param[out] error
13862  *   Perform verbose error reporting if not NULL.
13863  *
13864  * @return
13865  *   A flow addr on success, 0 otherwise and rte_errno is set.
13866  */
13867 static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
13868 				     enum mlx5_flow_type type,
13869 				     const struct rte_flow_attr *attr,
13870 				     const struct rte_flow_item items[],
13871 				     const struct rte_flow_action actions[],
13872 				     bool external,
13873 				     struct rte_flow_error *error)
13874 {
13875 	int ret;
13876 	struct rte_flow_hw *flow = NULL;
13877 	uint64_t item_flags = flow_hw_matching_item_flags_get(items);
13878 	uint64_t action_flags = flow_hw_action_flags_get(actions, error);
13879 
13880 	/*
13881 	 * TODO: add a call to flow_hw_validate function once it exist.
13882 	 * and update mlx5_flow_hw_drv_ops accordingly.
13883 	 */
13884 
13885 	if (action_flags & MLX5_FLOW_ACTION_RSS) {
13886 		const struct rte_flow_action_rss
13887 			*rss_conf = flow_nta_locate_rss(dev, actions, error);
13888 		flow = flow_nta_handle_rss(dev, attr, items, actions, rss_conf,
13889 					   item_flags, action_flags, external,
13890 					   type, error);
13891 		return (uintptr_t)flow;
13892 	}
13893 	/* TODO: Handle split/expand to num_flows. */
13894 
13895 	/* Create single flow. */
13896 	ret = flow_hw_create_flow(dev, type, attr, items, actions,
13897 				  item_flags, action_flags,
13898 				  external, &flow, error);
13899 	if (ret)
13900 		goto free;
13901 	if (flow)
13902 		return (uintptr_t)flow;
13903 
13904 free:
13905 	if (flow)
13906 		flow_hw_list_destroy(dev, type, (uintptr_t)flow);
13907 	return 0;
13908 }
13909 
13910 static void
13911 mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
13912 			  struct mlx5_mirror_clone *clone)
13913 {
13914 	switch (clone->type) {
13915 	case RTE_FLOW_ACTION_TYPE_RSS:
13916 	case RTE_FLOW_ACTION_TYPE_QUEUE:
13917 		mlx5_hrxq_release(dev,
13918 				  ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
13919 		break;
13920 	case RTE_FLOW_ACTION_TYPE_JUMP:
13921 		flow_hw_jump_release(dev, clone->action_ctx);
13922 		break;
13923 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
13924 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
13925 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13926 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
13927 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13928 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13929 	default:
13930 		break;
13931 	}
13932 }
13933 
13934 void
13935 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
13936 {
13937 	uint32_t i;
13938 
13939 	mlx5_indirect_list_remove_entry(&mirror->indirect);
13940 	for (i = 0; i < mirror->clones_num; i++)
13941 		mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
13942 	if (mirror->mirror_action)
13943 		mlx5dr_action_destroy(mirror->mirror_action);
13944 	mlx5_free(mirror);
13945 }
13946 
13947 static __rte_always_inline bool
13948 mlx5_mirror_terminal_action(const struct rte_flow_action *action)
13949 {
13950 	switch (action->type) {
13951 	case RTE_FLOW_ACTION_TYPE_JUMP:
13952 	case RTE_FLOW_ACTION_TYPE_RSS:
13953 	case RTE_FLOW_ACTION_TYPE_QUEUE:
13954 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
13955 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
13956 		return true;
13957 	default:
13958 		break;
13959 	}
13960 	return false;
13961 }
13962 
13963 static bool
13964 mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
13965 				   const struct rte_flow_attr *flow_attr,
13966 				   const struct rte_flow_action *action)
13967 {
13968 	struct mlx5_priv *priv = dev->data->dev_private;
13969 	const struct rte_flow_action_ethdev *port = NULL;
13970 	bool is_proxy = MLX5_HW_PORT_IS_PROXY(priv);
13971 
13972 	if (!action)
13973 		return false;
13974 	switch (action->type) {
13975 	case RTE_FLOW_ACTION_TYPE_QUEUE:
13976 	case RTE_FLOW_ACTION_TYPE_RSS:
13977 		if (flow_attr->transfer)
13978 			return false;
13979 		break;
13980 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
13981 		if (!is_proxy || !flow_attr->transfer)
13982 			return false;
13983 		port = action->conf;
13984 		if (!port || port->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
13985 			return false;
13986 		break;
13987 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
13988 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13989 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
13990 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13991 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13992 		if (!is_proxy || !flow_attr->transfer)
13993 			return false;
13994 		if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
13995 		    action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
13996 			return false;
13997 		break;
13998 	default:
13999 		return false;
14000 	}
14001 	return true;
14002 }
14003 
14004 /**
14005  * A valid mirror actions list includes one or two SAMPLE actions
14006  * followed by a terminal action such as JUMP.
14007  *
14008  * @return
14009  *   Number of mirror clones if the *actions* list is valid,
14010  *   -EINVAL otherwise.
14011  */
14012 static int
14013 mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
14014 				     const struct rte_flow_attr *flow_attr,
14015 				     const struct rte_flow_action *actions)
14016 {
14017 	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14018 		int i = 1;
14019 		bool valid;
14020 		const struct rte_flow_action_sample *sample = actions[0].conf;
14021 		valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
14022 							   sample->actions);
14023 		if (!valid)
14024 			return -EINVAL;
14025 		if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14026 			i = 2;
14027 			sample = actions[1].conf;
14028 			valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
14029 								   sample->actions);
14030 			if (!valid)
14031 				return -EINVAL;
14032 		}
14033 		return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
14034 	}
14035 	return -EINVAL;
14036 }
14037 
14038 static int
14039 mirror_format_tir(struct rte_eth_dev *dev,
14040 		  struct mlx5_mirror_clone *clone,
14041 		  const struct mlx5_flow_template_table_cfg *table_cfg,
14042 		  const struct rte_flow_action *action,
14043 		  struct mlx5dr_action_dest_attr *dest_attr,
14044 		  struct rte_flow_error *error)
14045 {
14046 	uint32_t hws_flags;
14047 	enum mlx5dr_table_type table_type;
14048 	struct mlx5_hrxq *tir_ctx;
14049 
14050 	table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
14051 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
14052 	tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
14053 	if (!tir_ctx)
14054 		return rte_flow_error_set(error, EINVAL,
14055 					  RTE_FLOW_ERROR_TYPE_ACTION,
14056 					  action, "failed to create QUEUE action for mirror clone");
14057 	dest_attr->dest = tir_ctx->action;
14058 	clone->action_ctx = tir_ctx;
14059 	return 0;
14060 }
14061 
14062 static int
14063 mirror_format_jump(struct rte_eth_dev *dev,
14064 		   struct mlx5_mirror_clone *clone,
14065 		   const struct mlx5_flow_template_table_cfg *table_cfg,
14066 		   const struct rte_flow_action *action,
14067 		   struct mlx5dr_action_dest_attr *dest_attr,
14068 		   struct rte_flow_error *error)
14069 {
14070 	const struct rte_flow_action_jump *jump_conf = action->conf;
14071 	struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
14072 						(dev, table_cfg,
14073 						 jump_conf->group, error);
14074 
14075 	if (!jump)
14076 		return rte_flow_error_set(error, EINVAL,
14077 					  RTE_FLOW_ERROR_TYPE_ACTION,
14078 					  action, "failed to create JUMP action for mirror clone");
14079 	dest_attr->dest = jump->hws_action;
14080 	clone->action_ctx = jump;
14081 	return 0;
14082 }
14083 
14084 static int
14085 mirror_format_port(struct rte_eth_dev *dev,
14086 		   const struct rte_flow_action *action,
14087 		   struct mlx5dr_action_dest_attr *dest_attr,
14088 		   struct rte_flow_error __rte_unused *error)
14089 {
14090 	struct mlx5_priv *priv = dev->data->dev_private;
14091 	const struct rte_flow_action_ethdev *port_action = action->conf;
14092 
14093 	dest_attr->dest = priv->hw_vport[port_action->port_id];
14094 	return 0;
14095 }
14096 
14097 static int
14098 hw_mirror_clone_reformat(const struct rte_flow_action *actions,
14099 			 struct mlx5dr_action_dest_attr *dest_attr,
14100 			 enum mlx5dr_action_type *action_type,
14101 			 uint8_t *reformat_buf, bool decap)
14102 {
14103 	int ret;
14104 	const struct rte_flow_item *encap_item = NULL;
14105 	const struct rte_flow_action_raw_encap *encap_conf = NULL;
14106 	typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
14107 
14108 	switch (actions[0].type) {
14109 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14110 		encap_conf = actions[0].conf;
14111 		break;
14112 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14113 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
14114 						   actions);
14115 		break;
14116 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14117 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
14118 						   actions);
14119 		break;
14120 	default:
14121 		return -EINVAL;
14122 	}
14123 	*action_type = decap ?
14124 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
14125 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
14126 	if (encap_item) {
14127 		ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
14128 						 &reformat->reformat_data_sz, NULL);
14129 		if (ret)
14130 			return -EINVAL;
14131 		reformat->reformat_data = reformat_buf;
14132 	} else {
14133 		reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
14134 		reformat->reformat_data_sz = encap_conf->size;
14135 	}
14136 	return 0;
14137 }
14138 
14139 static int
14140 hw_mirror_format_clone(struct rte_eth_dev *dev,
14141 			struct mlx5_mirror_clone *clone,
14142 			const struct mlx5_flow_template_table_cfg *table_cfg,
14143 			const struct rte_flow_action *actions,
14144 			struct mlx5dr_action_dest_attr *dest_attr,
14145 			uint8_t *reformat_buf, struct rte_flow_error *error)
14146 {
14147 	struct mlx5_priv *priv = dev->data->dev_private;
14148 	int ret;
14149 	uint32_t i;
14150 	bool decap_seen = false;
14151 
14152 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
14153 		dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
14154 		switch (actions[i].type) {
14155 		case RTE_FLOW_ACTION_TYPE_QUEUE:
14156 		case RTE_FLOW_ACTION_TYPE_RSS:
14157 			ret = mirror_format_tir(dev, clone, table_cfg,
14158 						&actions[i], dest_attr, error);
14159 			if (ret)
14160 				return ret;
14161 			break;
14162 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14163 			ret = mirror_format_port(dev, &actions[i],
14164 						 dest_attr, error);
14165 			if (ret)
14166 				return ret;
14167 			break;
14168 		case RTE_FLOW_ACTION_TYPE_JUMP:
14169 			ret = mirror_format_jump(dev, clone, table_cfg,
14170 						 &actions[i], dest_attr, error);
14171 			if (ret)
14172 				return ret;
14173 			break;
14174 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14175 			dest_attr->dest = priv->hw_def_miss;
14176 			break;
14177 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14178 			decap_seen = true;
14179 			break;
14180 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14181 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14182 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14183 			ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
14184 						       &dest_attr->action_type[i],
14185 						       reformat_buf, decap_seen);
14186 			if (ret < 0)
14187 				return rte_flow_error_set(error, EINVAL,
14188 							  RTE_FLOW_ERROR_TYPE_ACTION,
14189 							  &actions[i],
14190 							  "failed to create reformat action");
14191 			break;
14192 		default:
14193 			return rte_flow_error_set(error, EINVAL,
14194 						  RTE_FLOW_ERROR_TYPE_ACTION,
14195 						  &actions[i], "unsupported sample action");
14196 		}
14197 		clone->type = actions->type;
14198 	}
14199 	dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
14200 	return 0;
14201 }
14202 
14203 static struct rte_flow_action_list_handle *
14204 mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
14205 			     const struct mlx5_flow_template_table_cfg *table_cfg,
14206 			     const struct rte_flow_action *actions,
14207 			     struct rte_flow_error *error)
14208 {
14209 	uint32_t hws_flags;
14210 	int ret = 0, i, clones_num;
14211 	struct mlx5_mirror *mirror;
14212 	enum mlx5dr_table_type table_type;
14213 	struct mlx5_priv *priv = dev->data->dev_private;
14214 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
14215 	uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
14216 	struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
14217 	enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
14218 						  [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
14219 
14220 	memset(mirror_attr, 0, sizeof(mirror_attr));
14221 	memset(array_action_types, 0, sizeof(array_action_types));
14222 	table_type = get_mlx5dr_table_type(flow_attr);
14223 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
14224 	clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
14225 							  actions);
14226 	if (clones_num < 0) {
14227 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14228 				   actions, "Invalid mirror list format");
14229 		return NULL;
14230 	}
14231 	mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
14232 			     0, SOCKET_ID_ANY);
14233 	if (!mirror) {
14234 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
14235 				   actions, "Failed to allocate mirror context");
14236 		return NULL;
14237 	}
14238 
14239 	mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
14240 	mirror->clones_num = clones_num;
14241 	for (i = 0; i < clones_num; i++) {
14242 		const struct rte_flow_action *clone_actions;
14243 
14244 		mirror_attr[i].action_type = array_action_types[i];
14245 		if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14246 			const struct rte_flow_action_sample *sample = actions[i].conf;
14247 
14248 			clone_actions = sample->actions;
14249 		} else {
14250 			clone_actions = &actions[i];
14251 		}
14252 		ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
14253 					     clone_actions, &mirror_attr[i],
14254 					     reformat_buf[i], error);
14255 
14256 		if (ret)
14257 			goto error;
14258 	}
14259 	hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
14260 	mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
14261 								clones_num,
14262 								mirror_attr,
14263 								hws_flags);
14264 	if (!mirror->mirror_action) {
14265 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14266 				   actions, "Failed to create HWS mirror action");
14267 		goto error;
14268 	}
14269 
14270 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
14271 	return (struct rte_flow_action_list_handle *)mirror;
14272 
14273 error:
14274 	mlx5_hw_mirror_destroy(dev, mirror);
14275 	return NULL;
14276 }
14277 
14278 void
14279 mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
14280 			     struct mlx5_indirect_list *ptr)
14281 {
14282 	struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
14283 
14284 	switch (obj->legacy_type) {
14285 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
14286 		break; /* ASO meters were released in mlx5_flow_meter_flush() */
14287 	default:
14288 		break;
14289 	}
14290 	mlx5_free(obj);
14291 }
14292 
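/**
 * Wrap a single legacy indirect action (for example METER_MARK) into an
 * indirect action list handle. The underlying object is created with
 * flow_hw_action_handle_create() and only referenced by the list entry.
 */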
14293 static struct rte_flow_action_list_handle *
14294 mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
14295 			  const struct rte_flow_op_attr *attr,
14296 			  const struct rte_flow_indir_action_conf *conf,
14297 			  const struct rte_flow_action *actions,
14298 			  void *user_data, struct rte_flow_error *error)
14299 {
14300 	struct mlx5_priv *priv = dev->data->dev_private;
14301 	struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
14302 							    sizeof(*indlst_obj),
14303 							    0, SOCKET_ID_ANY);
14304 
14305 	if (!indlst_obj)
14306 		return NULL;
14307 	indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
14308 							  actions, user_data,
14309 							  error);
14310 	if (!indlst_obj->handle) {
14311 		mlx5_free(indlst_obj);
14312 		return NULL;
14313 	}
14314 	indlst_obj->legacy_type = actions[0].type;
14315 	indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
14316 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
14317 	return (struct rte_flow_action_list_handle *)indlst_obj;
14318 }
14319 
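/**
 * Classify an indirect action list by its first action:
 * SAMPLE maps to a mirror list, a standalone METER_MARK to a legacy list,
 * RAW_DECAP/RAW_ENCAP to a reformat list. Anything else is reported as an
 * invalid list type.
 */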
14320 static __rte_always_inline enum mlx5_indirect_list_type
14321 flow_hw_inlist_type_get(const struct rte_flow_action *actions)
14322 {
14323 	switch (actions[0].type) {
14324 	case RTE_FLOW_ACTION_TYPE_SAMPLE:
14325 		return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
14326 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
14327 		return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
14328 		       MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
14329 		       MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
14330 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14331 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14332 		return MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
14333 	default:
14334 		break;
14335 	}
14336 	return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
14337 }
14338 
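/**
 * Create an indirect action list handle for a RAW_DECAP and/or RAW_ENCAP
 * sequence. Only the combinations listed in the comment below (decap+encap,
 * encap only, decap only) are accepted.
 *
 * @return
 *   Opaque handle on success, NULL otherwise and @p error is set.
 */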
14339 static struct rte_flow_action_list_handle*
14340 mlx5_hw_decap_encap_handle_create(struct rte_eth_dev *dev,
14341 				  const struct mlx5_flow_template_table_cfg *table_cfg,
14342 				  const struct rte_flow_action *actions,
14343 				  struct rte_flow_error *error)
14344 {
14345 	struct mlx5_priv *priv = dev->data->dev_private;
14346 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
14347 	const struct rte_flow_action *encap = NULL;
14348 	const struct rte_flow_action *decap = NULL;
14349 	struct rte_flow_indir_action_conf indirect_conf = {
14350 		.ingress = flow_attr->ingress,
14351 		.egress = flow_attr->egress,
14352 		.transfer = flow_attr->transfer,
14353 	};
14354 	struct mlx5_hw_encap_decap_action *handle;
14355 	uint64_t action_flags = 0;
14356 
14357 	/*
14358 	 * Allowed action sequences:
14359 	 * 1. raw_decap / raw_encap / end
14360 	 * 2. raw_encap / end
14361 	 * 3. raw_decap / end
14362 	 */
14363 	while (actions->type != RTE_FLOW_ACTION_TYPE_END) {
14364 		if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
14365 			if (action_flags) {
14366 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14367 						   actions, "Invalid indirect action list sequence");
14368 				return NULL;
14369 			}
14370 			action_flags |= MLX5_FLOW_ACTION_DECAP;
14371 			decap = actions;
14372 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
14373 			if (action_flags & MLX5_FLOW_ACTION_ENCAP) {
14374 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14375 						   actions, "Invalid indirect action list sequence");
14376 				return NULL;
14377 			}
14378 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
14379 			encap = actions;
14380 		} else {
14381 			rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14382 					   actions, "Invalid indirect action type in list");
14383 			return NULL;
14384 		}
14385 		actions++;
14386 	}
14387 	if (!decap && !encap) {
14388 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14389 				   actions, "Invalid indirect action combinations");
14390 		return NULL;
14391 	}
14392 	handle = mlx5_reformat_action_create(dev, &indirect_conf, encap, decap, error);
14393 	if (!handle) {
14394 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14395 				   actions, "Failed to create HWS decap_encap action");
14396 		return NULL;
14397 	}
14398 	handle->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
14399 	LIST_INSERT_HEAD(&priv->indirect_list_head, &handle->indirect, entry);
14400 	return (struct rte_flow_action_list_handle *)handle;
14401 }
14402 
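/**
 * Create an indirect action list handle on the asynchronous flow API path,
 * presumably reached through rte_flow_async_action_list_handle_create().
 * Legacy single-action lists are delegated to the regular indirect action
 * path; mirror and reformat lists get dedicated handles. When an operation
 * attribute is given, the call is tracked as an async job and its completion
 * is reported through the flow queue.
 *
 * Rough application-side usage (a sketch only, not code from this driver):
 *
 *   handle = rte_flow_async_action_list_handle_create(port_id, queue,
 *                                                     &op_attr, &conf,
 *                                                     actions, user_data,
 *                                                     &error);
 */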
14403 static struct rte_flow_action_list_handle *
14404 flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
14405 					const struct rte_flow_op_attr *attr,
14406 					const struct rte_flow_indir_action_conf *conf,
14407 					const struct rte_flow_action *actions,
14408 					void *user_data,
14409 					struct rte_flow_error *error)
14410 {
14411 	struct mlx5_hw_q_job *job = NULL;
14412 	bool push = flow_hw_action_push(attr);
14413 	enum mlx5_indirect_list_type list_type;
14414 	struct rte_flow_action_list_handle *handle;
14415 	struct mlx5_priv *priv = dev->data->dev_private;
14416 	const struct mlx5_flow_template_table_cfg table_cfg = {
14417 		.external = true,
14418 		.attr = {
14419 			.flow_attr = {
14420 				.ingress = conf->ingress,
14421 				.egress = conf->egress,
14422 				.transfer = conf->transfer
14423 			}
14424 		}
14425 	};
14426 
14427 	if (!mlx5_hw_ctx_validate(dev, error))
14428 		return NULL;
14429 	if (!actions) {
14430 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14431 				   NULL, "No action list");
14432 		return NULL;
14433 	}
14434 	list_type = flow_hw_inlist_type_get(actions);
14435 	if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14436 		/*
14437 		 * Legacy indirect actions already manage their async
14438 		 * resources internally. No need to do it twice.
14439 		 */
14440 		handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
14441 						   actions, user_data, error);
14442 		goto end;
14443 	}
14444 	if (attr) {
14445 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
14446 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
14447 					      MLX5_HW_INDIRECT_TYPE_LIST, error);
14448 		if (!job)
14449 			return NULL;
14450 	}
14451 	switch (list_type) {
14452 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
14453 		handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
14454 						      actions, error);
14455 		break;
14456 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
14457 		handle = mlx5_hw_decap_encap_handle_create(dev, &table_cfg,
14458 							   actions, error);
14459 		break;
14460 	default:
14461 		handle = NULL;
14462 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14463 				   actions, "Invalid list");
14464 	}
14465 	if (job) {
14466 		job->action = handle;
14467 		flow_hw_action_finalize(dev, queue, job, push, false,
14468 					handle != NULL);
14469 	}
14470 end:
14471 	return handle;
14472 }
14473 
14474 static struct rte_flow_action_list_handle *
14475 flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
14476 				  const struct rte_flow_indir_action_conf *conf,
14477 				  const struct rte_flow_action *actions,
14478 				  struct rte_flow_error *error)
14479 {
14480 	return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
14481 						       NULL, conf, actions,
14482 						       NULL, error);
14483 }
14484 
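/**
 * Destroy an indirect action list handle on the asynchronous flow API path.
 * Legacy handles are released through the regular indirect action destroy
 * path; mirror and reformat handles release their dedicated resources.
 */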
14485 static int
14486 flow_hw_async_action_list_handle_destroy
14487 			(struct rte_eth_dev *dev, uint32_t queue,
14488 			 const struct rte_flow_op_attr *attr,
14489 			 struct rte_flow_action_list_handle *handle,
14490 			 void *user_data, struct rte_flow_error *error)
14491 {
14492 	int ret = 0;
14493 	struct mlx5_hw_q_job *job = NULL;
14494 	bool push = flow_hw_action_push(attr);
14495 	struct mlx5_priv *priv = dev->data->dev_private;
14496 	enum mlx5_indirect_list_type type =
14497 		mlx5_get_indirect_list_type((void *)handle);
14498 
14499 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14500 		struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
14501 
14502 		ret = flow_hw_action_handle_destroy(dev, queue, attr,
14503 						    legacy->handle,
14504 						    user_data, error);
14505 		mlx5_indirect_list_remove_entry(&legacy->indirect);
14506 		goto end;
14507 	}
14508 	if (attr) {
14509 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
14510 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
14511 					      MLX5_HW_INDIRECT_TYPE_LIST, error);
14512 		if (!job)
14513 			return rte_errno;
14514 	}
14515 	switch (type) {
14516 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
14517 		mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
14518 		break;
14519 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
14520 		LIST_REMOVE(&((struct mlx5_hw_encap_decap_action *)handle)->indirect,
14521 			    entry);
14522 		mlx5_reformat_action_destroy(dev, handle, error);
14523 		break;
14524 	default:
14525 		ret = rte_flow_error_set(error, EINVAL,
14526 					  RTE_FLOW_ERROR_TYPE_ACTION,
14527 					  NULL, "Invalid indirect list handle");
14528 	}
14529 	if (job) {
14530 		flow_hw_action_finalize(dev, queue, job, push, false, true);
14531 	}
14532 end:
14533 	return ret;
14534 }
14535 
14536 static int
14537 flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
14538 				   struct rte_flow_action_list_handle *handle,
14539 				   struct rte_flow_error *error)
14540 {
14541 	return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
14542 							NULL, handle, NULL,
14543 							error);
14544 }
14545 
14546 static int
14547 flow_hw_async_action_list_handle_query_update
14548 		(struct rte_eth_dev *dev, uint32_t queue_id,
14549 		 const struct rte_flow_op_attr *attr,
14550 		 const struct rte_flow_action_list_handle *handle,
14551 		 const void **update, void **query,
14552 		 enum rte_flow_query_update_mode mode,
14553 		 void *user_data, struct rte_flow_error *error)
14554 {
14555 	enum mlx5_indirect_list_type type =
14556 		mlx5_get_indirect_list_type((const void *)handle);
14557 
14558 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14559 		struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
14560 
14561 		if (update && query)
14562 			return flow_hw_async_action_handle_query_update
14563 				(dev, queue_id, attr, legacy->handle,
14564 				 update, query, mode, user_data, error);
14565 		else if (update && update[0])
14566 			return flow_hw_action_handle_update(dev, queue_id, attr,
14567 							    legacy->handle, update[0],
14568 							    user_data, error);
14569 		else if (query && query[0])
14570 			return flow_hw_action_handle_query(dev, queue_id, attr,
14571 							   legacy->handle, query[0],
14572 							   user_data, error);
14573 		else
14574 			return rte_flow_error_set(error, EINVAL,
14575 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14576 						  NULL, "invalid legacy handle query_update parameters");
14577 	}
14578 	return -ENOTSUP;
14579 }
14580 
14581 static int
14582 flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
14583 					const struct rte_flow_action_list_handle *handle,
14584 					const void **update, void **query,
14585 					enum rte_flow_query_update_mode mode,
14586 					struct rte_flow_error *error)
14587 {
14588 	return flow_hw_async_action_list_handle_query_update
14589 					(dev, MLX5_HW_INV_QUEUE, NULL, handle,
14590 					 update, query, mode, NULL, error);
14591 }
14592 
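/**
 * Calculate the matcher hash of @p pattern for the given template table.
 * The items are first completed by flow_hw_get_rule_items() and then hashed
 * by mlx5dr in raw calculation mode.
 */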
14593 static int
14594 flow_hw_calc_table_hash(struct rte_eth_dev *dev,
14595 			 const struct rte_flow_template_table *table,
14596 			 const struct rte_flow_item pattern[],
14597 			 uint8_t pattern_template_index,
14598 			 uint32_t *hash, struct rte_flow_error *error)
14599 {
14600 	const struct rte_flow_item *items;
14601 	struct mlx5_flow_hw_pattern_params pp;
14602 	int res;
14603 
14604 	items = flow_hw_get_rule_items(dev, table, pattern,
14605 				       pattern_template_index,
14606 				       &pp);
14607 	res = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,
14608 					 pattern_template_index,
14609 					 MLX5DR_RULE_HASH_CALC_MODE_RAW,
14610 					 hash);
14611 	if (res)
14612 		return rte_flow_error_set(error, res,
14613 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14614 					  NULL,
14615 					  "hash could not be calculated");
14616 	return 0;
14617 }
14618 
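/**
 * Calculate the encapsulation entropy hash from the outer headers in
 * @p pattern. IPv4/IPv6 addresses, TCP/UDP ports and the next protocol are
 * collected from the item specs; the result width depends on the destination
 * field (16 bits for a source port, 8 bits otherwise).
 */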
14619 static int
14620 flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
14621 			const struct rte_flow_item pattern[],
14622 			enum rte_flow_encap_hash_field dest_field,
14623 			uint8_t *hash,
14624 			struct rte_flow_error *error)
14625 {
14626 	struct mlx5_priv *priv = dev->data->dev_private;
14627 	struct mlx5dr_crc_encap_entropy_hash_fields data;
14628 	enum mlx5dr_crc_encap_entropy_hash_size res_size =
14629 			dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT ?
14630 				MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_16 :
14631 				MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_8;
14632 	int res;
14633 
14634 	memset(&data, 0, sizeof(struct mlx5dr_crc_encap_entropy_hash_fields));
14635 
14636 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
14637 		switch (pattern->type) {
14638 		case RTE_FLOW_ITEM_TYPE_IPV4:
14639 			data.dst.ipv4_addr =
14640 				((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.dst_addr;
14641 			data.src.ipv4_addr =
14642 				((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.src_addr;
14643 			break;
14644 		case RTE_FLOW_ITEM_TYPE_IPV6:
14645 			memcpy(data.dst.ipv6_addr,
14646 			       ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
14647 			       sizeof(data.dst.ipv6_addr));
14648 			memcpy(data.src.ipv6_addr,
14649 			       ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
14650 			       sizeof(data.src.ipv6_addr));
14651 			break;
14652 		case RTE_FLOW_ITEM_TYPE_UDP:
14653 			data.next_protocol = IPPROTO_UDP;
14654 			data.dst_port =
14655 				((const struct rte_flow_item_udp *)(pattern->spec))->hdr.dst_port;
14656 			data.src_port =
14657 				((const struct rte_flow_item_udp *)(pattern->spec))->hdr.src_port;
14658 			break;
14659 		case RTE_FLOW_ITEM_TYPE_TCP:
14660 			data.next_protocol = IPPROTO_TCP;
14661 			data.dst_port =
14662 				((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.dst_port;
14663 			data.src_port =
14664 				((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.src_port;
14665 			break;
14666 		case RTE_FLOW_ITEM_TYPE_ICMP:
14667 			data.next_protocol = IPPROTO_ICMP;
14668 			break;
14669 		case RTE_FLOW_ITEM_TYPE_ICMP6:
14670 			data.next_protocol = IPPROTO_ICMPV6;
14671 			break;
14672 		default:
14673 			break;
14674 		}
14675 	}
14676 	res = mlx5dr_crc_encap_entropy_hash_calc(priv->dr_ctx, &data, hash, res_size);
14677 	if (res)
14678 		return rte_flow_error_set(error, res,
14679 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14680 					  NULL, "error while calculating encap hash");
14681 	return 0;
14682 }
14683 
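/**
 * Allocate additional multi-pattern action resources (Modify Header Argument
 * Objects) when the table is resized beyond the capacity already reserved.
 *
 * @return
 *   Index of the segment which received new resources, 0 if no allocation
 *   was needed, a negative value otherwise and @p error is set.
 */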
14684 static int
14685 flow_hw_table_resize_multi_pattern_actions(struct rte_eth_dev *dev,
14686 					   struct rte_flow_template_table *table,
14687 					   uint32_t nb_flows,
14688 					   struct rte_flow_error *error)
14689 {
14690 	struct mlx5_multi_pattern_segment *segment = table->mpctx.segments;
14691 	uint32_t bulk_size;
14692 	int i, ret;
14693 
14694 	/*
14695 	 * A segment always allocates its Modify Header Argument Objects in
14696 	 * powers of 2.
14697 	 * On resize, the PMD adds only the minimal required number of argument objects.
14698 	 * For example, if the table size was 10, 16 argument objects were allocated,
14699 	 * so resizing to 15 will not add new objects.
14700 	 */
14701 	for (i = 1;
14702 	     i < MLX5_MAX_TABLE_RESIZE_NUM && segment->capacity;
14703 	     i++, segment++) {
14704 		/* keep the devtools/checkpatches.sh happy */
14705 	}
14706 	if (i == MLX5_MAX_TABLE_RESIZE_NUM)
14707 		return rte_flow_error_set(error, EINVAL,
14708 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14709 					  table, "too many resizes");
14710 	if (segment->head_index - 1 >= nb_flows)
14711 		return 0;
14712 	bulk_size = rte_align32pow2(nb_flows - segment->head_index + 1);
14713 	ret = mlx5_tbl_multi_pattern_process(dev, table, segment,
14714 					     rte_log2_u32(bulk_size),
14715 					     error);
14716 	if (ret)
14717 		return rte_flow_error_set(error, EINVAL,
14718 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14719 					  table, "too many resizes");
14720 	return i;
14721 }
14722 
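/**
 * Resize a template table so that it can hold at least @p nb_flows rules.
 * A larger matcher is created on the spare matcher slot and the current
 * matcher is set as its resize source. Existing rules are moved later with
 * flow_hw_update_resized() and the retired matcher is released by
 * flow_hw_table_resize_complete(). Shrinking a table is not supported.
 */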
14723 static int
14724 flow_hw_table_resize(struct rte_eth_dev *dev,
14725 		     struct rte_flow_template_table *table,
14726 		     uint32_t nb_flows,
14727 		     struct rte_flow_error *error)
14728 {
14729 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
14730 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
14731 	struct mlx5dr_matcher_attr matcher_attr = table->matcher_attr;
14732 	struct mlx5_multi_pattern_segment *segment = NULL;
14733 	struct mlx5dr_matcher *matcher = NULL;
14734 	uint32_t i, selector = table->matcher_selector;
14735 	uint32_t other_selector = (selector + 1) & 1;
14736 	int ret;
14737 
14738 	if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
14739 		return rte_flow_error_set(error, EINVAL,
14740 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14741 					  table, "no resizable attribute");
14742 	if (table->matcher_info[other_selector].matcher)
14743 		return rte_flow_error_set(error, EINVAL,
14744 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14745 					  table, "last table resize was not completed");
14746 	if (nb_flows <= table->cfg.attr.nb_flows)
14747 		return rte_flow_error_set(error, EINVAL,
14748 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14749 					  table, "shrinking table is not supported");
14750 	ret = mlx5_ipool_resize(table->flow, nb_flows, error);
14751 	if (ret)
14752 		return ret;
14753 	/*
14754 	 * A resizable matcher doesn't support rule update. In this case, the ipool
14755 	 * for the resource is not created and there is no need to resize it.
14756 	 */
14757 	MLX5_ASSERT(!table->resource);
14758 	if (mlx5_is_multi_pattern_active(&table->mpctx)) {
14759 		ret = flow_hw_table_resize_multi_pattern_actions(dev, table, nb_flows, error);
14760 		if (ret < 0)
14761 			return ret;
14762 		if (ret > 0)
14763 			segment = table->mpctx.segments + ret;
14764 	}
14765 	for (i = 0; i < table->nb_item_templates; i++)
14766 		mt[i] = table->its[i]->mt;
14767 	for (i = 0; i < table->nb_action_templates; i++)
14768 		at[i] = table->ats[i].action_template->tmpl;
14769 	nb_flows = rte_align32pow2(nb_flows);
14770 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
14771 	matcher = mlx5dr_matcher_create(table->grp->tbl, mt,
14772 					table->nb_item_templates, at,
14773 					table->nb_action_templates,
14774 					&matcher_attr);
14775 	if (!matcher) {
14776 		ret = rte_flow_error_set(error, rte_errno,
14777 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14778 					 table, "failed to create new matcher");
14779 		goto error;
14780 	}
14781 	rte_rwlock_write_lock(&table->matcher_replace_rwlk);
14782 	ret = mlx5dr_matcher_resize_set_target
14783 			(table->matcher_info[selector].matcher, matcher);
14784 	if (ret) {
14785 		rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
14786 		ret = rte_flow_error_set(error, rte_errno,
14787 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14788 					 table, "failed to initiate matcher swap");
14789 		goto error;
14790 	}
14791 	table->cfg.attr.nb_flows = nb_flows;
14792 	table->matcher_info[other_selector].matcher = matcher;
14793 	table->matcher_selector = other_selector;
14794 	rte_atomic_store_explicit(&table->matcher_info[other_selector].refcnt,
14795 				  0, rte_memory_order_relaxed);
14796 	rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
14797 	return 0;
14798 error:
14799 	if (segment)
14800 		mlx5_destroy_multi_pattern_segment(segment);
14801 	if (matcher) {
14802 		ret = mlx5dr_matcher_destroy(matcher);
14803 		return rte_flow_error_set(error, rte_errno,
14804 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14805 					  table, "failed to destroy new matcher");
14806 	}
14807 	return ret;
14808 }
14809 
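/**
 * Complete a previously started table resize by destroying the retired
 * matcher. Fails with EBUSY while rules still reference the old matcher.
 */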
14810 static int
14811 flow_hw_table_resize_complete(struct rte_eth_dev *dev,
14812 			      struct rte_flow_template_table *table,
14813 			      struct rte_flow_error *error)
14814 {
14815 	int ret;
14816 	uint32_t selector = table->matcher_selector;
14817 	uint32_t other_selector = (selector + 1) & 1;
14818 	struct mlx5_matcher_info *matcher_info = &table->matcher_info[other_selector];
14819 	uint32_t matcher_refcnt;
14820 
14821 	if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
14822 		return rte_flow_error_set(error, EINVAL,
14823 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14824 					  table, "no resizable attribute");
14825 	matcher_refcnt = rte_atomic_load_explicit(&matcher_info->refcnt,
14826 						  rte_memory_order_relaxed);
14827 	if (!matcher_info->matcher || matcher_refcnt)
14828 		return rte_flow_error_set(error, EBUSY,
14829 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14830 					  table, "cannot complete table resize");
14831 	ret = mlx5dr_matcher_destroy(matcher_info->matcher);
14832 	if (ret)
14833 		return rte_flow_error_set(error, rte_errno,
14834 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14835 					  table, "failed to destroy retired matcher");
14836 	matcher_info->matcher = NULL;
14837 	return 0;
14838 }
14839 
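/**
 * Move a single flow rule to the matcher installed by the last table resize.
 * Rules already created on the current matcher only get a completion
 * enqueued; other rules are moved with mlx5dr_matcher_resize_rule_move().
 */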
14840 static int
14841 flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,
14842 		       const struct rte_flow_op_attr *attr,
14843 		       struct rte_flow *flow, void *user_data,
14844 		       struct rte_flow_error *error)
14845 {
14846 	int ret;
14847 	struct mlx5_priv *priv = dev->data->dev_private;
14848 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
14849 	struct rte_flow_template_table *table = hw_flow->table;
14850 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
14851 	uint32_t table_selector = table->matcher_selector;
14852 	uint32_t rule_selector = aux->matcher_selector;
14853 	uint32_t other_selector;
14854 	struct mlx5dr_matcher *other_matcher;
14855 	struct mlx5dr_rule_attr rule_attr = {
14856 		.queue_id = queue,
14857 		.burst = attr->postpone,
14858 	};
14859 
14860 	MLX5_ASSERT(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
14861 	/*
14862 	 * mlx5dr_matcher_resize_rule_move() accepts the original table matcher -
14863 	 * the one that was used BEFORE the table resize.
14864 	 * Since this function is called AFTER the table resize,
14865 	 * `table->matcher_selector` always points to the new matcher and
14866 	 * `aux->matcher_selector` points to the matcher used to create the flow.
14867 	 */
14868 	other_selector = rule_selector == table_selector ?
14869 			 (rule_selector + 1) & 1 : rule_selector;
14870 	other_matcher = table->matcher_info[other_selector].matcher;
14871 	if (!other_matcher)
14872 		return rte_flow_error_set(error, EINVAL,
14873 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14874 					  "no active table resize");
14875 	hw_flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE;
14876 	hw_flow->user_data = user_data;
14877 	rule_attr.user_data = hw_flow;
14878 	if (rule_selector == table_selector) {
14879 		struct rte_ring *ring = !attr->postpone ?
14880 					priv->hw_q[queue].flow_transfer_completed :
14881 					priv->hw_q[queue].flow_transfer_pending;
14882 		rte_ring_enqueue(ring, hw_flow);
14883 		flow_hw_q_inc_flow_ops(priv, queue);
14884 		return 0;
14885 	}
14886 	ret = mlx5dr_matcher_resize_rule_move(other_matcher,
14887 					      (struct mlx5dr_rule *)hw_flow->rule,
14888 					      &rule_attr);
14889 	if (ret) {
14890 		return rte_flow_error_set(error, rte_errno,
14891 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
14892 					  "flow transfer failed");
14893 	}
14894 	flow_hw_q_inc_flow_ops(priv, queue);
14895 	return 0;
14896 }
14897 
14898 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
14899 	.list_create = flow_hw_list_create,
14900 	.list_destroy = flow_hw_list_destroy,
14901 	.validate = flow_dv_validate,
14902 	.info_get = flow_hw_info_get,
14903 	.configure = flow_hw_configure,
14904 	.pattern_validate = flow_hw_pattern_validate,
14905 	.pattern_template_create = flow_hw_pattern_template_create,
14906 	.pattern_template_destroy = flow_hw_pattern_template_destroy,
14907 	.actions_validate = flow_hw_actions_validate,
14908 	.actions_template_create = flow_hw_actions_template_create,
14909 	.actions_template_destroy = flow_hw_actions_template_destroy,
14910 	.template_table_create = flow_hw_template_table_create,
14911 	.template_table_destroy = flow_hw_table_destroy,
14912 	.table_resize = flow_hw_table_resize,
14913 	.group_set_miss_actions = flow_hw_group_set_miss_actions,
14914 	.async_flow_create = flow_hw_async_flow_create,
14915 	.async_flow_create_by_index = flow_hw_async_flow_create_by_index,
14916 	.async_flow_update = flow_hw_async_flow_update,
14917 	.async_flow_destroy = flow_hw_async_flow_destroy,
14918 	.flow_update_resized = flow_hw_update_resized,
14919 	.table_resize_complete = flow_hw_table_resize_complete,
14920 	.pull = flow_hw_pull,
14921 	.push = flow_hw_push,
14922 	.async_action_create = flow_hw_action_handle_create,
14923 	.async_action_destroy = flow_hw_action_handle_destroy,
14924 	.async_action_update = flow_hw_action_handle_update,
14925 	.async_action_query_update = flow_hw_async_action_handle_query_update,
14926 	.async_action_query = flow_hw_action_handle_query,
14927 	.action_validate = flow_hw_action_validate,
14928 	.action_create = flow_hw_action_create,
14929 	.action_destroy = flow_hw_action_destroy,
14930 	.action_update = flow_hw_action_update,
14931 	.action_query = flow_hw_action_query,
14932 	.action_query_update = flow_hw_action_query_update,
14933 	.action_list_handle_create = flow_hw_action_list_handle_create,
14934 	.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
14935 	.action_list_handle_query_update =
14936 		flow_hw_action_list_handle_query_update,
14937 	.async_action_list_handle_create =
14938 		flow_hw_async_action_list_handle_create,
14939 	.async_action_list_handle_destroy =
14940 		flow_hw_async_action_list_handle_destroy,
14941 	.async_action_list_handle_query_update =
14942 		flow_hw_async_action_list_handle_query_update,
14943 	.query = flow_hw_query,
14944 	.get_aged_flows = flow_hw_get_aged_flows,
14945 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
14946 	.item_create = flow_dv_item_create,
14947 	.item_release = flow_dv_item_release,
14948 	.flow_calc_table_hash = flow_hw_calc_table_hash,
14949 	.flow_calc_encap_hash = flow_hw_calc_encap_hash,
14950 };
14951 
14952 /**
14953  * Creates a control flow using the flow template API on @p proxy_dev device,
14954  * on behalf of @p owner_dev device.
14955  *
14956  * This function uses locks internally to synchronize access to the
14957  * flow queue.
14958  *
14959  * Created flow is stored in private list associated with @p proxy_dev device.
14960  *
14961  * @param owner_dev
14962  *   Pointer to Ethernet device on behalf of which flow is created.
14963  * @param proxy_dev
14964  *   Pointer to Ethernet device on which flow is created.
14965  * @param table
14966  *   Pointer to flow table.
14967  * @param items
14968  *   Pointer to flow rule items.
14969  * @param item_template_idx
14970  *   Index of an item template associated with @p table.
14971  * @param actions
14972  *   Pointer to flow rule actions.
14973  * @param action_template_idx
14974  *   Index of an action template associated with @p table.
14975  * @param info
14976  *   Additional info about control flow rule.
14977  * @param external
14978  *   If true, the created flow rule is stored on the external control flows list.
14979  *
14980  * @return
14981  *   0 on success, negative errno value otherwise and rte_errno set.
14982  */
14983 static __rte_unused int
14984 flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
14985 			 struct rte_eth_dev *proxy_dev,
14986 			 struct rte_flow_template_table *table,
14987 			 struct rte_flow_item items[],
14988 			 uint8_t item_template_idx,
14989 			 struct rte_flow_action actions[],
14990 			 uint8_t action_template_idx,
14991 			 struct mlx5_hw_ctrl_flow_info *info,
14992 			 bool external)
14993 {
14994 	struct mlx5_priv *priv = proxy_dev->data->dev_private;
14995 	uint32_t queue = CTRL_QUEUE_ID(priv);
14996 	struct rte_flow_op_attr op_attr = {
14997 		.postpone = 0,
14998 	};
14999 	struct rte_flow *flow = NULL;
15000 	struct mlx5_hw_ctrl_flow *entry = NULL;
15001 	int ret;
15002 
15003 	rte_spinlock_lock(&priv->hw_ctrl_lock);
15004 	entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
15005 			    0, SOCKET_ID_ANY);
15006 	if (!entry) {
15007 		DRV_LOG(ERR, "port %u not enough memory to create control flows",
15008 			proxy_dev->data->port_id);
15009 		rte_errno = ENOMEM;
15010 		ret = -rte_errno;
15011 		goto error;
15012 	}
15013 	flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
15014 					 items, item_template_idx,
15015 					 actions, action_template_idx,
15016 					 NULL, NULL);
15017 	if (!flow) {
15018 		DRV_LOG(ERR, "port %u failed to enqueue create control"
15019 			" flow operation", proxy_dev->data->port_id);
15020 		ret = -rte_errno;
15021 		goto error;
15022 	}
15023 	ret = __flow_hw_pull_comp(proxy_dev, queue, NULL);
15024 	if (ret) {
15025 		DRV_LOG(ERR, "port %u failed to insert control flow",
15026 			proxy_dev->data->port_id);
15027 		rte_errno = EINVAL;
15028 		ret = -rte_errno;
15029 		goto error;
15030 	}
15031 	entry->owner_dev = owner_dev;
15032 	entry->flow = flow;
15033 	if (info)
15034 		entry->info = *info;
15035 	else
15036 		entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL;
15037 	if (external)
15038 		LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
15039 	else
15040 		LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
15041 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15042 	return 0;
15043 error:
15044 	if (entry)
15045 		mlx5_free(entry);
15046 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15047 	return ret;
15048 }
15049 
15050 /**
15051  * Destroys a control flow @p flow using flow template API on @p dev device.
15052  *
15053  * This function uses locks internally to synchronize access to the
15054  * flow queue.
15055  *
15056  * If the @p flow is stored on any private list/pool, then the caller must free
15057  * the relevant resources.
15058  *
15059  * @param dev
15060  *   Pointer to Ethernet device.
15061  * @param flow
15062  *   Pointer to flow rule.
15063  *
15064  * @return
15065  *   0 on success, non-zero value otherwise.
15066  */
15067 static int
15068 flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
15069 {
15070 	struct mlx5_priv *priv = dev->data->dev_private;
15071 	uint32_t queue = CTRL_QUEUE_ID(priv);
15072 	struct rte_flow_op_attr op_attr = {
15073 		.postpone = 0,
15074 	};
15075 	int ret;
15076 
15077 	rte_spinlock_lock(&priv->hw_ctrl_lock);
15078 	ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
15079 	if (ret) {
15080 		DRV_LOG(ERR, "port %u failed to enqueue destroy control"
15081 			" flow operation", dev->data->port_id);
15082 		goto exit;
15083 	}
15084 	ret = __flow_hw_pull_comp(dev, queue, NULL);
15085 	if (ret) {
15086 		DRV_LOG(ERR, "port %u failed to destroy control flow",
15087 			dev->data->port_id);
15088 		rte_errno = EINVAL;
15089 		ret = -rte_errno;
15090 		goto exit;
15091 	}
15092 exit:
15093 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15094 	return ret;
15095 }
15096 
15097 /**
15098  * Destroys control flows created on behalf of @p owner device on @p dev device.
15099  *
15100  * @param dev
15101  *   Pointer to Ethernet device on which control flows were created.
15102  * @param owner
15103  *   Pointer to Ethernet device owning control flows.
15104  *
15105  * @return
15106  *   0 on success, otherwise negative error code is returned and
15107  *   rte_errno is set.
15108  */
15109 static int
15110 flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
15111 {
15112 	struct mlx5_priv *priv = dev->data->dev_private;
15113 	struct mlx5_hw_ctrl_flow *cf;
15114 	struct mlx5_hw_ctrl_flow *cf_next;
15115 	int ret;
15116 
15117 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
15118 	while (cf != NULL) {
15119 		cf_next = LIST_NEXT(cf, next);
15120 		if (cf->owner_dev == owner) {
15121 			ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15122 			if (ret) {
15123 				rte_errno = ret;
15124 				return -ret;
15125 			}
15126 			LIST_REMOVE(cf, next);
15127 			mlx5_free(cf);
15128 		}
15129 		cf = cf_next;
15130 	}
15131 	return 0;
15132 }
15133 
15134 /**
15135  * Destroys control flows created for @p owner_dev device.
15136  *
15137  * @param owner_dev
15138  *   Pointer to Ethernet device owning control flows.
15139  *
15140  * @return
15141  *   0 on success, otherwise negative error code is returned and
15142  *   rte_errno is set.
15143  */
15144 int
15145 mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
15146 {
15147 	struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
15148 	struct rte_eth_dev *proxy_dev;
15149 	uint16_t owner_port_id = owner_dev->data->port_id;
15150 	uint16_t proxy_port_id = owner_dev->data->port_id;
15151 	int ret;
15152 
15153 	/* Flush all flows created by this port for itself. */
15154 	ret = flow_hw_flush_ctrl_flows_owned_by(owner_dev, owner_dev);
15155 	if (ret)
15156 		return ret;
15157 	/* Flush all flows created for this port on proxy port. */
15158 	if (owner_priv->sh->config.dv_esw_en) {
15159 		ret = rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL);
15160 		if (ret == -ENODEV) {
15161 			DRV_LOG(DEBUG, "Unable to find transfer proxy port for port %u. It was "
15162 				       "probably closed. Control flows were cleared.",
15163 				       owner_port_id);
15164 			rte_errno = 0;
15165 			return 0;
15166 		} else if (ret) {
15167 			DRV_LOG(ERR, "Unable to find proxy port for port %u (ret = %d)",
15168 				owner_port_id, ret);
15169 			return ret;
15170 		}
15171 		proxy_dev = &rte_eth_devices[proxy_port_id];
15172 	} else {
15173 		proxy_dev = owner_dev;
15174 	}
15175 	return flow_hw_flush_ctrl_flows_owned_by(proxy_dev, owner_dev);
15176 }
15177 
15178 /**
15179  * Destroys all control flows created on @p dev device.
15180  *
15181  * @param dev
15182  *   Pointer to Ethernet device.
15183  *
15184  * @return
15185  *   0 on success, otherwise negative error code is returned and
15186  *   rte_errno is set.
15187  */
15188 static int
15189 flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
15190 {
15191 	struct mlx5_priv *priv = dev->data->dev_private;
15192 	struct mlx5_hw_ctrl_flow *cf;
15193 	struct mlx5_hw_ctrl_flow *cf_next;
15194 	int ret;
15195 
15196 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
15197 	while (cf != NULL) {
15198 		cf_next = LIST_NEXT(cf, next);
15199 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15200 		if (ret) {
15201 			rte_errno = ret;
15202 			return -ret;
15203 		}
15204 		LIST_REMOVE(cf, next);
15205 		mlx5_free(cf);
15206 		cf = cf_next;
15207 	}
15208 	cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
15209 	while (cf != NULL) {
15210 		cf_next = LIST_NEXT(cf, next);
15211 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15212 		if (ret) {
15213 			rte_errno = ret;
15214 			return -ret;
15215 		}
15216 		LIST_REMOVE(cf, next);
15217 		mlx5_free(cf);
15218 		cf = cf_next;
15219 	}
15220 	return 0;
15221 }
15222 
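/**
 * Create the default SQ miss control flow rules for SQ @p sqn on the transfer
 * proxy port: a root rule matching the E-Switch Manager and the SQ which
 * jumps to group 1, and a non-root rule matching the REG_C_0 marker and the
 * SQ which forwards traffic to the represented port.
 */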
15223 int
15224 mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
15225 {
15226 	uint16_t port_id = dev->data->port_id;
15227 	struct rte_flow_item_ethdev esw_mgr_spec = {
15228 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
15229 	};
15230 	struct rte_flow_item_ethdev esw_mgr_mask = {
15231 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
15232 	};
15233 	struct rte_flow_item_tag reg_c0_spec = {
15234 		.index = (uint8_t)REG_C_0,
15235 		.data = flow_hw_esw_mgr_regc_marker(dev),
15236 	};
15237 	struct rte_flow_item_tag reg_c0_mask = {
15238 		.index = 0xff,
15239 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
15240 	};
15241 	struct mlx5_rte_flow_item_sq sq_spec = {
15242 		.queue = sqn,
15243 	};
15244 	struct rte_flow_action_ethdev port = {
15245 		.port_id = port_id,
15246 	};
15247 	struct rte_flow_item items[3] = { { 0 } };
15248 	struct rte_flow_action actions[3] = { { 0 } };
15249 	struct mlx5_hw_ctrl_flow_info flow_info = {
15250 		.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
15251 		.esw_mgr_sq = sqn,
15252 	};
15253 	struct rte_eth_dev *proxy_dev;
15254 	struct mlx5_priv *proxy_priv;
15255 	uint16_t proxy_port_id = dev->data->port_id;
15256 	int ret;
15257 
15258 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15259 	if (ret) {
15260 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15261 			     "port must be present to create default SQ miss flows.",
15262 			     port_id);
15263 		return ret;
15264 	}
15265 	proxy_dev = &rte_eth_devices[proxy_port_id];
15266 	proxy_priv = proxy_dev->data->dev_private;
15267 	if (!proxy_priv->dr_ctx) {
15268 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
15269 			       "for HWS to create default SQ miss flows. Default flows will "
15270 			       "not be created.",
15271 			       proxy_port_id, port_id);
15272 		return 0;
15273 	}
15274 	if (!proxy_priv->hw_ctrl_fdb ||
15275 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
15276 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
15277 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
15278 			     "default flow tables were not created.",
15279 			     proxy_port_id, port_id);
15280 		rte_errno = ENOMEM;
15281 		return -rte_errno;
15282 	}
15283 	/*
15284 	 * Create a root SQ miss flow rule - match E-Switch Manager and SQ,
15285 	 * and jump to group 1.
15286 	 */
15287 	items[0] = (struct rte_flow_item){
15288 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
15289 		.spec = &esw_mgr_spec,
15290 		.mask = &esw_mgr_mask,
15291 	};
15292 	items[1] = (struct rte_flow_item){
15293 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15294 		.spec = &sq_spec,
15295 	};
15296 	items[2] = (struct rte_flow_item){
15297 		.type = RTE_FLOW_ITEM_TYPE_END,
15298 	};
15299 	actions[0] = (struct rte_flow_action){
15300 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
15301 	};
15302 	actions[1] = (struct rte_flow_action){
15303 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
15304 	};
15305 	actions[2] = (struct rte_flow_action) {
15306 		.type = RTE_FLOW_ACTION_TYPE_END,
15307 	};
15308 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
15309 				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl,
15310 				       items, 0, actions, 0, &flow_info, external);
15311 	if (ret) {
15312 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
15313 			port_id, sqn, ret);
15314 		return ret;
15315 	}
15316 	/*
15317 	 * Create a non-root SQ miss flow rule - match REG_C_0 marker and SQ,
15318 	 * and forward to port.
15319 	 */
15320 	items[0] = (struct rte_flow_item){
15321 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
15322 		.spec = &reg_c0_spec,
15323 		.mask = &reg_c0_mask,
15324 	};
15325 	items[1] = (struct rte_flow_item){
15326 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15327 		.spec = &sq_spec,
15328 	};
15329 	items[2] = (struct rte_flow_item){
15330 		.type = RTE_FLOW_ITEM_TYPE_END,
15331 	};
15332 	actions[0] = (struct rte_flow_action){
15333 		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
15334 		.conf = &port,
15335 	};
15336 	actions[1] = (struct rte_flow_action){
15337 		.type = RTE_FLOW_ACTION_TYPE_END,
15338 	};
15339 	flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
15340 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
15341 				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl,
15342 				       items, 0, actions, 0, &flow_info, external);
15343 	if (ret) {
15344 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
15345 			port_id, sqn, ret);
15346 		return ret;
15347 	}
15348 	return 0;
15349 }
15350 
15351 static bool
15352 flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf,
15353 				 struct rte_eth_dev *dev,
15354 				 uint32_t sqn)
15355 {
15356 	if (cf->owner_dev != dev)
15357 		return false;
15358 	if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
15359 		return true;
15360 	if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
15361 		return true;
15362 	return false;
15363 }
15364 
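/**
 * Destroy the default SQ miss control flow rules created for SQ @p sqn on
 * behalf of @p dev on the transfer proxy port.
 */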
15365 int
15366 mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
15367 {
15368 	uint16_t port_id = dev->data->port_id;
15369 	uint16_t proxy_port_id = dev->data->port_id;
15370 	struct rte_eth_dev *proxy_dev;
15371 	struct mlx5_priv *proxy_priv;
15372 	struct mlx5_hw_ctrl_flow *cf;
15373 	struct mlx5_hw_ctrl_flow *cf_next;
15374 	int ret;
15375 
15376 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15377 	if (ret) {
15378 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15379 			     "port must be present for default SQ miss flow rules to exist.",
15380 			     port_id);
15381 		return ret;
15382 	}
15383 	proxy_dev = &rte_eth_devices[proxy_port_id];
15384 	proxy_priv = proxy_dev->data->dev_private;
15385 	/* FDB default flow rules must be enabled. */
15386 	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
15387 	if (!proxy_priv->dr_ctx)
15388 		return 0;
15389 	if (!proxy_priv->hw_ctrl_fdb ||
15390 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
15391 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl)
15392 		return 0;
15393 	cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
15394 	while (cf != NULL) {
15395 		cf_next = LIST_NEXT(cf, next);
15396 		if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) {
15397 			claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
15398 			LIST_REMOVE(cf, next);
15399 			mlx5_free(cf);
15400 		}
15401 		cf = cf_next;
15402 	}
15403 	return 0;
15404 }
15405 
15406 int
15407 mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
15408 {
15409 	uint16_t port_id = dev->data->port_id;
15410 	struct rte_flow_item_ethdev port_spec = {
15411 		.port_id = port_id,
15412 	};
15413 	struct rte_flow_item items[] = {
15414 		{
15415 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
15416 			.spec = &port_spec,
15417 		},
15418 		{
15419 			.type = RTE_FLOW_ITEM_TYPE_END,
15420 		},
15421 	};
15422 	struct rte_flow_action_jump jump = {
15423 		.group = 1,
15424 	};
15425 	struct rte_flow_action actions[] = {
15426 		{
15427 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
15428 			.conf = &jump,
15429 		},
15430 		{
15431 			.type = RTE_FLOW_ACTION_TYPE_END,
15432 		}
15433 	};
15434 	struct mlx5_hw_ctrl_flow_info flow_info = {
15435 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP,
15436 	};
15437 	struct rte_eth_dev *proxy_dev;
15438 	struct mlx5_priv *proxy_priv;
15439 	uint16_t proxy_port_id = dev->data->port_id;
15440 	int ret;
15441 
15442 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15443 	if (ret) {
15444 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15445 			     "port must be present to create default FDB jump rule.",
15446 			     port_id);
15447 		return ret;
15448 	}
15449 	proxy_dev = &rte_eth_devices[proxy_port_id];
15450 	proxy_priv = proxy_dev->data->dev_private;
15451 	/* FDB default flow rules must be enabled. */
15452 	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
15453 	if (!proxy_priv->dr_ctx) {
15454 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
15455 			       "for HWS to create default FDB jump rule. Default rule will "
15456 			       "not be created.",
15457 			       proxy_port_id, port_id);
15458 		return 0;
15459 	}
15460 	if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) {
15461 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
15462 			     "default flow tables were not created.",
15463 			     proxy_port_id, port_id);
15464 		rte_errno = EINVAL;
15465 		return -rte_errno;
15466 	}
15467 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
15468 					proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl,
15469 					items, 0, actions, 0, &flow_info, false);
15470 }
15471 
15472 int
15473 mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
15474 {
15475 	struct mlx5_priv *priv = dev->data->dev_private;
15476 	struct rte_flow_item_eth promisc = {
15477 		.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
15478 		.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
15479 		.hdr.ether_type = 0,
15480 	};
15481 	struct rte_flow_item eth_all[] = {
15482 		[0] = {
15483 			.type = RTE_FLOW_ITEM_TYPE_ETH,
15484 			.spec = &promisc,
15485 			.mask = &promisc,
15486 		},
15487 		[1] = {
15488 			.type = RTE_FLOW_ITEM_TYPE_END,
15489 		},
15490 	};
15491 	struct rte_flow_action_modify_field mreg_action = {
15492 		.operation = RTE_FLOW_MODIFY_SET,
15493 		.dst = {
15494 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
15495 			.tag_index = REG_C_1,
15496 		},
15497 		.src = {
15498 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
15499 			.tag_index = REG_A,
15500 		},
15501 		.width = 32,
15502 	};
15503 	struct rte_flow_action copy_reg_action[] = {
15504 		[0] = {
15505 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
15506 			.conf = &mreg_action,
15507 		},
15508 		[1] = {
15509 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
15510 		},
15511 		[2] = {
15512 			.type = RTE_FLOW_ACTION_TYPE_END,
15513 		},
15514 	};
15515 	struct mlx5_hw_ctrl_flow_info flow_info = {
15516 		.type = MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY,
15517 	};
15518 
15519 	MLX5_ASSERT(priv->master);
15520 	if (!priv->dr_ctx ||
15521 	    !priv->hw_ctrl_fdb ||
15522 	    !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
15523 		return 0;
15524 	return flow_hw_create_ctrl_flow(dev, dev,
15525 					priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl,
15526 					eth_all, 0, copy_reg_action, 0, &flow_info, false);
15527 }
15528 
15529 int
15530 mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
15531 {
15532 	struct mlx5_priv *priv = dev->data->dev_private;
15533 	struct mlx5_rte_flow_item_sq sq_spec = {
15534 		.queue = sqn,
15535 	};
15536 	struct rte_flow_item items[] = {
15537 		{
15538 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15539 			.spec = &sq_spec,
15540 		},
15541 		{
15542 			.type = RTE_FLOW_ITEM_TYPE_END,
15543 		},
15544 	};
15545 	/*
15546 	 * Allocate an actions array suitable for all cases - extended metadata enabled or not.
15547 	 * With extended metadata, there will be an additional MODIFY_FIELD action before JUMP.
15548 	 */
15549 	struct rte_flow_action actions[] = {
15550 		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD },
15551 		{ .type = RTE_FLOW_ACTION_TYPE_JUMP },
15552 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15553 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15554 	};
15555 	struct mlx5_hw_ctrl_flow_info flow_info = {
15556 		.type = MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
15557 		.tx_repr_sq = sqn,
15558 	};
15559 
15560 	/* It is assumed that caller checked for representor matching. */
15561 	MLX5_ASSERT(priv->sh->config.repr_matching);
15562 	if (!priv->dr_ctx) {
15563 		DRV_LOG(DEBUG, "Port %u must be configured for HWS before creating "
15564 			       "default egress flow rules. Omitting creation.",
15565 			       dev->data->port_id);
15566 		return 0;
15567 	}
15568 	if (!priv->hw_tx_repr_tagging_tbl) {
15569 		DRV_LOG(ERR, "Port %u is configured for HWS, but table for default "
15570 			     "egress flow rules does not exist.",
15571 			     dev->data->port_id);
15572 		rte_errno = EINVAL;
15573 		return -rte_errno;
15574 	}
15575 	/*
15576 	 * If extended metadata mode is enabled, then an additional MODIFY_FIELD action must be
15577 	 * placed before the terminating JUMP action.
15578 	 */
15579 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
15580 		actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
15581 		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
15582 	}
15583 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
15584 					items, 0, actions, 0, &flow_info, external);
15585 }
15586 
15587 int
15588 mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
15589 {
15590 	struct mlx5_priv *priv = dev->data->dev_private;
15591 	struct rte_flow_item_eth lacp_item = {
15592 		.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
15593 	};
15594 	struct rte_flow_item eth_lacp[] = {
15595 		[0] = {
15596 			.type = RTE_FLOW_ITEM_TYPE_ETH,
15597 			.spec = &lacp_item,
15598 			.mask = &lacp_item,
15599 		},
15600 		[1] = {
15601 			.type = RTE_FLOW_ITEM_TYPE_END,
15602 		},
15603 	};
15604 	struct rte_flow_action miss_action[] = {
15605 		[0] = {
15606 			.type = (enum rte_flow_action_type)
15607 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
15608 		},
15609 		[1] = {
15610 			.type = RTE_FLOW_ACTION_TYPE_END,
15611 		},
15612 	};
15613 	struct mlx5_hw_ctrl_flow_info flow_info = {
15614 		.type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
15615 	};
15616 
15617 	if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl)
15618 		return 0;
15619 	return flow_hw_create_ctrl_flow(dev, dev,
15620 					priv->hw_ctrl_fdb->hw_lacp_rx_tbl,
15621 					eth_lacp, 0, miss_action, 0, &flow_info, false);
15622 }
15623 
15624 static uint32_t
15625 __calc_pattern_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
15626 {
15627 	switch (eth_pattern_type) {
15628 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
15629 		return MLX5_CTRL_PROMISCUOUS;
15630 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
15631 		return MLX5_CTRL_ALL_MULTICAST;
15632 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
15633 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15634 		return MLX5_CTRL_BROADCAST;
15635 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
15636 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15637 		return MLX5_CTRL_IPV4_MULTICAST;
15638 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
15639 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15640 		return MLX5_CTRL_IPV6_MULTICAST;
15641 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
15642 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
15643 		return MLX5_CTRL_DMAC;
15644 	default:
15645 		/* Should not reach here. */
15646 		MLX5_ASSERT(false);
15647 		return 0;
15648 	}
15649 }
15650 
15651 static uint32_t
15652 __calc_vlan_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
15653 {
15654 	switch (eth_pattern_type) {
15655 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15656 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15657 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15658 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
15659 		return MLX5_CTRL_VLAN_FILTER;
15660 	default:
15661 		return 0;
15662 	}
15663 }
15664 
15665 static bool
15666 eth_pattern_type_is_requested(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
15667 			      uint32_t flags)
15668 {
15669 	uint32_t pattern_flags = __calc_pattern_flags(eth_pattern_type);
15670 	uint32_t vlan_flags = __calc_vlan_flags(eth_pattern_type);
15671 	bool pattern_requested = !!(pattern_flags & flags);
15672 	bool consider_vlan = vlan_flags || (MLX5_CTRL_VLAN_FILTER & flags);
15673 	bool vlan_requested = !!(vlan_flags & flags);
15674 
15675 	if (consider_vlan)
15676 		return pattern_requested && vlan_requested;
15677 	else
15678 		return pattern_requested;
15679 }
15680 
15681 static bool
15682 rss_type_is_requested(struct mlx5_priv *priv,
15683 		      const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15684 {
15685 	struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[rss_type];
15686 	unsigned int i;
15687 
15688 	for (i = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
15689 		if (at->actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
15690 			const struct rte_flow_action_rss *rss = at->actions[i].conf;
15691 			uint64_t rss_types = rss->types;
15692 
15693 			if ((rss_types & priv->rss_conf.rss_hf) != rss_types)
15694 				return false;
15695 		}
15696 	}
15697 	return true;
15698 }
15699 
15700 static const struct rte_flow_item_eth *
15701 __get_eth_spec(const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern)
15702 {
15703 	switch (pattern) {
15704 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
15705 		return &ctrl_rx_eth_promisc_spec;
15706 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
15707 		return &ctrl_rx_eth_mcast_spec;
15708 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
15709 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15710 		return &ctrl_rx_eth_bcast_spec;
15711 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
15712 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15713 		return &ctrl_rx_eth_ipv4_mcast_spec;
15714 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
15715 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15716 		return &ctrl_rx_eth_ipv6_mcast_spec;
15717 	default:
15718 		/* This case should not be reached. */
15719 		MLX5_ASSERT(false);
15720 		return NULL;
15721 	}
15722 }
15723 
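/**
 * Create a single control Rx RSS flow rule for the given Ethernet pattern
 * type and expanded RSS type. Used when VLAN filtering is not requested.
 */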
15724 static int
15725 __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
15726 			    struct rte_flow_template_table *tbl,
15727 			    const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15728 			    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15729 {
15730 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
15731 	struct rte_flow_item items[5];
15732 	struct rte_flow_action actions[] = {
15733 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
15734 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15735 	};
15736 	struct mlx5_hw_ctrl_flow_info flow_info = {
15737 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
15738 	};
15739 
15740 	if (!eth_spec)
15741 		return -EINVAL;
15742 	memset(items, 0, sizeof(items));
15743 	items[0] = (struct rte_flow_item){
15744 		.type = RTE_FLOW_ITEM_TYPE_ETH,
15745 		.spec = eth_spec,
15746 	};
15747 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
15748 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
15749 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
15750 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
15751 	/* Without VLAN filtering, only a single flow rule must be created. */
15752 	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
15753 }
15754 
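/*
 * Same as __flow_hw_ctrl_flows_single(), but with VLAN filtering:
 * one flow rule is created for each VLAN ID registered in the VLAN filter.
 */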
15755 static int
15756 __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
15757 				 struct rte_flow_template_table *tbl,
15758 				 const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15759 				 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15760 {
15761 	struct mlx5_priv *priv = dev->data->dev_private;
15762 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
15763 	struct rte_flow_item items[5];
15764 	struct rte_flow_action actions[] = {
15765 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
15766 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15767 	};
15768 	struct mlx5_hw_ctrl_flow_info flow_info = {
15769 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
15770 	};
15771 	unsigned int i;
15772 
15773 	if (!eth_spec)
15774 		return -EINVAL;
15775 	memset(items, 0, sizeof(items));
15776 	items[0] = (struct rte_flow_item){
15777 		.type = RTE_FLOW_ITEM_TYPE_ETH,
15778 		.spec = eth_spec,
15779 	};
15780 	/* VLAN item is used for filtering; its TCI spec is filled in per VLAN ID below. */
15781 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
15782 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
15783 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
15784 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
15785 	/* With VLAN filtering enabled, create one flow rule for each registered VLAN ID. */
15786 	for (i = 0; i < priv->vlan_filter_n; ++i) {
15787 		uint16_t vlan = priv->vlan_filter[i];
15788 		struct rte_flow_item_vlan vlan_spec = {
15789 			.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
15790 		};
15791 
15792 		items[1].spec = &vlan_spec;
15793 		if (flow_hw_create_ctrl_flow(dev, dev,
15794 					     tbl, items, 0, actions, 0, &flow_info, false))
15795 			return -rte_errno;
15796 	}
15797 	return 0;
15798 }
15799 
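/*
 * Create unicast DMAC control flow rules: one rule for each non-zero
 * MAC address configured on the port.
 */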
15800 static int
15801 __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
15802 			     struct rte_flow_template_table *tbl,
15803 			     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15804 			     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15805 {
15806 	struct rte_flow_item_eth eth_spec;
15807 	struct rte_flow_item items[5];
15808 	struct rte_flow_action actions[] = {
15809 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
15810 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15811 	};
15812 	struct mlx5_hw_ctrl_flow_info flow_info = {
15813 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
15814 	};
15815 	const struct rte_ether_addr cmp = {
15816 		.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
15817 	};
15818 	unsigned int i;
15819 
15820 	RTE_SET_USED(pattern_type);
15821 
15822 	memset(&eth_spec, 0, sizeof(eth_spec));
15823 	memset(items, 0, sizeof(items));
15824 	items[0] = (struct rte_flow_item){
15825 		.type = RTE_FLOW_ITEM_TYPE_ETH,
15826 		.spec = &eth_spec,
15827 	};
15828 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
15829 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
15830 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
15831 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
15832 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
15833 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
15834 
15835 		if (!memcmp(mac, &cmp, sizeof(*mac)))
15836 			continue;
15837 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
15838 		if (flow_hw_create_ctrl_flow(dev, dev,
15839 					     tbl, items, 0, actions, 0, &flow_info, false))
15840 			return -rte_errno;
15841 	}
15842 	return 0;
15843 }
15844 
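/*
 * Create unicast DMAC + VLAN control flow rules: one rule for each
 * combination of a non-zero configured MAC address and a registered
 * VLAN ID.
 */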
15845 static int
15846 __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
15847 				  struct rte_flow_template_table *tbl,
15848 				  const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15849 				  const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15850 {
15851 	struct mlx5_priv *priv = dev->data->dev_private;
15852 	struct rte_flow_item_eth eth_spec;
15853 	struct rte_flow_item items[5];
15854 	struct rte_flow_action actions[] = {
15855 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
15856 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15857 	};
15858 	struct mlx5_hw_ctrl_flow_info flow_info = {
15859 		.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
15860 	};
15861 	const struct rte_ether_addr cmp = {
15862 		.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
15863 	};
15864 	unsigned int i;
15865 	unsigned int j;
15866 
15867 	RTE_SET_USED(pattern_type);
15868 
15869 	memset(&eth_spec, 0, sizeof(eth_spec));
15870 	memset(items, 0, sizeof(items));
15871 	items[0] = (struct rte_flow_item){
15872 		.type = RTE_FLOW_ITEM_TYPE_ETH,
15873 		.spec = &eth_spec,
15874 	};
15875 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
15876 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
15877 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
15878 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
15879 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
15880 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
15881 
15882 		if (!memcmp(mac, &cmp, sizeof(*mac)))
15883 			continue;
15884 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
15885 		for (j = 0; j < priv->vlan_filter_n; ++j) {
15886 			uint16_t vlan = priv->vlan_filter[j];
15887 			struct rte_flow_item_vlan vlan_spec = {
15888 				.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
15889 			};
15890 
15891 			items[1].spec = &vlan_spec;
15892 			if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
15893 						     &flow_info, false))
15894 				return -rte_errno;
15895 		}
15896 	}
15897 	return 0;
15898 }
15899 
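/*
 * Dispatch control flow rule creation to the proper helper based on the
 * Ethernet pattern type (single rule, per-VLAN, per-DMAC or per-DMAC/VLAN).
 */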
15900 static int
15901 __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
15902 		     struct rte_flow_template_table *tbl,
15903 		     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15904 		     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15905 {
15906 	switch (pattern_type) {
15907 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
15908 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
15909 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
15910 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
15911 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
15912 		return __flow_hw_ctrl_flows_single(dev, tbl, pattern_type, rss_type);
15913 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15914 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15915 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15916 		return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
15917 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
15918 		return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
15919 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
15920 		return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
15921 	default:
15922 		/* Should not reach here. */
15923 		MLX5_ASSERT(false);
15924 		rte_errno = EINVAL;
15925 		return -EINVAL;
15926 	}
15927 }
15928 
15929 
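/**
 * Create all requested control Rx flow rules for a port.
 *
 * RSS actions templates and template tables are created lazily for each
 * requested (pattern type, RSS type) combination and then populated with
 * the control flow rules.
 *
 * An illustrative call from a traffic-enable path might look as follows;
 * "ctrl_flags" is a placeholder bitmask built by the caller from the
 * MLX5_CTRL_* definitions:
 *
 * @code{.c}
 * if (mlx5_flow_hw_ctrl_flows(dev, ctrl_flags) < 0)
 *     DRV_LOG(ERR, "port %u failed to create control flow rules",
 *             dev->data->port_id);
 * @endcode
 *
 * @param dev
 *   Pointer to the Ethernet device structure.
 * @param flags
 *   Bitmask of MLX5_CTRL_* flags selecting the requested rules
 *   (e.g. MLX5_CTRL_VLAN_FILTER to create per-VLAN variants).
 *
 * @return
 *   0 on success, negative errno value otherwise.
 */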
15930 int
15931 mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
15932 {
15933 	struct mlx5_priv *priv = dev->data->dev_private;
15934 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
15935 	unsigned int i;
15936 	unsigned int j;
15937 	int ret = 0;
15938 
15939 	RTE_SET_USED(priv);
15940 	RTE_SET_USED(flags);
15941 	if (!priv->dr_ctx) {
15942 		DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
15943 			       "HWS needs to be configured beforehand.",
15944 			       dev->data->port_id);
15945 		return 0;
15946 	}
15947 	if (!priv->hw_ctrl_rx) {
15948 		DRV_LOG(ERR, "port %u Control flow rule templates were not created.",
15949 			dev->data->port_id);
15950 		rte_errno = EINVAL;
15951 		return -rte_errno;
15952 	}
15953 	hw_ctrl_rx = priv->hw_ctrl_rx;
15954 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
15955 		const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
15956 
15957 		if (!eth_pattern_type_is_requested(eth_pattern_type, flags))
15958 			continue;
15959 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
15960 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
15961 			struct rte_flow_actions_template *at;
15962 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
15963 			const struct mlx5_flow_template_table_cfg cfg = {
15964 				.attr = tmpls->attr,
15965 				.external = 0,
15966 			};
15967 
15968 			if (!hw_ctrl_rx->rss[rss_type]) {
15969 				at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
15970 				if (!at)
15971 					return -rte_errno;
15972 				hw_ctrl_rx->rss[rss_type] = at;
15973 			} else {
15974 				at = hw_ctrl_rx->rss[rss_type];
15975 			}
15976 			if (!rss_type_is_requested(priv, rss_type))
15977 				continue;
15978 			if (!tmpls->tbl) {
15979 				tmpls->tbl = flow_hw_table_create(dev, &cfg,
15980 								  &tmpls->pt, 1, &at, 1, NULL);
15981 				if (!tmpls->tbl) {
15982 					DRV_LOG(ERR, "port %u Failed to create template table "
15983 						     "for control flow rules. Unable to create "
15984 						     "control flow rules.",
15985 						     dev->data->port_id);
15986 					return -rte_errno;
15987 				}
15988 			}
15989 
15990 			ret = __flow_hw_ctrl_flows(dev, tmpls->tbl, eth_pattern_type, rss_type);
15991 			if (ret) {
15992 				DRV_LOG(ERR, "port %u Failed to create control flow rule.",
15993 					dev->data->port_id);
15994 				return ret;
15995 			}
15996 		}
15997 	}
15998 	return 0;
15999 }
16000 
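/*
 * Map the indirect action domain (transfer/egress/ingress) to the
 * corresponding mlx5dr action flag. Returns UINT32_MAX when no domain
 * bit is set.
 */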
16001 static __rte_always_inline uint32_t
16002 mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
16003 {
16004 	uint32_t tbl_type;
16005 
16006 	if (domain->transfer)
16007 		tbl_type = MLX5DR_ACTION_FLAG_HWS_FDB;
16008 	else if (domain->egress)
16009 		tbl_type = MLX5DR_ACTION_FLAG_HWS_TX;
16010 	else if (domain->ingress)
16011 		tbl_type = MLX5DR_ACTION_FLAG_HWS_RX;
16012 	else
16013 		tbl_type = UINT32_MAX;
16014 	return tbl_type;
16015 }
16016 
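/*
 * Allocate a shared reformat action handle and create the underlying
 * mlx5dr reformat action of the given type for the requested domain.
 */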
16017 static struct mlx5_hw_encap_decap_action *
16018 __mlx5_reformat_create(struct rte_eth_dev *dev,
16019 		       const struct rte_flow_action_raw_encap *encap_conf,
16020 		       const struct rte_flow_indir_action_conf *domain,
16021 		       enum mlx5dr_action_type type)
16022 {
16023 	struct mlx5_priv *priv = dev->data->dev_private;
16024 	struct mlx5_hw_encap_decap_action *handle;
16025 	struct mlx5dr_action_reformat_header hdr;
16026 	uint32_t flags;
16027 
16028 	flags = mlx5_reformat_domain_to_tbl_type(domain);
16029 	if (flags == UINT32_MAX) {
16030 		DRV_LOG(ERR, "Reformat: invalid indirect action configuration");
16031 		return NULL;
16032 	}
16033 	flags |= (uint32_t)MLX5DR_ACTION_FLAG_SHARED;
16034 	/* Allocate new list entry. */
16035 	handle = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*handle), 0, SOCKET_ID_ANY);
16036 	if (!handle) {
16037 		DRV_LOG(ERR, "Reformat: failed to allocate reformat entry");
16038 		return NULL;
16039 	}
16040 	handle->action_type = type;
16041 	hdr.sz = encap_conf ? encap_conf->size : 0;
16042 	hdr.data = encap_conf ? encap_conf->data : NULL;
16043 	handle->action = mlx5dr_action_create_reformat(priv->dr_ctx,
16044 					type, 1, &hdr, 0, flags);
16045 	if (!handle->action) {
16046 		DRV_LOG(ERR, "Reformat: failed to create reformat action");
16047 		mlx5_free(handle);
16048 		return NULL;
16049 	}
16050 	return handle;
16051 }
16052 
16053 /**
16054  * Create mlx5 reformat action.
16055  *
16056  * @param[in] dev
16057  *   Pointer to rte_eth_dev structure.
16058  * @param[in] conf
16059  *   Pointer to the indirect action parameters.
16060  * @param[in] encap_action
16061  *   Pointer to the raw_encap action configuration.
16062  * @param[in] decap_action
16063  *   Pointer to the raw_decap action configuration.
16064  * @param[out] error
16065  *   Pointer to error structure.
16066  *
16067  * @return
16068  *   A valid shared action handle in case of success, NULL otherwise and
16069  *   rte_errno is set.
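 *
 * An illustrative sketch of creating a shared L2-to-tunnel-L2 encap action;
 * "encap_header_buf" and "encap_header_len" are hypothetical placeholders
 * provided by the caller:
 *
 * @code{.c}
 * struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * struct rte_flow_action_raw_encap encap_conf = {
 *     .data = encap_header_buf,
 *     .size = encap_header_len, // must fit the PMD encap size limits
 * };
 * struct rte_flow_action encap = {
 *     .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
 *     .conf = &encap_conf,
 * };
 * struct rte_flow_error error;
 * struct mlx5_hw_encap_decap_action *handle;
 *
 * handle = mlx5_reformat_action_create(dev, &conf, &encap, NULL, &error);
 * @endcode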
16070  */
16071 struct mlx5_hw_encap_decap_action*
16072 mlx5_reformat_action_create(struct rte_eth_dev *dev,
16073 			    const struct rte_flow_indir_action_conf *conf,
16074 			    const struct rte_flow_action *encap_action,
16075 			    const struct rte_flow_action *decap_action,
16076 			    struct rte_flow_error *error)
16077 {
16078 	struct mlx5_priv *priv = dev->data->dev_private;
16079 	struct mlx5_hw_encap_decap_action *handle;
16080 	const struct rte_flow_action_raw_encap *encap = NULL;
16081 	const struct rte_flow_action_raw_decap *decap = NULL;
16082 	enum mlx5dr_action_type type = MLX5DR_ACTION_TYP_LAST;
16083 
16084 	MLX5_ASSERT(!encap_action || encap_action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP);
16085 	MLX5_ASSERT(!decap_action || decap_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP);
16086 	if (priv->sh->config.dv_flow_en != 2) {
16087 		rte_flow_error_set(error, ENOTSUP,
16088 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16089 				   "Reformat: hardware does not support");
16090 		return NULL;
16091 	}
16092 	if (!conf || (conf->transfer + conf->egress + conf->ingress != 1)) {
16093 		rte_flow_error_set(error, EINVAL,
16094 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16095 				   "Reformat: domain should be specified");
16096 		return NULL;
16097 	}
16098 	if ((encap_action && !encap_action->conf) || (decap_action && !decap_action->conf)) {
16099 		rte_flow_error_set(error, EINVAL,
16100 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16101 				   "Reformat: missed action configuration");
16102 		return NULL;
16103 	}
16104 	if (encap_action && !decap_action) {
16105 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
16106 		if (!encap->size || encap->size > MLX5_ENCAP_MAX_LEN ||
16107 		    encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16108 			rte_flow_error_set(error, EINVAL,
16109 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16110 					   "Reformat: Invalid encap length");
16111 			return NULL;
16112 		}
16113 		type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
16114 	} else if (decap_action && !encap_action) {
16115 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
16116 		if (!decap->size || decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16117 			rte_flow_error_set(error, EINVAL,
16118 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16119 					   "Reformat: Invalid decap length");
16120 			return NULL;
16121 		}
16122 		type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
16123 	} else if (encap_action && decap_action) {
16124 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
16125 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
16126 		if (decap->size < MLX5_ENCAPSULATION_DECISION_SIZE &&
16127 		    encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
16128 		    encap->size <= MLX5_ENCAP_MAX_LEN) {
16129 			type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
16130 		} else if (decap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
16131 			   encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16132 			type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
16133 		} else {
16134 			rte_flow_error_set(error, EINVAL,
16135 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16136 					   "Reformat: Invalid decap & encap length");
16137 			return NULL;
16138 		}
16139 	} else if (!encap_action && !decap_action) {
16140 		rte_flow_error_set(error, EINVAL,
16141 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16142 				   "Reformat: Invalid decap & encap configurations");
16143 		return NULL;
16144 	}
16145 	if (!priv->dr_ctx) {
16146 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
16147 				   encap_action, "Reformat: HWS not supported");
16148 		return NULL;
16149 	}
16150 	handle = __mlx5_reformat_create(dev, encap, conf, type);
16151 	if (!handle) {
16152 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16153 				   "Reformat: failed to create indirect action");
16154 		return NULL;
16155 	}
16156 	return handle;
16157 }
16158 
16159 /**
16160  * Destroy the indirect reformat action.
16161  * Release action related resources on the NIC and the memory.
16162  * Lock free; the mutex should be acquired by the caller.
16163  *
16164  * @param[in] dev
16165  *   Pointer to the Ethernet device structure.
16166  * @param[in] handle
16167  *   The indirect action list handle to be removed.
16168  * @param[out] error
16169  *   Perform verbose error reporting if not NULL. Initialized in case of
16170  *   error only.
16171  *
16172  * @return
16173  *   0 on success, otherwise negative errno value.
16174  */
16175 int
16176 mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
16177 			     struct rte_flow_action_list_handle *handle,
16178 			     struct rte_flow_error *error)
16179 {
16180 	struct mlx5_priv *priv = dev->data->dev_private;
16181 	struct mlx5_hw_encap_decap_action *action;
16182 
16183 	action = (struct mlx5_hw_encap_decap_action *)handle;
16184 	if (!priv->dr_ctx || !action)
16185 		return rte_flow_error_set(error, ENOTSUP,
16186 					  RTE_FLOW_ERROR_TYPE_ACTION, handle,
16187 					  "Reformat: invalid action handle");
16188 	mlx5dr_action_destroy(action->action);
16189 	mlx5_free(handle);
16190 	return 0;
16191 }
16192 
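/*
 * Check whether a pattern template item has at least one non-zero byte in
 * its mask, i.e. whether flow rules created from the template must provide
 * a spec for this item.
 */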
16193 static bool
16194 flow_hw_is_item_masked(const struct rte_flow_item *item)
16195 {
16196 	const uint8_t *byte;
16197 	int size;
16198 	int i;
16199 
16200 	if (item->mask == NULL)
16201 		return false;
16202 
16203 	switch ((int)item->type) {
16204 	case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
16205 		size = sizeof(struct rte_flow_item_tag);
16206 		break;
16207 	case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
16208 		size = sizeof(struct mlx5_rte_flow_item_sq);
16209 		break;
16210 	default:
16211 		size = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_MASK, NULL, 0, item, NULL);
16212 		/*
16213 		 * Pattern template items are passed to this function.
16214 		 * These items were already validated, so error is not expected.
16215 		 * Also, since the mask is not NULL here, the item size is always greater than 0.
16216 		 */
16217 		MLX5_ASSERT(size > 0);
16218 	}
16219 
16220 	byte = (const uint8_t *)item->mask;
16221 	for (i = 0; i < size; ++i)
16222 		if (byte[i])
16223 			return true;
16224 
16225 	return false;
16226 }
16227 
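/*
 * Validate a flow rule pattern against the selected pattern template:
 * item types must match the template, a spec must be provided for every
 * item masked in the template, and referenced ports and Tx queues must
 * be valid.
 */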
16228 static int
16229 flow_hw_validate_rule_pattern(struct rte_eth_dev *dev,
16230 			      const struct rte_flow_template_table *table,
16231 			      const uint8_t pattern_template_idx,
16232 			      const struct rte_flow_item items[],
16233 			      struct rte_flow_error *error)
16234 {
16235 	const struct rte_flow_pattern_template *pt;
16236 	const struct rte_flow_item *pt_item;
16237 
16238 	if (pattern_template_idx >= table->nb_item_templates)
16239 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16240 					  "Pattern template index out of range");
16241 
16242 	pt = table->its[pattern_template_idx];
16243 	pt_item = pt->items;
16244 
16245 	/* If any item was prepended, skip it. */
16246 	if (pt->implicit_port || pt->implicit_tag)
16247 		pt_item++;
16248 
16249 	for (; pt_item->type != RTE_FLOW_ITEM_TYPE_END; pt_item++, items++) {
16250 		if (pt_item->type != items->type)
16251 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
16252 						  items, "Item type does not match the template");
16253 
16254 		/*
16255 		 * Assumptions:
16256 		 * - Currently mlx5dr layer contains info on which fields in masks are supported.
16257 		 * - This info is not exposed to PMD directly.
16258 		 * - Because of that, it is assumed that since pattern template is correct,
16259 		 *   then, items' masks in pattern template have nonzero values only in
16260 		 *   supported fields.
16261 		 *   This is known, because a temporary mlx5dr matcher is created during pattern
16262 		 *   template creation to validate the template.
16263 		 * - As a result, it is safe to look for nonzero bytes in mask to determine if
16264 		 *   item spec is needed in a flow rule.
16265 		 */
16266 		if (!flow_hw_is_item_masked(pt_item))
16267 			continue;
16268 
16269 		if (items->spec == NULL)
16270 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
16271 						  items, "Item spec is required");
16272 
16273 		switch (items->type) {
16274 		const struct rte_flow_item_ethdev *ethdev;
16275 		const struct rte_flow_item_tx_queue *tx_queue;
16276 		struct mlx5_txq_ctrl *txq;
16277 
16278 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
16279 			ethdev = items->spec;
16280 			if (flow_hw_validate_target_port_id(dev, ethdev->port_id)) {
16281 				return rte_flow_error_set(error, EINVAL,
16282 							  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, items,
16283 							  "Invalid port");
16284 			}
16285 			break;
16286 		case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
16287 			tx_queue = items->spec;
16288 			if (mlx5_is_external_txq(dev, tx_queue->tx_queue))
16289 				continue;
16290 			txq = mlx5_txq_get(dev, tx_queue->tx_queue);
16291 			if (!txq)
16292 				return rte_flow_error_set(error, EINVAL,
16293 							  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, items,
16294 							  "Invalid Tx queue");
16295 			mlx5_txq_release(dev, tx_queue->tx_queue);
			break;
16296 		default:
16297 			break;
16298 		}
16299 	}
16300 
16301 	return 0;
16302 }
16303 
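/*
 * Check that the indirect action handle provided by the user encodes the
 * same indirect action type as the one expected by the actions template.
 */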
16304 static bool
16305 flow_hw_valid_indirect_action_type(const struct rte_flow_action *user_action,
16306 				   const enum rte_flow_action_type expected_type)
16307 {
16308 	uint32_t user_indirect_type = MLX5_INDIRECT_ACTION_TYPE_GET(user_action->conf);
16309 	uint32_t expected_indirect_type;
16310 
16311 	switch ((int)expected_type) {
16312 	case RTE_FLOW_ACTION_TYPE_RSS:
16313 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
16314 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_RSS;
16315 		break;
16316 	case RTE_FLOW_ACTION_TYPE_COUNT:
16317 	case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
16318 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_COUNT;
16319 		break;
16320 	case RTE_FLOW_ACTION_TYPE_AGE:
16321 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_AGE;
16322 		break;
16323 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
16324 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
16325 		break;
16326 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
16327 	case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
16328 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
16329 		break;
16330 	case RTE_FLOW_ACTION_TYPE_QUOTA:
16331 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_QUOTA;
16332 		break;
16333 	default:
16334 		return false;
16335 	}
16336 
16337 	return user_indirect_type == expected_indirect_type;
16338 }
16339 
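/*
 * Validate flow rule actions against the selected actions template:
 * action types must match the template and every unmasked action that
 * needs a configuration must provide one consistent with the template
 * (indirect action type, modify_field layout, valid queue or port, etc.).
 */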
16340 static int
16341 flow_hw_validate_rule_actions(struct rte_eth_dev *dev,
16342 			      const struct rte_flow_template_table *table,
16343 			      const uint8_t actions_template_idx,
16344 			      const struct rte_flow_action actions[],
16345 			      struct rte_flow_error *error)
16346 {
16347 	const struct rte_flow_actions_template *at;
16348 	const struct mlx5_hw_actions *hw_acts;
16349 	const struct mlx5_action_construct_data *act_data;
16350 	unsigned int idx;
16351 
16352 	if (actions_template_idx >= table->nb_action_templates)
16353 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16354 					  "Actions template index out of range");
16355 
16356 	at = table->ats[actions_template_idx].action_template;
16357 	hw_acts = &table->ats[actions_template_idx].acts;
16358 
16359 	for (idx = 0; actions[idx].type != RTE_FLOW_ACTION_TYPE_END; ++idx) {
16360 		const struct rte_flow_action *user_action = &actions[idx];
16361 		const struct rte_flow_action *tmpl_action = &at->orig_actions[idx];
16362 
16363 		if (user_action->type != tmpl_action->type)
16364 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
16365 						  user_action,
16366 						  "Action type does not match type specified in "
16367 						  "actions template");
16368 	}
16369 
16370 	/*
16371 	 * Only go through unmasked actions and check if configuration is provided.
16372 	 * Configuration of masked actions is ignored.
16373 	 */
16374 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
16375 		const struct rte_flow_action *user_action;
16376 
16377 		user_action = &actions[act_data->action_src];
16378 
16379 		/* Skip actions which do not require conf. */
16380 		switch ((int)act_data->type) {
16381 		case RTE_FLOW_ACTION_TYPE_COUNT:
16382 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
16383 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
16384 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
16385 			continue;
16386 		default:
16387 			break;
16388 		}
16389 
16390 		if (user_action->conf == NULL)
16391 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
16392 						  user_action,
16393 						  "Action requires configuration");
16394 
16395 		switch ((int)user_action->type) {
16396 		enum rte_flow_action_type expected_type;
16397 		const struct rte_flow_action_ethdev *ethdev;
16398 		const struct rte_flow_action_modify_field *mf;
16399 
16400 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
16401 			expected_type = act_data->indirect.expected_type;
16402 			if (!flow_hw_valid_indirect_action_type(user_action, expected_type))
16403 				return rte_flow_error_set(error, EINVAL,
16404 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16405 							  user_action,
16406 							  "Indirect action type does not match "
16407 							  "the type specified in the mask");
16408 			break;
16409 		case RTE_FLOW_ACTION_TYPE_QUEUE:
16410 			if (mlx5_flow_validate_target_queue(dev, user_action, error))
16411 				return -rte_errno;
16412 			break;
16413 		case RTE_FLOW_ACTION_TYPE_RSS:
16414 			if (mlx5_validate_action_rss(dev, user_action, error))
16415 				return -rte_errno;
16416 			break;
16417 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
16418 			/* TODO: Compare other fields if needed. */
16419 			mf = user_action->conf;
16420 			if (mf->operation != act_data->modify_header.action.operation ||
16421 			    mf->src.field != act_data->modify_header.action.src.field ||
16422 			    mf->dst.field != act_data->modify_header.action.dst.field ||
16423 			    mf->width != act_data->modify_header.action.width)
16424 				return rte_flow_error_set(error, EINVAL,
16425 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16426 							  user_action,
16427 							  "Modify field configuration does not "
16428 							  "match configuration from actions "
16429 							  "template");
16430 			break;
16431 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
16432 			ethdev = user_action->conf;
16433 			if (flow_hw_validate_target_port_id(dev, ethdev->port_id)) {
16434 				return rte_flow_error_set(error, EINVAL,
16435 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16436 							  user_action, "Invalid port");
16437 			}
16438 			break;
16439 		default:
16440 			break;
16441 		}
16442 	}
16443 
16444 	return 0;
16445 }
16446 
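/*
 * Common validation for asynchronous flow operations: for tables created
 * on behalf of the application, the queue index must be below the number
 * of queues configured for the port flow engine.
 */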
16447 static int
16448 flow_hw_async_op_validate(struct rte_eth_dev *dev,
16449 			  const uint32_t queue,
16450 			  const struct rte_flow_template_table *table,
16451 			  struct rte_flow_error *error)
16452 {
16453 	struct mlx5_priv *priv = dev->data->dev_private;
16454 
16455 	MLX5_ASSERT(table != NULL);
16456 
16457 	if (table->cfg.external && queue >= priv->hw_attr->nb_queue)
16458 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16459 					  "Incorrect queue");
16460 
16461 	return 0;
16462 }
16463 
16464 /**
16465  * Validate user input for rte_flow_async_create() implementation.
16466  *
16467  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16468  *
16469  * @param[in] dev
16470  *   Pointer to the rte_eth_dev structure.
16471  * @param[in] queue
16472  *   The queue to create the flow.
16473  * @param[in] table
16474  *   Pointer to template table.
16475  * @param[in] items
16476  *   Items with flow spec value.
16477  * @param[in] pattern_template_index
16478  *   Index of the pattern template of the table that the flow follows.
16479  * @param[in] actions
16480  *   Action with flow spec value.
16481  * @param[in] action_template_index
16482  *   Index of the actions template of the table that the flow follows.
16483  * @param[out] error
16484  *   Pointer to error structure.
16485  *
16486  * @return
16487  *    0 if user input is valid.
16488  *    Negative errno otherwise; rte_errno and the error struct are populated.
16489  */
16490 static int
16491 flow_hw_async_create_validate(struct rte_eth_dev *dev,
16492 			      const uint32_t queue,
16493 			      const struct rte_flow_template_table *table,
16494 			      const struct rte_flow_item items[],
16495 			      const uint8_t pattern_template_index,
16496 			      const struct rte_flow_action actions[],
16497 			      const uint8_t action_template_index,
16498 			      struct rte_flow_error *error)
16499 {
16500 	if (flow_hw_async_op_validate(dev, queue, table, error))
16501 		return -rte_errno;
16502 
16503 	if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
16504 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16505 					  "Only pattern insertion is allowed on this table");
16506 
16507 	if (flow_hw_validate_rule_pattern(dev, table, pattern_template_index, items, error))
16508 		return -rte_errno;
16509 
16510 	if (flow_hw_validate_rule_actions(dev, table, action_template_index, actions, error))
16511 		return -rte_errno;
16512 
16513 	return 0;
16514 }
16515 
16516 /**
16517  * Validate user input for rte_flow_async_create_by_index() implementation.
16518  *
16519  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16520  *
16521  * @param[in] dev
16522  *   Pointer to the rte_eth_dev structure.
16523  * @param[in] queue
16524  *   The queue to create the flow.
16525  * @param[in] table
16526  *   Pointer to template table.
16527  * @param[in] rule_index
16528  *   Rule index in the table.
16529  *   Inserting a rule into an already occupied index results in undefined behavior.
16530  * @param[in] actions
16531  *   Action with flow spec value.
16532  * @param[in] action_template_index
16533  *   Index of the actions template of the table that the flow follows.
16534  * @param[out] error
16535  *   Pointer to error structure.
16536  *
16537  * @return
16538  *    0 if user input is valid.
16539  *    Negative errno otherwise; rte_errno and the error struct are set.
16540  */
16541 static int
16542 flow_hw_async_create_by_index_validate(struct rte_eth_dev *dev,
16543 				       const uint32_t queue,
16544 				       const struct rte_flow_template_table *table,
16545 				       const uint32_t rule_index,
16546 				       const struct rte_flow_action actions[],
16547 				       const uint8_t action_template_index,
16548 				       struct rte_flow_error *error)
16549 {
16550 	if (flow_hw_async_op_validate(dev, queue, table, error))
16551 		return -rte_errno;
16552 
16553 	if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_INDEX)
16554 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16555 					  "Only index insertion is allowed on this table");
16556 
16557 	if (rule_index >= table->cfg.attr.nb_flows)
16558 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16559 					  "Flow rule index exceeds table size");
16560 
16561 	if (flow_hw_validate_rule_actions(dev, table, action_template_index, actions, error))
16562 		return -rte_errno;
16563 
16564 	return 0;
16565 }
16566 
16567 
16568 /**
16569  * Validate user input for rte_flow_async_update() implementation.
16570  *
16571  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16572  *
16573  * @param[in] dev
16574  *   Pointer to the rte_eth_dev structure.
16575  * @param[in] queue
16576  *   The queue used to update the flow.
16577  * @param[in] flow
16578  *   Flow rule to be updated.
16579  * @param[in] actions
16580  *   Action with flow spec value.
16581  * @param[in] action_template_index
16582  *   Index of the actions template of the table that the flow follows.
16583  * @param[out] error
16584  *   Pointer to error structure.
16585  *
16586  * @return
16587  *    0 if user input is valid.
16588  *    Negative errno otherwise; rte_errno and the error struct are set.
16589  */
16590 static int
16591 flow_hw_async_update_validate(struct rte_eth_dev *dev,
16592 			      const uint32_t queue,
16593 			      const struct rte_flow_hw *flow,
16594 			      const struct rte_flow_action actions[],
16595 			      const uint8_t action_template_index,
16596 			      struct rte_flow_error *error)
16597 {
16598 	if (flow_hw_async_op_validate(dev, queue, flow->table, error))
16599 		return -rte_errno;
16600 
16601 	if (flow_hw_validate_rule_actions(dev, flow->table, action_template_index, actions, error))
16602 		return -rte_errno;
16603 
16604 	return 0;
16605 }
16606 
16607 /**
16608  * Validate user input for rte_flow_async_destroy() implementation.
16609  *
16610  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16611  *
16612  * @param[in] dev
16613  *   Pointer to the rte_eth_dev structure.
16614  * @param[in] queue
16615  *   The queue used to destroy the flow.
16616  * @param[in] flow
16617  *   Flow rule to be destroyed.
16618  * @param[out] error
16619  *   Pointer to error structure.
16620  *
16621  * @return
16622  *    0 if user input is valid.
16623  *    Negative errno otherwise; rte_errno and the error struct are set.
16624  */
16625 static int
16626 flow_hw_async_destroy_validate(struct rte_eth_dev *dev,
16627 			       const uint32_t queue,
16628 			       const struct rte_flow_hw *flow,
16629 			       struct rte_flow_error *error)
16630 {
16631 	if (flow_hw_async_op_validate(dev, queue, flow->table, error))
16632 		return -rte_errno;
16633 
16634 	return 0;
16635 }
16636 
16637 static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops = {
16638 	.async_create = flow_hw_async_flow_create,
16639 	.async_create_by_index = flow_hw_async_flow_create_by_index,
16640 	.async_actions_update = flow_hw_async_flow_update,
16641 	.async_destroy = flow_hw_async_flow_destroy,
16642 	.push = flow_hw_push,
16643 	.pull = flow_hw_pull,
16644 	.async_action_handle_create = flow_hw_action_handle_create,
16645 	.async_action_handle_destroy = flow_hw_action_handle_destroy,
16646 	.async_action_handle_update = flow_hw_action_handle_update,
16647 	.async_action_handle_query = flow_hw_action_handle_query,
16648 	.async_action_handle_query_update = flow_hw_async_action_handle_query_update,
16649 	.async_action_list_handle_create = flow_hw_async_action_list_handle_create,
16650 	.async_action_list_handle_destroy = flow_hw_async_action_list_handle_destroy,
16651 	.async_action_list_handle_query_update =
16652 		flow_hw_async_action_list_handle_query_update,
16653 };
16654 
16655 #endif
16656