xref: /dpdk/drivers/net/mlx5/mlx5_flow_hw.c (revision 3cd695c34528571c378c5f6be7ff81d3cca9a84c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include <rte_flow.h>
6 #include <rte_flow_driver.h>
7 #include <rte_stdatomic.h>
8 
9 #include <mlx5_malloc.h>
10 
11 #include "mlx5.h"
12 #include "mlx5_common.h"
13 #include "mlx5_defs.h"
14 #include "mlx5_flow.h"
15 #include "mlx5_flow_os.h"
16 #include "mlx5_rx.h"
17 
18 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
19 #include "mlx5_hws_cnt.h"
20 
21 /** Fast path async flow API functions. */
22 static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops;
23 
24 /* The maximum number of actions supported in the flow. */
25 #define MLX5_HW_MAX_ACTS 16
26 
27 /*
28  * The default ipool size threshold that decides which per_core_cache
29  * value to set.
30  */
31 #define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
32 /* The default min local cache size. */
33 #define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
34 
35 /* Default push burst threshold. */
36 #define BURST_THR 32u
37 
38 /* Default queue to flush the flows. */
39 #define MLX5_DEFAULT_FLUSH_QUEUE 0
40 
41 /* Maximum number of rules in control flow tables. */
42 #define MLX5_HW_CTRL_FLOW_NB_RULES (4096)
43 
44 /* Lowest flow group usable by an application if group translation is done. */
45 #define MLX5_HW_LOWEST_USABLE_GROUP (1)
46 
47 /* Maximum group index usable by user applications for transfer flows. */
48 #define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)
49 
50 /* Maximum group index usable by user applications for egress flows. */
51 #define MLX5_HW_MAX_EGRESS_GROUP (UINT32_MAX - 1)
52 
53 /* Lowest priority for HW root table. */
54 #define MLX5_HW_LOWEST_PRIO_ROOT 15
55 
56 /* Lowest priority for HW non-root table. */
57 #define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)
58 
59 /* Priorities for Rx control flow rules. */
60 #define MLX5_HW_CTRL_RX_PRIO_L2 (MLX5_HW_LOWEST_PRIO_ROOT)
61 #define MLX5_HW_CTRL_RX_PRIO_L3 (MLX5_HW_LOWEST_PRIO_ROOT - 1)
62 #define MLX5_HW_CTRL_RX_PRIO_L4 (MLX5_HW_LOWEST_PRIO_ROOT - 2)
63 
64 #define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
65 #define MLX5_HW_VLAN_PUSH_VID_IDX 1
66 #define MLX5_HW_VLAN_PUSH_PCP_IDX 2
67 
68 #define MLX5_MIRROR_MAX_CLONES_NUM 3
69 #define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
70 
71 #define MLX5_HW_PORT_IS_PROXY(priv) \
72 	(!!((priv)->sh->esw_mode && (priv)->master))
73 
74 
75 struct mlx5_indlst_legacy {
76 	struct mlx5_indirect_list indirect;
77 	struct rte_flow_action_handle *handle;
78 	enum rte_flow_action_type legacy_type;
79 };
80 
81 #define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
82 (((const struct encap_type *)(ptr))->definition)
83 
84 /**
85  * Returns the size of a struct with a following layout:
86  *
87  * @code{.c}
88  * struct rte_flow_hw {
89  *     // rte_flow_hw fields
90  *     uint8_t rule[mlx5dr_rule_get_handle_size()];
91  * };
92  * @endcode
93  *
94  * Such a struct is used as the basic container for an HW Steering flow rule.
95  */
96 static size_t
97 mlx5_flow_hw_entry_size(void)
98 {
99 	return sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
100 }
101 
102 /**
103  * Returns the size of an "auxed" rte_flow_hw structure, which is assumed to be laid out as follows:
104  *
105  * @code{.c}
106  * struct {
107  *     struct rte_flow_hw {
108  *         // rte_flow_hw fields
109  *         uint8_t rule[mlx5dr_rule_get_handle_size()];
110  *     } flow;
111  *     struct rte_flow_hw_aux aux;
112  * };
113  * @endcode
114  *
115  * Such a struct is used whenever rte_flow_hw_aux cannot be allocated separately from the rte_flow_hw,
116  * e.g., when the table is resizable.
117  */
118 static size_t
119 mlx5_flow_hw_auxed_entry_size(void)
120 {
121 	size_t rule_size = mlx5dr_rule_get_handle_size();
122 
123 	return sizeof(struct rte_flow_hw) + rule_size + sizeof(struct rte_flow_hw_aux);
124 }
125 
126 /**
127  * Returns a valid pointer to the rte_flow_hw_aux associated with the given rte_flow_hw,
128  * depending on the template table configuration.
129  */
130 static __rte_always_inline struct rte_flow_hw_aux *
131 mlx5_flow_hw_aux(uint16_t port_id, struct rte_flow_hw *flow)
132 {
133 	struct rte_flow_template_table *table = flow->table;
134 
135 	if (rte_flow_template_table_resizable(port_id, &table->cfg.attr)) {
136 		size_t offset = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
137 
138 		return RTE_PTR_ADD(flow, offset);
139 	} else {
140 		return ((flow->nt_rule) ? flow->nt2hws->flow_aux : &table->flow_aux[flow->idx - 1]);
141 	}
142 }
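
/*
 * A minimal usage sketch (illustrative only, assuming the helper setters
 * below): the aux area is always reached through mlx5_flow_hw_aux(), e.g.
 * when recording the AGE index of a flow rule:
 *
 * @code{.c}
 * struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
 *
 * mlx5_flow_hw_aux_set_age_idx(flow, aux, age_idx);
 * @endcode
 */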
143 
144 static __rte_always_inline void
145 mlx5_flow_hw_aux_set_age_idx(struct rte_flow_hw *flow,
146 			     struct rte_flow_hw_aux *aux,
147 			     uint32_t age_idx)
148 {
149 	/*
150 	 * The age index is stored in the update-specific field only for a rule
151 	 * update operation; for rule creation it is kept in the original fields.
152 	 */
153 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
154 		aux->upd.age_idx = age_idx;
155 	else
156 		aux->orig.age_idx = age_idx;
157 }
158 
159 static __rte_always_inline uint32_t
160 mlx5_flow_hw_aux_get_age_idx(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
161 {
162 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
163 		return aux->upd.age_idx;
164 	else
165 		return aux->orig.age_idx;
166 }
167 
168 static __rte_always_inline void
169 mlx5_flow_hw_aux_set_mtr_id(struct rte_flow_hw *flow,
170 			    struct rte_flow_hw_aux *aux,
171 			    uint32_t mtr_id)
172 {
173 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
174 		aux->upd.mtr_id = mtr_id;
175 	else
176 		aux->orig.mtr_id = mtr_id;
177 }
178 
179 static __rte_always_inline uint32_t
180 mlx5_flow_hw_aux_get_mtr_id(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
181 {
182 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
183 		return aux->upd.mtr_id;
184 	else
185 		return aux->orig.mtr_id;
186 }
187 
188 static __rte_always_inline struct mlx5_hw_q_job *
189 flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
190 			const struct rte_flow_action_handle *handle,
191 			void *user_data, void *query_data,
192 			enum mlx5_hw_job_type type,
193 			enum mlx5_hw_indirect_type indirect_type,
194 			struct rte_flow_error *error);
195 static void
196 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow,
197 			  struct rte_flow_error *error);
198 
199 static int
200 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
201 			       struct rte_flow_template_table *tbl,
202 			       struct mlx5_multi_pattern_segment *segment,
203 			       uint32_t bulk_size,
204 			       struct rte_flow_error *error);
205 static void
206 mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment);
207 
208 static __rte_always_inline enum mlx5_indirect_list_type
209 flow_hw_inlist_type_get(const struct rte_flow_action *actions);
210 
211 static int
212 flow_hw_allocate_actions(struct rte_eth_dev *dev,
213 			 uint64_t action_flags,
214 			 struct rte_flow_error *error);
215 
216 bool
217 mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error)
218 {
219 	const struct mlx5_priv *priv = dev->data->dev_private;
220 
221 	if (!priv->dr_ctx) {
222 		rte_flow_error_set(error, EINVAL,
223 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
224 				   "non-template flow engine was not configured");
225 		return false;
226 	}
227 	return true;
228 }
229 
235 static __rte_always_inline int
236 mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
237 {
238 	switch (type) {
239 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
240 		return 0;
241 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
242 		return 1;
243 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
244 		return 2;
245 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
246 		return 3;
247 	default:
248 		break;
249 	}
250 	return -1;
251 }
252 
253 /* Include only the reformat actions supported by the BWC non-template API. */
254 static __rte_always_inline int
255 mlx5_bwc_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
256 {
257 	switch (type) {
258 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
259 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
260 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
261 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
262 		return mlx5_multi_pattern_reformat_to_index(type);
263 	default:
264 		break;
265 	}
266 	return -1;
267 }
268 
269 static __rte_always_inline enum mlx5dr_action_type
270 mlx5_multi_pattern_reformat_index_to_type(uint32_t ix)
271 {
272 	switch (ix) {
273 	case 0:
274 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
275 	case 1:
276 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
277 	case 2:
278 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
279 	case 3:
280 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
281 	default:
282 		break;
283 	}
284 	return MLX5DR_ACTION_TYP_MAX;
285 }
286 
287 static inline enum mlx5dr_table_type
288 get_mlx5dr_table_type(const struct rte_flow_attr *attr)
289 {
290 	enum mlx5dr_table_type type;
291 
292 	if (attr->transfer)
293 		type = MLX5DR_TABLE_TYPE_FDB;
294 	else if (attr->egress)
295 		type = MLX5DR_TABLE_TYPE_NIC_TX;
296 	else
297 		type = MLX5DR_TABLE_TYPE_NIC_RX;
298 	return type;
299 }
300 
301 /* Non-template default queue size used for the internal control queue. */
302 #define MLX5_NT_DEFAULT_QUEUE_SIZE 32
303 
304 struct mlx5_mirror_clone {
305 	enum rte_flow_action_type type;
306 	void *action_ctx;
307 };
308 
309 struct mlx5_mirror {
310 	struct mlx5_indirect_list indirect;
311 	uint32_t clones_num;
312 	struct mlx5dr_action *mirror_action;
313 	struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
314 };
315 
316 static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
317 static int flow_hw_translate_group(struct rte_eth_dev *dev,
318 				   const struct mlx5_flow_template_table_cfg *cfg,
319 				   uint32_t group,
320 				   uint32_t *table_group,
321 				   struct rte_flow_error *error);
322 static __rte_always_inline int
323 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
324 			       struct mlx5_modification_cmd *mhdr_cmd,
325 			       struct mlx5_action_construct_data *act_data,
326 			       const struct mlx5_hw_actions *hw_acts,
327 			       const struct rte_flow_action *action);
328 static void
329 flow_hw_construct_quota(struct mlx5_priv *priv,
330 			struct mlx5dr_rule_action *rule_act, uint32_t qid);
331 
332 static int
333 mlx5_flow_ct_init(struct rte_eth_dev *dev,
334 		  uint32_t nb_conn_tracks,
335 		  uint16_t nb_queue);
336 
337 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
338 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
339 
340 static int flow_hw_async_create_validate(struct rte_eth_dev *dev,
341 					 const uint32_t queue,
342 					 const struct rte_flow_template_table *table,
343 					 enum rte_flow_table_insertion_type insertion_type,
344 					 const uint32_t rule_index,
345 					 const struct rte_flow_item items[],
346 					 const uint8_t pattern_template_index,
347 					 const struct rte_flow_action actions[],
348 					 const uint8_t action_template_index,
349 					 struct rte_flow_error *error);
350 static int flow_hw_async_update_validate(struct rte_eth_dev *dev,
351 					 const uint32_t queue,
352 					 const struct rte_flow_hw *flow,
353 					 const struct rte_flow_action actions[],
354 					 const uint8_t action_template_index,
355 					 struct rte_flow_error *error);
356 static int flow_hw_async_destroy_validate(struct rte_eth_dev *dev,
357 					  const uint32_t queue,
358 					  const struct rte_flow_hw *flow,
359 					  struct rte_flow_error *error);
360 
361 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
362 
363 /* DR action flags per table type, for root and non-root (HWS) tables. */
364 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
365 				[MLX5DR_TABLE_TYPE_MAX] = {
366 	{
367 		MLX5DR_ACTION_FLAG_ROOT_RX,
368 		MLX5DR_ACTION_FLAG_ROOT_TX,
369 		MLX5DR_ACTION_FLAG_ROOT_FDB,
370 	},
371 	{
372 		MLX5DR_ACTION_FLAG_HWS_RX,
373 		MLX5DR_ACTION_FLAG_HWS_TX,
374 		MLX5DR_ACTION_FLAG_HWS_FDB,
375 	},
376 };
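
/*
 * Indexing sketch (an assumption, for illustration only): the first index
 * selects root (group 0) vs. non-root (HWS) flags and the second index the
 * mlx5dr table type, e.g.:
 *
 * @code{.c}
 * uint32_t flags = mlx5_hw_act_flag[!!attr->group][get_mlx5dr_table_type(attr)];
 * @endcode
 */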
377 
378 /* Ethernet item spec for promiscuous mode. */
379 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_spec = {
380 	.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
381 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
382 	.hdr.ether_type = 0,
383 };
384 /* Ethernet item mask for promiscuous mode. */
385 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_mask = {
386 	.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
387 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
388 	.hdr.ether_type = 0,
389 };
390 
391 /* Ethernet item spec for all multicast mode. */
392 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_spec = {
393 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
394 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
395 	.hdr.ether_type = 0,
396 };
397 /* Ethernet item mask for all multicast mode. */
398 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_mask = {
399 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
400 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
401 	.hdr.ether_type = 0,
402 };
403 
404 /* Ethernet item spec for IPv4 multicast traffic. */
405 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_spec = {
406 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 },
407 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
408 	.hdr.ether_type = 0,
409 };
410 /* Ethernet item mask for IPv4 multicast traffic. */
411 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_mask = {
412 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 },
413 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
414 	.hdr.ether_type = 0,
415 };
416 
417 /* Ethernet item spec for IPv6 multicast traffic. */
418 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_spec = {
419 	.hdr.dst_addr.addr_bytes = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 },
420 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
421 	.hdr.ether_type = 0,
422 };
423 /* Ethernet item mask for IPv6 multicast traffic. */
424 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_mask = {
425 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
426 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
427 	.hdr.ether_type = 0,
428 };
429 
430 /* Ethernet item mask for unicast traffic. */
431 static const struct rte_flow_item_eth ctrl_rx_eth_dmac_mask = {
432 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
433 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
434 	.hdr.ether_type = 0,
435 };
436 
437 /* Ethernet item spec for broadcast. */
438 static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
439 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
440 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
441 	.hdr.ether_type = 0,
442 };
443 
444 static inline uint32_t
445 flow_hw_q_pending(struct mlx5_priv *priv, uint32_t queue)
446 {
447 	struct mlx5_hw_q *q = &priv->hw_q[queue];
448 
449 	MLX5_ASSERT(q->size >= q->job_idx);
450 	return (q->size - q->job_idx) + q->ongoing_flow_ops;
451 }
452 
453 static inline void
454 flow_hw_q_inc_flow_ops(struct mlx5_priv *priv, uint32_t queue)
455 {
456 	struct mlx5_hw_q *q = &priv->hw_q[queue];
457 
458 	q->ongoing_flow_ops++;
459 }
460 
461 static inline void
462 flow_hw_q_dec_flow_ops(struct mlx5_priv *priv, uint32_t queue)
463 {
464 	struct mlx5_hw_q *q = &priv->hw_q[queue];
465 
466 	q->ongoing_flow_ops--;
467 }
468 
469 static inline enum mlx5dr_matcher_insert_mode
470 flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)
471 {
472 	if (insert_type == RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
473 		return MLX5DR_MATCHER_INSERT_BY_HASH;
474 	else
475 		return MLX5DR_MATCHER_INSERT_BY_INDEX;
476 }
477 
478 static inline enum mlx5dr_matcher_distribute_mode
479 flow_hw_matcher_distribute_mode_get(enum rte_flow_table_hash_func hash_func)
480 {
481 	if (hash_func == RTE_FLOW_TABLE_HASH_FUNC_LINEAR)
482 		return MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR;
483 	else
484 		return MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
485 }
486 
487 /**
488  * Set the hash fields according to the @p rss_desc information.
489  *
490  * @param[in] rss_desc
491  *   Pointer to the mlx5_flow_rss_desc.
492  * @param[out] hash_fields
493  *   Pointer to the RSS hash fields.
494  */
495 static void
496 flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
497 		       uint64_t *hash_fields)
498 {
499 	uint64_t fields = 0;
500 	int rss_inner = 0;
501 	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
502 
503 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
504 	if (rss_desc->level >= 2)
505 		rss_inner = 1;
506 #endif
507 	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
508 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
509 			fields |= IBV_RX_HASH_SRC_IPV4;
510 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
511 			fields |= IBV_RX_HASH_DST_IPV4;
512 		else
513 			fields |= MLX5_IPV4_IBV_RX_HASH;
514 	} else if (rss_types & MLX5_IPV6_LAYER_TYPES) {
515 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
516 			fields |= IBV_RX_HASH_SRC_IPV6;
517 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
518 			fields |= IBV_RX_HASH_DST_IPV6;
519 		else
520 			fields |= MLX5_IPV6_IBV_RX_HASH;
521 	}
522 	if (rss_types & RTE_ETH_RSS_UDP) {
523 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
524 			fields |= IBV_RX_HASH_SRC_PORT_UDP;
525 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
526 			fields |= IBV_RX_HASH_DST_PORT_UDP;
527 		else
528 			fields |= MLX5_UDP_IBV_RX_HASH;
529 	} else if (rss_types & RTE_ETH_RSS_TCP) {
530 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
531 			fields |= IBV_RX_HASH_SRC_PORT_TCP;
532 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
533 			fields |= IBV_RX_HASH_DST_PORT_TCP;
534 		else
535 			fields |= MLX5_TCP_IBV_RX_HASH;
536 	}
537 	if (rss_types & RTE_ETH_RSS_ESP)
538 		fields |= IBV_RX_HASH_IPSEC_SPI;
539 	if (rss_inner)
540 		fields |= IBV_RX_HASH_INNER;
541 	*hash_fields |= fields;
542 }
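
/*
 * A worked example (illustrative only): for an RSS configuration covering
 * IPv4 and UDP, with no L3/L4 source/destination restriction and outer RSS
 * level, the full IPv4 and UDP hash field sets are selected:
 *
 * @code{.c}
 * struct mlx5_flow_rss_desc rss_desc = {
 *     .types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_UDP,
 *     .level = 1,
 * };
 * uint64_t hash_fields = 0;
 *
 * flow_hw_hashfields_set(&rss_desc, &hash_fields);
 * // hash_fields == MLX5_IPV4_IBV_RX_HASH | MLX5_UDP_IBV_RX_HASH
 * @endcode
 */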
543 
544 /**
545  * Generate the matching pattern item flags.
546  *
547  * @param[in] items
548  *   Pointer to the list of items.
549  *
550  * @return
551  *   Matching item flags. The RSS hash field function
552  *   silently ignores unsupported flags.
553  */
554 static uint64_t
555 flow_hw_matching_item_flags_get(const struct rte_flow_item items[])
556 {
557 	uint64_t item_flags = 0;
558 	uint64_t last_item = 0;
559 
560 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
561 		enum rte_flow_item_flex_tunnel_mode tunnel_mode = FLEX_TUNNEL_MODE_SINGLE;
562 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
563 		int item_type = items->type;
564 
565 		switch (item_type) {
566 		case RTE_FLOW_ITEM_TYPE_IPV4:
567 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
568 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
569 			break;
570 		case RTE_FLOW_ITEM_TYPE_IPV6:
571 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
572 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
573 			break;
574 		case RTE_FLOW_ITEM_TYPE_TCP:
575 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
576 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
577 			break;
578 		case RTE_FLOW_ITEM_TYPE_UDP:
579 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
580 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
581 			break;
582 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
583 			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
584 					     MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
585 			break;
586 		case RTE_FLOW_ITEM_TYPE_GRE:
587 			last_item = MLX5_FLOW_LAYER_GRE;
588 			break;
589 		case RTE_FLOW_ITEM_TYPE_NVGRE:
590 			last_item = MLX5_FLOW_LAYER_GRE;
591 			break;
592 		case RTE_FLOW_ITEM_TYPE_VXLAN:
593 			last_item = MLX5_FLOW_LAYER_VXLAN;
594 			break;
595 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
596 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
597 			break;
598 		case RTE_FLOW_ITEM_TYPE_GENEVE:
599 			last_item = MLX5_FLOW_LAYER_GENEVE;
600 			break;
601 		case RTE_FLOW_ITEM_TYPE_MPLS:
602 			last_item = MLX5_FLOW_LAYER_MPLS;
603 			break;
604 		case RTE_FLOW_ITEM_TYPE_GTP:
605 			last_item = MLX5_FLOW_LAYER_GTP;
606 			break;
607 		case RTE_FLOW_ITEM_TYPE_COMPARE:
608 			last_item = MLX5_FLOW_ITEM_COMPARE;
609 			break;
610 		case RTE_FLOW_ITEM_TYPE_FLEX:
611 			mlx5_flex_get_tunnel_mode(items, &tunnel_mode);
612 			last_item = tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
613 					MLX5_FLOW_ITEM_FLEX_TUNNEL :
614 					tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
615 						MLX5_FLOW_ITEM_OUTER_FLEX;
616 			break;
617 		default:
618 			break;
619 		}
620 		item_flags |= last_item;
621 	}
622 	return item_flags;
623 }
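
/*
 * For example (illustrative only), an ETH / IPV4 / UDP / END pattern yields
 * MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP; the ETH item
 * contributes nothing, since only the layers relevant to RSS hash adjustment
 * are collected here.
 */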
624 
625 static uint64_t
626 flow_hw_action_flags_get(const struct rte_flow_action actions[],
627 			 const struct rte_flow_action **qrss,
628 			 const struct rte_flow_action **mark,
629 			 int *encap_idx,
630 			 int *act_cnt,
631 			 struct rte_flow_error *error)
632 {
633 	uint64_t action_flags = 0;
634 	const struct rte_flow_action *action;
635 	const struct rte_flow_action_raw_encap *raw_encap;
636 	int raw_decap_idx = -1;
637 	int action_idx;
638 
639 	*encap_idx = -1;
640 	action_idx = 0;
641 	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
642 		int type = (int)action->type;
643 		switch (type) {
644 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
645 			switch (MLX5_INDIRECT_ACTION_TYPE_GET(action->conf)) {
646 			case MLX5_INDIRECT_ACTION_TYPE_RSS:
647 				goto rss;
648 			case MLX5_INDIRECT_ACTION_TYPE_AGE:
649 				goto age;
650 			case MLX5_INDIRECT_ACTION_TYPE_COUNT:
651 				goto count;
652 			case MLX5_INDIRECT_ACTION_TYPE_CT:
653 				goto ct;
654 			case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
655 				goto meter;
656 			default:
657 				goto error;
658 			}
659 			break;
660 		case RTE_FLOW_ACTION_TYPE_DROP:
661 			action_flags |= MLX5_FLOW_ACTION_DROP;
662 			break;
663 		case RTE_FLOW_ACTION_TYPE_FLAG:
664 			action_flags |= MLX5_FLOW_ACTION_FLAG;
665 			break;
666 		case RTE_FLOW_ACTION_TYPE_MARK:
667 			action_flags |= MLX5_FLOW_ACTION_MARK;
668 			*mark = action;
669 			break;
670 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
671 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
672 			break;
673 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
674 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
675 			break;
676 		case RTE_FLOW_ACTION_TYPE_JUMP:
677 			action_flags |= MLX5_FLOW_ACTION_JUMP;
678 			break;
679 		case RTE_FLOW_ACTION_TYPE_QUEUE:
680 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
681 			*qrss = action;
682 			break;
683 		case RTE_FLOW_ACTION_TYPE_RSS:
684 rss:
685 			action_flags |= MLX5_FLOW_ACTION_RSS;
686 			*qrss = action;
687 			break;
688 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
689 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
690 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
691 			*encap_idx = action_idx;
692 			break;
693 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
694 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
695 			raw_encap = action->conf;
696 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
697 				*encap_idx = raw_decap_idx != -1 ?
698 					     raw_decap_idx : action_idx;
699 			break;
700 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
701 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
702 			action_flags |= MLX5_FLOW_ACTION_DECAP;
703 			break;
704 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
705 			action_flags |= MLX5_FLOW_ACTION_DECAP;
706 			raw_decap_idx = action_idx;
707 			break;
708 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
709 			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
710 			break;
711 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
712 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
713 			break;
714 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
715 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
716 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
717 			break;
718 		case RTE_FLOW_ACTION_TYPE_AGE:
719 age:
720 			action_flags |= MLX5_FLOW_ACTION_AGE;
721 			break;
722 		case RTE_FLOW_ACTION_TYPE_COUNT:
723 count:
724 			action_flags |= MLX5_FLOW_ACTION_COUNT;
725 			break;
726 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
727 ct:
728 			action_flags |= MLX5_FLOW_ACTION_CT;
729 			break;
730 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
731 meter:
732 			action_flags |= MLX5_FLOW_ACTION_METER;
733 			break;
734 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
735 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
736 			break;
737 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
738 			action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX;
739 			break;
740 		case RTE_FLOW_ACTION_TYPE_VOID:
741 		case RTE_FLOW_ACTION_TYPE_END:
742 			break;
743 		default:
744 			goto error;
745 		}
746 		action_idx++;
747 	}
748 	if (*encap_idx == -1)
749 		*encap_idx = action_idx;
750 	action_idx++; /* The END action. */
751 	*act_cnt = action_idx;
752 	return action_flags;
753 error:
754 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
755 			   action, "invalid flow action");
756 	return 0;
757 }
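
/*
 * A worked example (illustrative only): for a MARK / QUEUE / END action list,
 *
 * @code{.c}
 * const struct rte_flow_action *qrss = NULL, *mark = NULL;
 * int encap_idx, act_cnt;
 * uint64_t flags = flow_hw_action_flags_get(actions, &qrss, &mark,
 *                                           &encap_idx, &act_cnt, error);
 * @endcode
 *
 * returns MLX5_FLOW_ACTION_MARK | MLX5_FLOW_ACTION_QUEUE, points mark and qrss
 * at the respective actions, sets encap_idx to the index of the END action
 * (no encapsulation found) and act_cnt to 3 (including END).
 */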
758 
759 /**
760  * Register destination table DR jump action.
761  *
762  * @param[in] dev
763  *   Pointer to the rte_eth_dev structure.
764  * @param[in] cfg
765  *   Pointer to the flow template table configuration.
766  * @param[in] dest_group
767  *   The destination group ID.
768  * @param[out] error
769  *   Pointer to error structure.
770  *
771  * @return
772  *    Jump action on success, NULL otherwise and rte_errno is set.
773  */
774 static struct mlx5_hw_jump_action *
775 flow_hw_jump_action_register(struct rte_eth_dev *dev,
776 			     const struct mlx5_flow_template_table_cfg *cfg,
777 			     uint32_t dest_group,
778 			     struct rte_flow_error *error)
779 {
780 	struct mlx5_priv *priv = dev->data->dev_private;
781 	struct rte_flow_attr jattr = cfg->attr.flow_attr;
782 	struct mlx5_flow_group *grp;
783 	struct mlx5_flow_cb_ctx ctx = {
784 		.dev = dev,
785 		.error = error,
786 		.data = &jattr,
787 	};
788 	struct mlx5_list_entry *ge;
789 	uint32_t target_group;
790 
791 	target_group = dest_group;
792 	if (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))
793 		return NULL;
794 	jattr.group = target_group;
795 	ge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);
796 	if (!ge)
797 		return NULL;
798 	grp = container_of(ge, struct mlx5_flow_group, entry);
799 	return &grp->jump;
800 }
801 
802 /**
803  * Release jump action.
804  *
805  * @param[in] dev
806  *   Pointer to the rte_eth_dev structure.
807  * @param[in] jump
808  *   Pointer to the jump action.
809  */
810 
811 static void
812 flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
813 {
814 	struct mlx5_priv *priv = dev->data->dev_private;
815 	struct mlx5_flow_group *grp;
816 
817 	grp = container_of(jump, struct mlx5_flow_group, jump);
818 	mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
819 }
820 
821 /**
822  * Register queue/RSS action.
823  *
824  * @param[in] dev
825  *   Pointer to the rte_eth_dev structure.
826  * @param[in] hws_flags
827  *   DR action flags.
828  * @param[in] action
829  *   Pointer to the rte_flow QUEUE or RSS action.
834  *
835  * @return
836  *    Pointer to the hash Rx queue object on success, NULL otherwise and rte_errno is set.
837  */
838 static inline struct mlx5_hrxq*
839 flow_hw_tir_action_register(struct rte_eth_dev *dev,
840 			    uint32_t hws_flags,
841 			    const struct rte_flow_action *action)
842 {
843 	struct mlx5_flow_rss_desc rss_desc = {
844 		.hws_flags = hws_flags,
845 	};
846 	struct mlx5_hrxq *hrxq;
847 
848 	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
849 		const struct rte_flow_action_queue *queue = action->conf;
850 
851 		rss_desc.const_q = &queue->index;
852 		rss_desc.queue_num = 1;
853 	} else {
854 		const struct rte_flow_action_rss *rss = action->conf;
855 
856 		rss_desc.queue_num = rss->queue_num;
857 		rss_desc.const_q = rss->queue;
858 		memcpy(rss_desc.key,
859 		       !rss->key ? rss_hash_default_key : rss->key,
860 		       MLX5_RSS_HASH_KEY_LEN);
861 		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
862 		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
863 		rss_desc.symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
864 		flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
865 		flow_dv_action_rss_l34_hash_adjust(rss->types,
866 						   &rss_desc.hash_fields);
867 		if (rss->level > 1) {
868 			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
869 			rss_desc.tunnel = 1;
870 		}
871 	}
872 	hrxq = mlx5_hrxq_get(dev, &rss_desc);
873 	return hrxq;
874 }
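
/*
 * A usage sketch (an assumption, simplified from the translate path):
 * registering a TIR for a QUEUE/RSS action and wiring it into the DR rule
 * action:
 *
 * @code{.c}
 * struct mlx5_hrxq *hrxq = flow_hw_tir_action_register(dev, hws_flags, action);
 *
 * if (!hrxq)
 *     return -rte_errno;
 * acts->tir = hrxq;
 * acts->rule_acts[action_dst].action = hrxq->action;
 * @endcode
 */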
875 
876 static __rte_always_inline int
877 flow_hw_ct_compile(struct rte_eth_dev *dev,
878 		   uint32_t queue, uint32_t idx,
879 		   struct mlx5dr_rule_action *rule_act)
880 {
881 	struct mlx5_priv *priv = dev->data->dev_private;
882 	struct mlx5_aso_ct_action *ct;
883 
884 	ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
885 	if (!ct || (!priv->shared_host && mlx5_aso_ct_available(priv->sh, queue, ct)))
886 		return -1;
887 	rule_act->action = priv->hws_ctpool->dr_action;
888 	rule_act->aso_ct.offset = ct->offset;
889 	rule_act->aso_ct.direction = ct->is_original ?
890 		MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
891 		MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
892 	return 0;
893 }
894 
895 static void
896 flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
897 {
898 	if (encap_decap->action && !encap_decap->multi_pattern)
899 		mlx5dr_action_destroy(encap_decap->action);
900 }
901 
902 static void
903 flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
904 {
905 	if (mhdr->action && !mhdr->multi_pattern)
906 		mlx5dr_action_destroy(mhdr->action);
907 }
908 
909 /**
910  * Destroy DR actions created by action template.
911  *
912  * DR actions are created during the table creation's action translation and
913  * must be destroyed when the table is destroyed.
914  *
915  * @param[in] dev
916  *   Pointer to the rte_eth_dev structure.
917  * @param[in] acts
918  *   Pointer to the template HW steering DR actions.
919  */
920 static void
921 __flow_hw_actions_release(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
922 {
923 	struct mlx5_priv *priv = dev->data->dev_private;
924 
925 	if (acts->mark)
926 		if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
927 				rte_memory_order_relaxed) - 1))
928 			flow_hw_rxq_flag_set(dev, false);
929 
930 	if (acts->jump) {
931 		struct mlx5_flow_group *grp;
932 
933 		grp = container_of
934 			(acts->jump, struct mlx5_flow_group, jump);
935 		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
936 		acts->jump = NULL;
937 	}
938 	if (acts->tir) {
939 		mlx5_hrxq_release(dev, acts->tir->idx);
940 		acts->tir = NULL;
941 	}
942 	if (acts->encap_decap) {
943 		flow_hw_template_destroy_reformat_action(acts->encap_decap);
944 		mlx5_free(acts->encap_decap);
945 		acts->encap_decap = NULL;
946 	}
947 	if (acts->push_remove) {
948 		if (acts->push_remove->action)
949 			mlx5dr_action_destroy(acts->push_remove->action);
950 		mlx5_free(acts->push_remove);
951 		acts->push_remove = NULL;
952 	}
953 	if (acts->mhdr) {
954 		flow_hw_template_destroy_mhdr_action(acts->mhdr);
955 		mlx5_free(acts->mhdr);
956 		acts->mhdr = NULL;
957 	}
958 	if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
959 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
960 		acts->cnt_id = 0;
961 	}
962 	if (acts->mtr_id) {
963 		mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
964 		acts->mtr_id = 0;
965 	}
966 }
967 
968 /**
969  * Release the action data back into the pool without destroying any action.
970  *
971  * @param[in] dev
972  *   Pointer to the rte_eth_dev structure.
973  * @param[in] acts
974  *   Pointer to the template HW steering DR actions.
975  */
976 static inline void
977 __flow_hw_act_data_flush(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
978 {
979 	struct mlx5_priv *priv = dev->data->dev_private;
980 	struct mlx5_action_construct_data *data;
981 
982 	while (!LIST_EMPTY(&acts->act_list)) {
983 		data = LIST_FIRST(&acts->act_list);
984 		LIST_REMOVE(data, next);
985 		mlx5_ipool_free(priv->acts_ipool, data->idx);
986 	}
987 }
988 
989 /*
990  * Destroy DR actions created by action template.
991  *
992  * DR actions are created during the table creation's action translation and
993  * must be destroyed when the table is destroyed.
994  *
995  * @param[in] dev
996  *   Pointer to the rte_eth_dev structure.
997  * @param[in] acts
998  *   Pointer to the template HW steering DR actions.
999  */
1000 static void
1001 __flow_hw_action_template_destroy(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
1002 {
1003 	__flow_hw_act_data_flush(dev, acts);
1004 	__flow_hw_actions_release(dev, acts);
1005 }
1006 
1007 /**
1008  * Allocate a dynamic action construct data entry from the port's ipool.
1009  *
1010  * @param[in] priv
1011  *   Pointer to the port private data structure.
1014  * @param[in] type
1015  *   Action type.
1016  * @param[in] action_src
1017  *   Offset of source rte flow action.
1018  * @param[in] action_dst
1019  *   Offset of destination DR action.
1020  *
1021  * @return
1022  *    Pointer to the allocated action data on success, NULL otherwise.
1023  */
1024 static __rte_always_inline struct mlx5_action_construct_data *
1025 __flow_hw_act_data_alloc(struct mlx5_priv *priv,
1026 			 enum rte_flow_action_type type,
1027 			 uint16_t action_src,
1028 			 uint16_t action_dst)
1029 {
1030 	struct mlx5_action_construct_data *act_data;
1031 	uint32_t idx = 0;
1032 
1033 	act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
1034 	if (!act_data)
1035 		return NULL;
1036 	act_data->idx = idx;
1037 	act_data->type = type;
1038 	act_data->action_src = action_src;
1039 	act_data->action_dst = action_dst;
1040 	return act_data;
1041 }
1042 
1043 /**
1044  * Append dynamic action to the dynamic action list.
1045  *
1046  * @param[in] priv
1047  *   Pointer to the port private data structure.
1048  * @param[in] acts
1049  *   Pointer to the template HW steering DR actions.
1050  * @param[in] type
1051  *   Action type.
1052  * @param[in] action_src
1053  *   Offset of source rte flow action.
1054  * @param[in] action_dst
1055  *   Offset of destination DR action.
1056  *
1057  * @return
1058  *    0 on success, negative value otherwise and rte_errno is set.
1059  */
1060 static __rte_always_inline int
1061 __flow_hw_act_data_general_append(struct mlx5_priv *priv,
1062 				  struct mlx5_hw_actions *acts,
1063 				  enum rte_flow_action_type type,
1064 				  uint16_t action_src,
1065 				  uint16_t action_dst)
1066 {
1067 	struct mlx5_action_construct_data *act_data;
1068 
1069 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1070 	if (!act_data)
1071 		return -1;
1072 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1073 	return 0;
1074 }
1075 
1076 static __rte_always_inline int
1077 __flow_hw_act_data_indirect_append(struct mlx5_priv *priv,
1078 				   struct mlx5_hw_actions *acts,
1079 				   enum rte_flow_action_type type,
1080 				   enum rte_flow_action_type mask_type,
1081 				   uint16_t action_src,
1082 				   uint16_t action_dst)
1083 {
1084 	struct mlx5_action_construct_data *act_data;
1085 
1086 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1087 	if (!act_data)
1088 		return -1;
1089 	act_data->indirect.expected_type = mask_type;
1090 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1091 	return 0;
1092 }
1093 
1094 static __rte_always_inline int
1095 flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
1096 				      struct mlx5_hw_actions *acts,
1097 				      enum rte_flow_action_type type,
1098 				      uint16_t action_src, uint16_t action_dst,
1099 				      indirect_list_callback_t cb)
1100 {
1101 	struct mlx5_action_construct_data *act_data;
1102 
1103 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1104 	if (!act_data)
1105 		return -1;
1106 	act_data->indirect_list_cb = cb;
1107 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1108 	return 0;
1109 }
1110 /**
1111  * Append dynamic encap action to the dynamic action list.
1112  *
1113  * @param[in] priv
1114  *   Pointer to the port private data structure.
1115  * @param[in] acts
1116  *   Pointer to the template HW steering DR actions.
1117  * @param[in] type
1118  *   Action type.
1119  * @param[in] action_src
1120  *   Offset of source rte flow action.
1121  * @param[in] action_dst
1122  *   Offset of destination DR action.
1123  * @param[in] len
1124  *   Length of the data to be updated.
1125  *
1126  * @return
1127  *    0 on success, negative value otherwise and rte_errno is set.
1128  */
1129 static __rte_always_inline int
1130 __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
1131 				struct mlx5_hw_actions *acts,
1132 				enum rte_flow_action_type type,
1133 				uint16_t action_src,
1134 				uint16_t action_dst,
1135 				uint16_t len)
1136 {
1137 	struct mlx5_action_construct_data *act_data;
1138 
1139 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1140 	if (!act_data)
1141 		return -1;
1142 	act_data->encap.len = len;
1143 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1144 	return 0;
1145 }
1146 
1147 /**
1148  * Append dynamic push action to the dynamic action list.
1149  *
1150  * @param[in] dev
1151  *   Pointer to the port.
1152  * @param[in] acts
1153  *   Pointer to the template HW steering DR actions.
1154  * @param[in] type
1155  *   Action type.
1156  * @param[in] action_src
1157  *   Offset of source rte flow action.
1158  * @param[in] action_dst
1159  *   Offset of destination DR action.
1160  * @param[in] len
1161  *   Length of the data to be updated.
1162  *
1163  * @return
1164  *    Data pointer on success, NULL otherwise and rte_errno is set.
1165  */
1166 static __rte_always_inline void *
1167 __flow_hw_act_data_push_append(struct rte_eth_dev *dev,
1168 			       struct mlx5_hw_actions *acts,
1169 			       enum rte_flow_action_type type,
1170 			       uint16_t action_src,
1171 			       uint16_t action_dst,
1172 			       uint16_t len)
1173 {
1174 	struct mlx5_action_construct_data *act_data;
1175 	struct mlx5_priv *priv = dev->data->dev_private;
1176 
1177 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1178 	if (!act_data)
1179 		return NULL;
1180 	act_data->ipv6_ext.len = len;
1181 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1182 	return act_data;
1183 }
1184 
1185 static __rte_always_inline int
1186 __flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
1187 				     struct mlx5_hw_actions *acts,
1188 				     enum rte_flow_action_type type,
1189 				     uint16_t action_src,
1190 				     uint16_t action_dst,
1191 				     const struct rte_flow_action_modify_field *mf,
1192 				     uint16_t mhdr_cmds_off,
1193 				     uint16_t mhdr_cmds_end,
1194 				     bool shared,
1195 				     struct field_modify_info *field,
1196 				     struct field_modify_info *dcopy,
1197 				     uint32_t *mask)
1198 {
1199 	struct mlx5_action_construct_data *act_data;
1200 
1201 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1202 	if (!act_data)
1203 		return -1;
1204 	act_data->modify_header.action = *mf;
1205 	act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
1206 	act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
1207 	act_data->modify_header.shared = shared;
1208 	rte_memcpy(act_data->modify_header.field, field,
1209 		   sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
1210 	rte_memcpy(act_data->modify_header.dcopy, dcopy,
1211 		   sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
1212 	rte_memcpy(act_data->modify_header.mask, mask,
1213 		   sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
1214 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1215 	return 0;
1216 }
1217 
1218 /**
1219  * Append shared RSS action to the dynamic action list.
1220  *
1221  * @param[in] priv
1222  *   Pointer to the port private data structure.
1223  * @param[in] acts
1224  *   Pointer to the template HW steering DR actions.
1225  * @param[in] type
1226  *   Action type.
1227  * @param[in] action_src
1228  *   Offset of source rte flow action.
1229  * @param[in] action_dst
1230  *   Offset of destination DR action.
1231  * @param[in] idx
1232  *   Shared RSS index.
1233  * @param[in] rss
1234  *   Pointer to the shared RSS info.
1235  *
1236  * @return
1237  *    0 on success, negative value otherwise and rte_errno is set.
1238  */
1239 static __rte_always_inline int
1240 __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
1241 				     struct mlx5_hw_actions *acts,
1242 				     enum rte_flow_action_type type,
1243 				     uint16_t action_src,
1244 				     uint16_t action_dst,
1245 				     uint32_t idx,
1246 				     struct mlx5_shared_action_rss *rss)
1247 {
1248 	struct mlx5_action_construct_data *act_data;
1249 
1250 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1251 	if (!act_data)
1252 		return -1;
1253 	act_data->shared_rss.level = rss->origin.level;
1254 	act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
1255 				     rss->origin.types;
1256 	act_data->shared_rss.idx = idx;
1257 	act_data->shared_rss.symmetric_hash_function =
1258 		MLX5_RSS_IS_SYMM(rss->origin.func);
1259 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1260 	return 0;
1261 }
1262 
1263 /**
1264  * Append shared counter action to the dynamic action list.
1265  *
1266  * @param[in] priv
1267  *   Pointer to the port private data structure.
1268  * @param[in] acts
1269  *   Pointer to the template HW steering DR actions.
1270  * @param[in] type
1271  *   Action type.
1272  * @param[in] action_src
1273  *   Offset of source rte flow action.
1274  * @param[in] action_dst
1275  *   Offset of destination DR action.
1276  * @param[in] cnt_id
1277  *   Shared counter id.
1278  *
1279  * @return
1280  *    0 on success, negative value otherwise and rte_errno is set.
1281  */
1282 static __rte_always_inline int
1283 __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
1284 				     struct mlx5_hw_actions *acts,
1285 				     enum rte_flow_action_type type,
1286 				     uint16_t action_src,
1287 				     uint16_t action_dst,
1288 				     cnt_id_t cnt_id)
1289 {
1290 	struct mlx5_action_construct_data *act_data;
1291 
1292 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1293 	if (!act_data)
1294 		return -1;
1295 	act_data->type = type;
1296 	act_data->shared_counter.id = cnt_id;
1297 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1298 	return 0;
1299 }
1300 
1301 /**
1302  * Append shared meter_mark action to the dynamic action list.
1303  *
1304  * @param[in] priv
1305  *   Pointer to the port private data structure.
1306  * @param[in] acts
1307  *   Pointer to the template HW steering DR actions.
1308  * @param[in] type
1309  *   Action type.
1310  * @param[in] action_src
1311  *   Offset of source rte flow action.
1312  * @param[in] action_dst
1313  *   Offset of destination DR action.
1314  * @param[in] mtr_id
1315  *   Shared meter id.
1316  *
1317  * @return
1318  *    0 on success, negative value otherwise and rte_errno is set.
1319  */
1320 static __rte_always_inline int
1321 __flow_hw_act_data_shared_mtr_append(struct mlx5_priv *priv,
1322 				     struct mlx5_hw_actions *acts,
1323 				     enum rte_flow_action_type type,
1324 				     uint16_t action_src,
1325 				     uint16_t action_dst,
1326 				     cnt_id_t mtr_id)
1327 {
	struct mlx5_action_construct_data *act_data;
1328 
1329 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1330 	if (!act_data)
1331 		return -1;
1332 	act_data->type = type;
1333 	act_data->shared_meter.id = mtr_id;
1334 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1335 	return 0;
1336 }
1337 
1338 /**
1339  * Translate shared indirect action.
1340  *
1341  * @param[in] dev
1342  *   Pointer to the rte_eth_dev data structure.
1343  * @param[in] action
1344  *   Pointer to the shared indirect rte_flow action.
1345  * @param[in] acts
1346  *   Pointer to the template HW steering DR actions.
1347  * @param[in] action_src
1348  *   Offset of source rte flow action.
1349  * @param[in] action_dst
1350  *   Offset of destination DR action.
1351  *
1352  * @return
1353  *    0 on success, negative value otherwise and rte_errno is set.
1354  */
1355 static __rte_always_inline int
1356 flow_hw_shared_action_translate(struct rte_eth_dev *dev,
1357 				const struct rte_flow_action *action,
1358 				struct mlx5_hw_actions *acts,
1359 				uint16_t action_src,
1360 				uint16_t action_dst)
1361 {
1362 	struct mlx5_priv *priv = dev->data->dev_private;
1363 	struct mlx5_shared_action_rss *shared_rss;
1364 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
1365 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1366 	uint32_t idx = act_idx &
1367 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1368 
1369 	switch (type) {
1370 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
1371 		shared_rss = mlx5_ipool_get
1372 		  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
1373 		if (!shared_rss || __flow_hw_act_data_shared_rss_append
1374 		    (priv, acts,
1375 		    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
1376 		    action_src, action_dst, idx, shared_rss)) {
1377 			DRV_LOG(WARNING, "Indirect RSS action index %d translate failed", act_idx);
1378 			return -1;
1379 		}
1380 		break;
1381 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
1382 		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
1383 			(enum rte_flow_action_type)
1384 			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
1385 			action_src, action_dst, act_idx)) {
1386 			DRV_LOG(WARNING, "Indirect count action translate failed");
1387 			return -1;
1388 		}
1389 		break;
1390 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
1391 		/* Not supported; prevented by the validate function. */
1392 		MLX5_ASSERT(0);
1393 		break;
1394 	case MLX5_INDIRECT_ACTION_TYPE_CT:
1395 		if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
1396 				       idx, &acts->rule_acts[action_dst])) {
1397 			DRV_LOG(WARNING, "Indirect CT action translate failed");
1398 			return -1;
1399 		}
1400 		break;
1401 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1402 		if (__flow_hw_act_data_shared_mtr_append(priv, acts,
1403 			(enum rte_flow_action_type)
1404 			MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
1405 			action_src, action_dst, idx)) {
1406 			DRV_LOG(WARNING, "Indirect meter mark action translate failed");
1407 			return -1;
1408 		}
1409 		break;
1410 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
1411 		flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
1412 		break;
1413 	default:
1414 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
1415 		break;
1416 	}
1417 	return 0;
1418 }
1419 
1420 static __rte_always_inline bool
1421 flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
1422 				      const struct rte_flow_action *mask)
1423 {
1424 	const struct rte_flow_action_modify_field *v = action->conf;
1425 	const struct rte_flow_action_modify_field *m = mask->conf;
1426 
1427 	if (v->src.field == RTE_FLOW_FIELD_VALUE) {
1428 		uint32_t j;
1429 
1430 		for (j = 0; j < RTE_DIM(m->src.value); ++j) {
1431 			/*
1432 			 * The immediate value is considered masked
1433 			 * (and thus shared by all flow rules) if the mask
1434 			 * is non-zero. A partial mask over an immediate
1435 			 * value is not allowed.
1436 			 */
1437 			if (m->src.value[j])
1438 				return true;
1439 		}
1440 		return false;
1441 	}
1442 	if (v->src.field == RTE_FLOW_FIELD_POINTER)
1443 		return m->src.pvalue != NULL;
1444 	/*
1445 	 * Source field types other than VALUE and
1446 	 * POINTER are always shared.
1447 	 */
1448 	return true;
1449 }
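
/*
 * A sketch (illustrative only) of how the template mask decides sharing for a
 * VALUE source:
 *
 * @code{.c}
 * struct rte_flow_action_modify_field mf_mask = {
 *     .operation = RTE_FLOW_MODIFY_SET,
 *     .dst = { .field = RTE_FLOW_FIELD_META },
 *     .src = { .field = RTE_FLOW_FIELD_VALUE },
 *     .width = 32,
 * };
 * struct rte_flow_action mask = {
 *     .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &mf_mask,
 * };
 *
 * memset(mf_mask.src.value, 0xff, sizeof(mf_mask.src.value));
 * // flow_hw_action_modify_field_is_shared(&action, &mask) -> true (shared)
 * memset(mf_mask.src.value, 0x00, sizeof(mf_mask.src.value));
 * // -> false, the immediate value is then provided per flow rule
 * @endcode
 */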
1450 
1451 static __rte_always_inline bool
1452 flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
1453 			  const struct mlx5_modification_cmd *cmd)
1454 {
1455 	struct mlx5_modification_cmd last_cmd = { { 0 } };
1456 	struct mlx5_modification_cmd new_cmd = { { 0 } };
1457 	const uint32_t cmds_num = mhdr->mhdr_cmds_num;
1458 	unsigned int last_type;
1459 	bool should_insert = false;
1460 
1461 	if (cmds_num == 0)
1462 		return false;
1463 	last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
1464 	last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
1465 	last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
1466 	last_type = last_cmd.action_type;
1467 	new_cmd = *cmd;
1468 	new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
1469 	new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
1470 	switch (new_cmd.action_type) {
1471 	case MLX5_MODIFICATION_TYPE_SET:
1472 	case MLX5_MODIFICATION_TYPE_ADD:
1473 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1474 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1475 			should_insert = new_cmd.field == last_cmd.field;
1476 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1477 			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1478 			should_insert = new_cmd.field == last_cmd.dst_field;
1479 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1480 			should_insert = false;
1481 		else
1482 			MLX5_ASSERT(false); /* Other types are not supported. */
1483 		break;
1484 	case MLX5_MODIFICATION_TYPE_COPY:
1485 	case MLX5_MODIFICATION_TYPE_ADD_FIELD:
1486 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1487 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1488 			should_insert = (new_cmd.field == last_cmd.field ||
1489 					 new_cmd.dst_field == last_cmd.field);
1490 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1491 			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1492 			should_insert = (new_cmd.field == last_cmd.dst_field ||
1493 					 new_cmd.dst_field == last_cmd.dst_field);
1494 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1495 			should_insert = false;
1496 		else
1497 			MLX5_ASSERT(false); /* Other types are not supported. */
1498 		break;
1499 	default:
1500 		/* Other action types should be rejected during action template validation. */
1501 		MLX5_ASSERT(false);
1502 		break;
1503 	}
1504 	return should_insert;
1505 }
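
/*
 * For example (illustrative only): two consecutive SET commands produced by
 * two MODIFY_FIELD actions writing the same field have to be separated:
 *
 *   SET field=A   <- last command already stored in mhdr
 *   NOP           <- appended by flow_hw_mhdr_cmd_nop_append()
 *   SET field=A   <- new command
 *
 * whereas commands touching unrelated fields can be laid out back to back.
 */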
1506 
1507 static __rte_always_inline int
1508 flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
1509 {
1510 	struct mlx5_modification_cmd *nop;
1511 	uint32_t num = mhdr->mhdr_cmds_num;
1512 
1513 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1514 		return -ENOMEM;
1515 	nop = mhdr->mhdr_cmds + num;
1516 	nop->data0 = 0;
1517 	nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
1518 	nop->data0 = rte_cpu_to_be_32(nop->data0);
1519 	nop->data1 = 0;
1520 	mhdr->mhdr_cmds_num = num + 1;
1521 	return 0;
1522 }
1523 
1524 static __rte_always_inline int
1525 flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
1526 			struct mlx5_modification_cmd *cmd)
1527 {
1528 	uint32_t num = mhdr->mhdr_cmds_num;
1529 
1530 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1531 		return -ENOMEM;
1532 	mhdr->mhdr_cmds[num] = *cmd;
1533 	mhdr->mhdr_cmds_num = num + 1;
1534 	return 0;
1535 }
1536 
1537 static __rte_always_inline int
1538 flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
1539 				   struct mlx5_flow_dv_modify_hdr_resource *resource)
1540 {
1541 	uint32_t idx;
1542 	int ret;
1543 
1544 	for (idx = 0; idx < resource->actions_num; ++idx) {
1545 		struct mlx5_modification_cmd *src = &resource->actions[idx];
1546 
1547 		if (flow_hw_should_insert_nop(mhdr, src)) {
1548 			ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1549 			if (ret)
1550 				return ret;
1551 		}
1552 		ret = flow_hw_mhdr_cmd_append(mhdr, src);
1553 		if (ret)
1554 			return ret;
1555 	}
1556 	return 0;
1557 }
1558 
1559 static __rte_always_inline void
1560 flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
1561 			  struct rte_flow_actions_template *at)
1562 {
1563 	memset(mhdr, 0, sizeof(*mhdr));
1564 	/* Modify header action without any commands is shared by default. */
1565 	mhdr->shared = true;
1566 	mhdr->pos = at->mhdr_off;
1567 }
1568 
1569 static __rte_always_inline int
1570 flow_hw_modify_field_compile(struct rte_eth_dev *dev,
1571 			     const struct rte_flow_attr *attr,
1572 			     const struct rte_flow_action *action, /* Current action from AT. */
1573 			     const struct rte_flow_action *action_mask, /* Current mask from AT. */
1574 			     struct mlx5_hw_actions *acts,
1575 			     struct mlx5_hw_modify_header_action *mhdr,
1576 			     uint16_t src_pos,
1577 			     struct rte_flow_error *error)
1578 {
1579 	struct mlx5_priv *priv = dev->data->dev_private;
1580 	const struct rte_flow_action_modify_field *conf = action->conf;
1581 	union {
1582 		struct mlx5_flow_dv_modify_hdr_resource resource;
1583 		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
1584 			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
1585 	} dummy;
1586 	struct mlx5_flow_dv_modify_hdr_resource *resource;
1587 	struct rte_flow_item item = {
1588 		.spec = NULL,
1589 		.mask = NULL
1590 	};
1591 	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1592 						{0, 0, MLX5_MODI_OUT_NONE} };
1593 	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1594 						{0, 0, MLX5_MODI_OUT_NONE} };
1595 	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
1596 	uint32_t type, value = 0;
1597 	uint16_t cmds_start, cmds_end;
1598 	bool shared;
1599 	int ret;
1600 
1601 	/*
1602 	 * The modify header action is shared if all previous modify_field actions
1603 	 * are shared and the currently compiled action is shared.
1604 	 */
1605 	shared = flow_hw_action_modify_field_is_shared(action, action_mask);
1606 	mhdr->shared &= shared;
1607 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1608 	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
1609 		type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
1610 								MLX5_MODIFICATION_TYPE_ADD;
1611 		/* For SET/ADD fill the destination field (field) first. */
1612 		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1613 						  conf->width, dev,
1614 						  attr, error);
1615 		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1616 				(void *)(uintptr_t)conf->src.pvalue :
1617 				(void *)(uintptr_t)&conf->src.value;
1618 		if (conf->dst.field == RTE_FLOW_FIELD_META ||
1619 		    conf->dst.field == RTE_FLOW_FIELD_TAG ||
1620 		    conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
1621 		    conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
1622 			uint8_t tag_index = flow_tag_index_get(&conf->dst);
1623 
1624 			value = *(const unaligned_uint32_t *)item.spec;
1625 			if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
1626 			    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1627 				value = rte_cpu_to_be_32(value << 16);
1628 			else
1629 				value = rte_cpu_to_be_32(value);
1630 			item.spec = &value;
1631 		} else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
1632 			   conf->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
1633 			/*
1634 			 * Both QFI and the GENEVE option type are passed as a uint8_t integer,
1635 			 * but they are accessed through the 2nd least significant byte of a
1636 			 * 32-bit field in the modify header command.
1637 			 */
1638 			value = *(const uint8_t *)item.spec;
1639 			value = rte_cpu_to_be_32(value << 8);
1640 			item.spec = &value;
1641 		} else if (conf->dst.field == RTE_FLOW_FIELD_VXLAN_LAST_RSVD) {
1642 			value = *(const uint8_t *)item.spec << 24;
1643 			value = rte_cpu_to_be_32(value);
1644 			item.spec = &value;
1645 		}
1646 	} else {
1647 		type = conf->operation == RTE_FLOW_MODIFY_SET ?
1648 		       MLX5_MODIFICATION_TYPE_COPY : MLX5_MODIFICATION_TYPE_ADD_FIELD;
1649 		/* For COPY fill the destination field (dcopy) without mask. */
1650 		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1651 						  conf->width, dev,
1652 						  attr, error);
1653 		/* Then construct the source field (field) with mask. */
1654 		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1655 						  conf->width, dev,
1656 						  attr, error);
1657 	}
1658 	item.mask = &mask;
1659 	memset(&dummy, 0, sizeof(dummy));
1660 	resource = &dummy.resource;
1661 	ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
1662 	if (ret)
1663 		return ret;
1664 	MLX5_ASSERT(resource->actions_num > 0);
1665 	/*
1666 	 * If the previous modify field action collides with this one, insert a NOP command.
1667 	 * This NOP command will not be part of the action's command range used to update
1668 	 * commands on rule creation.
1669 	 */
1670 	if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
1671 		ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1672 		if (ret)
1673 			return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1674 						  NULL, "too many modify field operations specified");
1675 	}
1676 	cmds_start = mhdr->mhdr_cmds_num;
1677 	ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
1678 	if (ret)
1679 		return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1680 					  NULL, "too many modify field operations specified");
1681 
1682 	cmds_end = mhdr->mhdr_cmds_num;
1683 	if (shared)
1684 		return 0;
1685 	ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
1686 						   src_pos, mhdr->pos, conf,
1687 						   cmds_start, cmds_end, shared,
1688 						   field, dcopy, mask);
1689 	if (ret)
1690 		return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1691 					  NULL, "not enough memory to store modify field metadata");
1692 	return 0;
1693 }
1694 
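/*
 * Count the NOP commands present in the compiled modify header command array.
 * Commands are stored in big-endian format, so data0 is byte-swapped on a local
 * copy before the action_type field is examined.
 */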
1695 static uint32_t
1696 flow_hw_count_nop_modify_field(struct mlx5_hw_modify_header_action *mhdr)
1697 {
1698 	uint32_t i;
1699 	uint32_t nops = 0;
1700 
1701 	for (i = 0; i < mhdr->mhdr_cmds_num; ++i) {
1702 		struct mlx5_modification_cmd cmd = mhdr->mhdr_cmds[i];
1703 
1704 		cmd.data0 = rte_be_to_cpu_32(cmd.data0);
1705 		if (cmd.action_type == MLX5_MODIFICATION_TYPE_NOP)
1706 			++nops;
1707 	}
1708 	return nops;
1709 }
1710 
1711 static int
1712 flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
1713 				       const struct mlx5_flow_template_table_cfg *cfg,
1714 				       struct mlx5_hw_modify_header_action *mhdr,
1715 				       struct rte_flow_error *error)
1716 {
1717 	struct mlx5_priv *priv = dev->data->dev_private;
1718 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
1719 
1720 	/*
1721 	 * Header modify pattern length limitation is only valid for HWS groups, i.e. groups > 0.
1722 	 * In group 0, MODIFY_FIELD actions are handled with header modify actions
1723 	 * managed by rdma-core.
1724 	 */
1725 	if (cfg->attr.flow_attr.group != 0 &&
1726 	    mhdr->mhdr_cmds_num > hca_attr->max_header_modify_pattern_length) {
1727 		uint32_t nops = flow_hw_count_nop_modify_field(mhdr);
1728 
1729 		DRV_LOG(ERR, "Too many modify header commands generated from "
1730 			     "MODIFY_FIELD actions. "
1731 			     "Generated HW commands = %u (amount of NOP commands = %u). "
1732 			     "Maximum supported = %u.",
1733 			     mhdr->mhdr_cmds_num, nops,
1734 			     hca_attr->max_header_modify_pattern_length);
1735 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1736 					  "Number of MODIFY_FIELD actions exceeds maximum "
1737 					  "supported limit of actions");
1738 	}
1739 	return 0;
1740 }
1741 
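/*
 * Compile the REPRESENTED_PORT action. The action is only allowed on the
 * transfer proxy port, in non-root groups and without direction attributes.
 * When the port index is masked in the template, the pre-created vport action
 * is set directly in the rule actions; otherwise the action is appended to the
 * construct data list and resolved per flow rule.
 */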
1742 static int
1743 flow_hw_represented_port_compile(struct rte_eth_dev *dev,
1744 				 const struct rte_flow_attr *attr,
1745 				 const struct rte_flow_action *action,
1746 				 const struct rte_flow_action *action_mask,
1747 				 struct mlx5_hw_actions *acts,
1748 				 uint16_t action_src, uint16_t action_dst,
1749 				 struct rte_flow_error *error)
1750 {
1751 	struct mlx5_priv *priv = dev->data->dev_private;
1752 	const struct rte_flow_action_ethdev *v = action->conf;
1753 	const struct rte_flow_action_ethdev *m = action_mask->conf;
1754 	int ret;
1755 
1756 	if (!attr->group)
1757 		return rte_flow_error_set(error, EINVAL,
1758 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1759 					  "represented_port action cannot"
1760 					  " be used on group 0");
1761 	if (!attr->transfer)
1762 		return rte_flow_error_set(error, EINVAL,
1763 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1764 					  NULL,
1765 					  "represented_port action requires"
1766 					  " transfer attribute");
1767 	if (attr->ingress || attr->egress)
1768 		return rte_flow_error_set(error, EINVAL,
1769 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1770 					  "represented_port action cannot"
1771 					  " be used with direction attributes");
1772 	if (!priv->master)
1773 		return rte_flow_error_set(error, EINVAL,
1774 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1775 					  "represented_port action must"
1776 					  " be used on proxy port");
1777 	if (m && !!m->port_id) {
1778 		struct mlx5_priv *port_priv;
1779 
1780 		if (!v)
1781 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1782 						  action, "port index was not provided");
1783 		port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
1784 		if (port_priv == NULL)
1785 			return rte_flow_error_set
1786 					(error, EINVAL,
1787 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1788 					 "port does not exist or unable to"
1789 					 " obtain E-Switch info for port");
1790 		MLX5_ASSERT(priv->hw_vport != NULL);
1791 		if (priv->hw_vport[v->port_id]) {
1792 			acts->rule_acts[action_dst].action =
1793 					priv->hw_vport[v->port_id];
1794 		} else {
1795 			return rte_flow_error_set
1796 					(error, EINVAL,
1797 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1798 					 "cannot use represented_port action"
1799 					 " with this port");
1800 		}
1801 	} else {
1802 		ret = __flow_hw_act_data_general_append
1803 				(priv, acts, action->type,
1804 				 action_src, action_dst);
1805 		if (ret)
1806 			return rte_flow_error_set
1807 					(error, ENOMEM,
1808 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1809 					 "not enough memory to store"
1810 					 " vport action");
1811 	}
1812 	return 0;
1813 }
1814 
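/*
 * Compile the METER action. The ASO meter DR action and offset are stored at
 * aso_mtr_pos and a jump action to the meter termination group is registered
 * at jump_pos; the call waits for the ASO meter object to become ready.
 */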
1815 static __rte_always_inline int
1816 flow_hw_meter_compile(struct rte_eth_dev *dev,
1817 		      const struct mlx5_flow_template_table_cfg *cfg,
1818 		      uint16_t aso_mtr_pos,
1819 		      uint16_t jump_pos,
1820 		      const struct rte_flow_action *action,
1821 		      struct mlx5_hw_actions *acts,
1822 		      struct rte_flow_error *error)
1823 {
1824 	struct mlx5_priv *priv = dev->data->dev_private;
1825 	struct mlx5_aso_mtr *aso_mtr;
1826 	const struct rte_flow_action_meter *meter = action->conf;
1827 	uint32_t group = cfg->attr.flow_attr.group;
1828 
1829 	aso_mtr = mlx5_aso_meter_by_idx(priv, meter->mtr_id);
1830 	acts->rule_acts[aso_mtr_pos].action = priv->mtr_bulk.action;
1831 	acts->rule_acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1832 	acts->jump = flow_hw_jump_action_register
1833 		(dev, cfg, aso_mtr->fm.group, error);
1834 	if (!acts->jump)
1835 		return -ENOMEM;
1836 	acts->rule_acts[jump_pos].action = (!!group) ?
1837 				    acts->jump->hws_action :
1838 				    acts->jump->root_action;
1839 	if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
1840 		return -ENOMEM;
1841 	return 0;
1842 }
1843 
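/*
 * Compile the COUNT action into a DR counter action. A shared counter is taken
 * from the HWS counter pool and its action handle and offset are stored at the
 * given position; the counter ID is kept in the template actions context.
 */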
1844 static __rte_always_inline int
1845 flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t  start_pos,
1846 		      struct mlx5_hw_actions *acts)
1847 {
1848 	struct mlx5_priv *priv = dev->data->dev_private;
1849 	uint32_t pos = start_pos;
1850 	cnt_id_t cnt_id;
1851 	int ret;
1852 
1853 	ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
1854 	if (ret != 0)
1855 		return ret;
1856 	ret = mlx5_hws_cnt_pool_get_action_offset
1857 				(priv->hws_cpool,
1858 				 cnt_id,
1859 				 &acts->rule_acts[pos].action,
1860 				 &acts->rule_acts[pos].counter.offset);
1861 	if (ret != 0)
1862 		return ret;
1863 	acts->cnt_id = cnt_id;
1864 	return 0;
1865 }
1866 
1867 static __rte_always_inline bool
1868 is_of_vlan_pcp_present(const struct rte_flow_action *actions)
1869 {
1870 	/*
1871 	 * Order of RTE VLAN push actions is
1872 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
1873 	 */
1874 	return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
1875 		RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
1876 }
1877 
1878 static __rte_always_inline bool
1879 is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
1880 {
1881 	/*
1882 	 * In a masked push VLAN template, all RTE push VLAN actions are masked.
1883 	 */
1884 	return mask && mask->ethertype != 0;
1885 }
1886 
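/*
 * Pack the OF_PUSH_VLAN ethertype and the OF_SET_VLAN_VID / OF_SET_VLAN_PCP
 * values into the single 32-bit VLAN header word (TPID + TCI) expected by the
 * DR push VLAN action.
 */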
1887 static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
1888 {
1889 /*
1890  * OpenFlow Switch Specification defines 802.1Q VID as 12+1 bits.
1891  */
1892 	rte_be32_t type, vid, pcp;
1893 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1894 	rte_be32_t vid_lo, vid_hi;
1895 #endif
1896 
1897 	type = ((const struct rte_flow_action_of_push_vlan *)
1898 		actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
1899 	vid = ((const struct rte_flow_action_of_set_vlan_vid *)
1900 		actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
1901 	pcp = is_of_vlan_pcp_present(actions) ?
1902 	      ((const struct rte_flow_action_of_set_vlan_pcp *)
1903 		      actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
1904 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1905 	vid_hi = vid & 0xff;
1906 	vid_lo = vid >> 8;
1907 	return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type;
1908 #else
1909 	return (type << 16) | (pcp << 13) | vid;
1910 #endif
1911 }
1912 
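/*
 * Allocate an ASO meter object for the METER_MARK action. The meter is taken
 * from the indexed pool, filled with the profile, state and color mode from the
 * action configuration and programmed in HW through an ASO WQE. On the
 * synchronous (invalid) queue the call also waits for the WQE completion.
 */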
1913 static __rte_always_inline struct mlx5_aso_mtr *
1914 flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
1915 			 const struct rte_flow_action *action,
1916 			 struct mlx5_hw_q_job *job, bool push,
1917 			 struct rte_flow_error *error)
1918 {
1919 	struct mlx5_priv *priv = dev->data->dev_private;
1920 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1921 	const struct rte_flow_action_meter_mark *meter_mark = action->conf;
1922 	struct mlx5_aso_mtr *aso_mtr;
1923 	struct mlx5_flow_meter_info *fm;
1924 	uint32_t mtr_id = 0;
1925 	uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
1926 					MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1927 
1928 	if (priv->shared_host) {
1929 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1930 				   "Meter mark actions can only be created on the host port");
1931 		return NULL;
1932 	}
1933 	if (meter_mark->profile == NULL)
1934 		return NULL;
1935 	aso_mtr = mlx5_ipool_malloc(pool->idx_pool, &mtr_id);
1936 	if (!aso_mtr) {
1937 		rte_flow_error_set(error, ENOMEM,
1938 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1939 				   NULL,
1940 				   "failed to allocate aso meter entry");
1941 		if (mtr_id)
1942 			mlx5_ipool_free(pool->idx_pool, mtr_id);
1943 		return NULL;
1944 	}
1945 	/* Fill the flow meter parameters. */
1946 	aso_mtr->type = ASO_METER_INDIRECT;
1947 	fm = &aso_mtr->fm;
1948 	fm->meter_id = mtr_id;
1949 	fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
1950 	fm->is_enable = meter_mark->state;
1951 	fm->color_aware = meter_mark->color_mode;
1952 	aso_mtr->pool = pool;
1953 	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
1954 			  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
1955 	aso_mtr->offset = mtr_id - 1;
1956 	aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;
1957 	job->action = (void *)(handle | mtr_id);
1958 	/* Update ASO flow meter by wqe. */
1959 	if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
1960 					 &priv->mtr_bulk, job, push)) {
1961 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1962 		return NULL;
1963 	}
1964 	/* Wait for ASO object completion. */
1965 	if (queue == MLX5_HW_INV_QUEUE &&
1966 	    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
1967 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1968 		return NULL;
1969 	}
1970 	return aso_mtr;
1971 }
1972 
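/*
 * Compile a METER_MARK action with a masked profile. A queue job is taken to
 * allocate and program the ASO meter object, then the meter pool DR action and
 * the meter offset are stored in the rule actions and the meter ID is returned
 * in *index.
 */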
1973 static __rte_always_inline int
1974 flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
1975 			   uint16_t aso_mtr_pos,
1976 			   const struct rte_flow_action *action,
1977 			   struct mlx5dr_rule_action *acts,
1978 			   uint32_t *index,
1979 			   uint32_t queue,
1980 			   struct rte_flow_error *error)
1981 {
1982 	struct mlx5_priv *priv = dev->data->dev_private;
1983 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1984 	struct mlx5_aso_mtr *aso_mtr;
1985 	struct mlx5_hw_q_job *job =
1986 		flow_hw_action_job_init(priv, queue, NULL, NULL, NULL,
1987 					MLX5_HW_Q_JOB_TYPE_CREATE,
1988 					MLX5_HW_INDIRECT_TYPE_LEGACY, NULL);
1989 
1990 	if (!job)
1991 		return -1;
1992 	aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job,
1993 					   true, error);
1994 	if (!aso_mtr) {
1995 		flow_hw_job_put(priv, job, queue);
1996 		return -1;
1997 	}
1998 
1999 	/* Compile METER_MARK action */
2000 	acts[aso_mtr_pos].action = pool->action;
2001 	acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
2002 	*index = aso_mtr->fm.meter_id;
2003 	return 0;
2004 }
2005 
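/*
 * Resolve an indirect list mirror handle into its DR mirror action. Used both
 * at template translation time for a masked handle and as the construct
 * callback when the handle is provided per flow rule.
 */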
2006 static int
2007 flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
2008 				  __rte_unused const struct mlx5_action_construct_data *act_data,
2009 				  const struct rte_flow_action *action,
2010 				  struct mlx5dr_rule_action *dr_rule)
2011 {
2012 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
2013 	const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
2014 
2015 	dr_rule->action = mirror->mirror_action;
2016 	return 0;
2017 }
2018 
2019 /**
2020  * HWS mirror is implemented as a FW island.
2021  * The action does not support indirect list flow configuration.
2022  * If the template handle was masked, use that handle's mirror action in flow rules.
2023  * Otherwise, let each flow rule specify the mirror handle.
2024  */
2025 static int
2026 hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
2027 					 const struct rte_flow_action *action,
2028 					 const struct rte_flow_action *mask,
2029 					 struct mlx5_hw_actions *acts,
2030 					 uint16_t action_src, uint16_t action_dst)
2031 {
2032 	int ret = 0;
2033 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2034 
2035 	if (mask_conf && mask_conf->handle) {
2036 		/**
2037 		 * If the mirror handle was masked, assign the fixed DR5 mirror action.
2038 		 */
2039 		flow_hw_translate_indirect_mirror(dev, NULL, action,
2040 						  &acts->rule_acts[action_dst]);
2041 	} else {
2042 		struct mlx5_priv *priv = dev->data->dev_private;
2043 		ret = flow_hw_act_data_indirect_list_append
2044 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2045 			 action_src, action_dst,
2046 			 flow_hw_translate_indirect_mirror);
2047 	}
2048 	return ret;
2049 }
2050 
2051 static int
2052 flow_hw_reformat_action(__rte_unused struct rte_eth_dev *dev,
2053 			__rte_unused const struct mlx5_action_construct_data *data,
2054 			const struct rte_flow_action *action,
2055 			struct mlx5dr_rule_action *dr_rule)
2056 {
2057 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
2058 
2059 	dr_rule->action = ((struct mlx5_hw_encap_decap_action *)
2060 			   (indlst_conf->handle))->action;
2061 	if (!dr_rule->action)
2062 		return -EINVAL;
2063 	return 0;
2064 }
2065 
2066 /**
2067  * Template conf must not be masked. If the handle is masked, use the one from the
2068  * template; otherwise, update it per flow rule.
2069  */
2070 static int
2071 hws_table_tmpl_translate_indirect_reformat(struct rte_eth_dev *dev,
2072 					   const struct rte_flow_action *action,
2073 					   const struct rte_flow_action *mask,
2074 					   struct mlx5_hw_actions *acts,
2075 					   uint16_t action_src, uint16_t action_dst)
2076 {
2077 	int ret = -1;
2078 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2079 	struct mlx5_priv *priv = dev->data->dev_private;
2080 
2081 	if (mask_conf && mask_conf->handle && !mask_conf->conf)
2082 		/**
2083 		 * If handle was masked, assign fixed DR action.
2084 		 */
2085 		ret = flow_hw_reformat_action(dev, NULL, action,
2086 					      &acts->rule_acts[action_dst]);
2087 	else if (mask_conf && !mask_conf->handle && !mask_conf->conf)
2088 		ret = flow_hw_act_data_indirect_list_append
2089 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2090 			 action_src, action_dst, flow_hw_reformat_action);
2091 	return ret;
2092 }
2093 
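/*
 * Resolve a legacy indirect METER_MARK handle into the ASO meter pool DR action
 * and the offset of the referenced meter object.
 */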
2094 static int
2095 flow_dr_set_meter(struct mlx5_priv *priv,
2096 		  struct mlx5dr_rule_action *dr_rule,
2097 		  const struct rte_flow_action_indirect_list *action_conf)
2098 {
2099 	const struct mlx5_indlst_legacy *legacy_obj =
2100 		(typeof(legacy_obj))action_conf->handle;
2101 	struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
2102 	uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
2103 	uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
2104 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
2105 
2106 	if (!aso_mtr)
2107 		return -EINVAL;
2108 	dr_rule->action = mtr_pool->action;
2109 	dr_rule->aso_meter.offset = aso_mtr->offset;
2110 	return 0;
2111 }
2112 
2113 __rte_always_inline static void
2114 flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
2115 {
2116 	dr_rule->aso_meter.init_color =
2117 		(enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
2118 }
2119 
2120 static int
2121 flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
2122 				 const struct mlx5_action_construct_data *act_data,
2123 				 const struct rte_flow_action *action,
2124 				 struct mlx5dr_rule_action *dr_rule)
2125 {
2126 	int ret;
2127 	struct mlx5_priv *priv = dev->data->dev_private;
2128 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
2129 	const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
2130 		(typeof(flow_conf))action_conf->conf;
2131 
2132 	ret = flow_dr_set_meter(priv, dr_rule, action_conf);
2133 	if (ret)
2134 		return ret;
2135 	if (!act_data->shared_meter.conf_masked) {
2136 		if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
2137 			flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
2138 	}
2139 	return 0;
2140 }
2141 
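/*
 * Translate an indirect list METER_MARK action at table template creation.
 * A masked handle resolves the DR meter action immediately and a masked conf
 * fixes the initial color; anything left unmasked is deferred to flow rule
 * construction through the appended construct data.
 */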
2142 static int
2143 hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
2144 					const struct rte_flow_action *action,
2145 					const struct rte_flow_action *mask,
2146 					struct mlx5_hw_actions *acts,
2147 					uint16_t action_src, uint16_t action_dst)
2148 {
2149 	int ret;
2150 	struct mlx5_priv *priv = dev->data->dev_private;
2151 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
2152 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2153 	bool is_handle_masked = mask_conf && mask_conf->handle;
2154 	bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
2155 	struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
2156 
2157 	if (is_handle_masked) {
2158 		ret = flow_dr_set_meter(priv, dr_rule, action->conf);
2159 		if (ret)
2160 			return ret;
2161 	}
2162 	if (is_conf_masked) {
2163 		const struct
2164 			rte_flow_indirect_update_flow_meter_mark **flow_conf =
2165 			(typeof(flow_conf))action_conf->conf;
2166 		flow_dr_mtr_flow_color(dr_rule,
2167 				       flow_conf[0]->init_color);
2168 	}
2169 	if (!is_handle_masked || !is_conf_masked) {
2170 		struct mlx5_action_construct_data *act_data;
2171 
2172 		ret = flow_hw_act_data_indirect_list_append
2173 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2174 			 action_src, action_dst, flow_hw_translate_indirect_meter);
2175 		if (ret)
2176 			return ret;
2177 		act_data = LIST_FIRST(&acts->act_list);
2178 		act_data->shared_meter.conf_masked = is_conf_masked;
2179 	}
2180 	return 0;
2181 }
2182 
2183 static int
2184 hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
2185 					 const struct rte_flow_action *action,
2186 					 const struct rte_flow_action *mask,
2187 					 struct mlx5_hw_actions *acts,
2188 					 uint16_t action_src, uint16_t action_dst)
2189 {
2190 	int ret;
2191 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
2192 	struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
2193 	uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
2194 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
2195 
2196 	switch (type) {
2197 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
2198 		ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
2199 							      acts, action_src,
2200 							      action_dst);
2201 		break;
2202 	default:
2203 		ret = -EINVAL;
2204 		break;
2205 	}
2206 	return ret;
2207 }
2208 
2209 /*
2210  * template .. indirect_list handle Ht conf Ct ..
2211  * mask     .. indirect_list handle Hm conf Cm ..
2212  *
2213  * PMD requires Ht != 0 to resolve handle type.
2214  * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
2215  * not change. Otherwise, DR5 action will be resolved during flow rule build.
2216  * If Ct was masked (Cm != 0), table template processing updates base
2217  * indirect action configuration with Ct parameters.
2218  */
2219 static int
2220 table_template_translate_indirect_list(struct rte_eth_dev *dev,
2221 				       const struct rte_flow_action *action,
2222 				       const struct rte_flow_action *mask,
2223 				       struct mlx5_hw_actions *acts,
2224 				       uint16_t action_src, uint16_t action_dst)
2225 {
2226 	int ret = 0;
2227 	enum mlx5_indirect_list_type type;
2228 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
2229 
2230 	if (!list_conf || !list_conf->handle)
2231 		return -EINVAL;
2232 	type = mlx5_get_indirect_list_type(list_conf->handle);
2233 	switch (type) {
2234 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
2235 		ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
2236 							       acts, action_src,
2237 							       action_dst);
2238 		break;
2239 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
2240 		ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
2241 							       acts, action_src,
2242 							       action_dst);
2243 		break;
2244 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
2245 		if (list_conf->conf)
2246 			return -EINVAL;
2247 		ret = hws_table_tmpl_translate_indirect_reformat(dev, action, mask,
2248 								 acts, action_src,
2249 								 action_dst);
2250 		break;
2251 	default:
2252 		return -EINVAL;
2253 	}
2254 	return ret;
2255 }
2256 
2257 static void
2258 mlx5_set_reformat_header(struct mlx5dr_action_reformat_header *hdr,
2259 			 uint8_t *encap_data,
2260 			 size_t data_size)
2261 {
2262 	hdr->sz = data_size;
2263 	hdr->data = encap_data;
2264 }
2265 
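/*
 * Translate an encap/decap (reformat) action for a template table. Masked
 * encapsulation data, or a reformat type not handled by the multi-pattern
 * mechanism, results in the header being copied aside and the action marked as
 * shared; otherwise the header is added to the table multi-pattern context and
 * the encapsulation data is filled per flow rule.
 */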
2266 static int
2267 mlx5_tbl_translate_reformat(struct mlx5_priv *priv,
2268 			    struct mlx5_hw_actions *acts,
2269 			    struct rte_flow_actions_template *at,
2270 			    const struct rte_flow_item *enc_item,
2271 			    const struct rte_flow_item *enc_item_m,
2272 			    uint8_t *encap_data, uint8_t *encap_data_m,
2273 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2274 			    size_t data_size, uint16_t reformat_src,
2275 			    enum mlx5dr_action_type refmt_type,
2276 			    struct rte_flow_error *error)
2277 {
2278 	int mp_reformat_ix = mlx5_multi_pattern_reformat_to_index(refmt_type);
2279 	struct mlx5dr_action_reformat_header hdr;
2280 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
2281 	bool shared_rfmt = false;
2282 	int ret;
2283 
2284 	MLX5_ASSERT(at->reformat_off != UINT16_MAX);
2285 	if (enc_item) {
2286 		MLX5_ASSERT(!encap_data);
2287 		ret = flow_dv_convert_encap_data(enc_item, buf, &data_size, error);
2288 		if (ret)
2289 			return ret;
2290 		encap_data = buf;
2291 		if (enc_item_m)
2292 			shared_rfmt = true;
2293 	} else if (encap_data && encap_data_m) {
2294 		shared_rfmt = true;
2295 	}
2296 	acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
2297 					sizeof(*acts->encap_decap) + data_size,
2298 					0, SOCKET_ID_ANY);
2299 	if (!acts->encap_decap)
2300 		return rte_flow_error_set(error, ENOMEM,
2301 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2302 					  NULL, "no memory for reformat context");
2303 	acts->encap_decap_pos = at->reformat_off;
2304 	acts->encap_decap->data_size = data_size;
2305 	acts->encap_decap->action_type = refmt_type;
2306 	if (shared_rfmt || mp_reformat_ix < 0) {
2307 		uint16_t reformat_ix = at->reformat_off;
2308 		/*
2309 		 * This copy is only needed in non-template mode,
2310 		 * in order to create the action later.
2311 		 */
2312 		memcpy(acts->encap_decap->data, encap_data, data_size);
2313 		acts->rule_acts[reformat_ix].reformat.data = acts->encap_decap->data;
2314 		acts->rule_acts[reformat_ix].reformat.offset = 0;
2315 		acts->encap_decap->shared = true;
2316 	} else {
2317 		uint32_t ix;
2318 		typeof(mp_ctx->reformat[0]) *reformat = mp_ctx->reformat +
2319 							mp_reformat_ix;
2320 		mlx5_set_reformat_header(&hdr, encap_data, data_size);
2321 		ix = reformat->elements_num++;
2322 		reformat->reformat_hdr[ix] = hdr;
2323 		acts->rule_acts[at->reformat_off].reformat.hdr_idx = ix;
2324 		acts->encap_decap->multi_pattern = 1;
2325 		ret = __flow_hw_act_data_encap_append
2326 			(priv, acts, (at->actions + reformat_src)->type,
2327 			 reformat_src, at->reformat_off, data_size);
2328 		if (ret)
2329 			return -rte_errno;
2330 		mlx5_multi_pattern_activate(mp_ctx);
2331 	}
2332 	return 0;
2333 }
2334 
2335 static int
2336 mlx5_tbl_create_reformat_action(struct mlx5_priv *priv,
2337 				const struct rte_flow_template_table_attr *table_attr,
2338 				struct mlx5_hw_actions *acts,
2339 				struct rte_flow_actions_template *at,
2340 				uint8_t *encap_data,
2341 				size_t data_size,
2342 				enum mlx5dr_action_type refmt_type)
2343 {
2344 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2345 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
2346 	struct mlx5dr_action_reformat_header hdr;
2347 
2348 	mlx5_set_reformat_header(&hdr, encap_data, data_size);
2349 	uint16_t reformat_ix = at->reformat_off;
2350 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
2351 				MLX5DR_ACTION_FLAG_SHARED;
2352 
2353 	acts->encap_decap->action = mlx5dr_action_create_reformat(priv->dr_ctx, refmt_type,
2354 							   1, &hdr, 0, flags);
2355 	if (!acts->encap_decap->action)
2356 		return -rte_errno;
2357 	acts->rule_acts[reformat_ix].action = acts->encap_decap->action;
2358 	return 0;
2359 }
2360 
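/*
 * Finalize the modify header commands accumulated during template translation.
 * The compiled command set is validated against the HCA pattern length limit,
 * copied into the actions context and, when not fully shared, registered in the
 * table multi-pattern context.
 */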
2361 static int
2362 mlx5_tbl_translate_modify_header(struct rte_eth_dev *dev,
2363 				 const struct mlx5_flow_template_table_cfg *cfg,
2364 				 struct mlx5_hw_actions *acts,
2365 				 struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2366 				 struct mlx5_hw_modify_header_action *mhdr,
2367 				 struct rte_flow_error *error)
2368 {
2369 	uint16_t mhdr_ix = mhdr->pos;
2370 	struct mlx5dr_action_mh_pattern pattern = {
2371 		.sz = sizeof(struct mlx5_modification_cmd) * mhdr->mhdr_cmds_num
2372 	};
2373 
2374 	if (flow_hw_validate_compiled_modify_field(dev, cfg, mhdr, error)) {
2375 		__flow_hw_action_template_destroy(dev, acts);
2376 		return -rte_errno;
2377 	}
2378 	acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
2379 				 0, SOCKET_ID_ANY);
2380 	if (!acts->mhdr)
2381 		return rte_flow_error_set(error, ENOMEM,
2382 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2383 					  NULL, "translate modify_header: no memory for modify header context");
2384 	rte_memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
2385 	if (!mhdr->shared) {
2386 		pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
2387 		typeof(mp_ctx->mh) *mh = &mp_ctx->mh;
2388 		uint32_t idx = mh->elements_num;
2389 		mh->pattern[mh->elements_num++] = pattern;
2390 		acts->mhdr->multi_pattern = 1;
2391 		acts->rule_acts[mhdr_ix].modify_header.pattern_idx = idx;
2392 		mlx5_multi_pattern_activate(mp_ctx);
2393 	}
2394 	return 0;
2395 }
2396 
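/*
 * Create a shared DR modify header action from the pre-compiled command pattern
 * and store it at the modify header position in the rule actions.
 */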
2397 static int
2398 mlx5_tbl_ensure_shared_modify_header(struct rte_eth_dev *dev,
2399 				     const struct mlx5_flow_template_table_cfg *cfg,
2400 				     struct mlx5_hw_actions *acts,
2401 				     struct rte_flow_error *error)
2402 {
2403 	struct mlx5_priv *priv = dev->data->dev_private;
2404 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2405 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2406 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
2407 	struct mlx5dr_action_mh_pattern pattern = {
2408 		.sz = sizeof(struct mlx5_modification_cmd) * acts->mhdr->mhdr_cmds_num
2409 	};
2410 	uint16_t mhdr_ix = acts->mhdr->pos;
2411 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] | MLX5DR_ACTION_FLAG_SHARED;
2412 
2413 	pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
2414 	acts->mhdr->action = mlx5dr_action_create_modify_header(priv->dr_ctx, 1,
2415 								&pattern, 0, flags);
2416 	if (!acts->mhdr->action)
2417 		return rte_flow_error_set(error, rte_errno,
2418 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2419 					  "translate modify_header: failed to create DR action");
2420 	acts->rule_acts[mhdr_ix].action = acts->mhdr->action;
2421 	return 0;
2422 }
2423 
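/*
 * Create the IPv6 routing extension push/pop reformat action. The pop action
 * and a push with masked data are created as shared actions; a push with
 * unmasked data is bulk-allocated according to the table size and its header is
 * filled per flow rule.
 */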
2424 static int
2425 mlx5_create_ipv6_ext_reformat(struct rte_eth_dev *dev,
2426 			      const struct mlx5_flow_template_table_cfg *cfg,
2427 			      struct mlx5_hw_actions *acts,
2428 			      struct rte_flow_actions_template *at,
2429 			      uint8_t *push_data, uint8_t *push_data_m,
2430 			      size_t push_size, uint16_t recom_src,
2431 			      enum mlx5dr_action_type recom_type)
2432 {
2433 	struct mlx5_priv *priv = dev->data->dev_private;
2434 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2435 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2436 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
2437 	struct mlx5_action_construct_data *act_data;
2438 	struct mlx5dr_action_reformat_header hdr = {0};
2439 	uint32_t flag, bulk = 0;
2440 
2441 	flag = mlx5_hw_act_flag[!!attr->group][type];
2442 	acts->push_remove = mlx5_malloc(MLX5_MEM_ZERO,
2443 					sizeof(*acts->push_remove) + push_size,
2444 					0, SOCKET_ID_ANY);
2445 	if (!acts->push_remove)
2446 		return -ENOMEM;
2447 
2448 	switch (recom_type) {
2449 	case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
2450 		if (!push_data || !push_size)
2451 			goto err1;
2452 		if (!push_data_m) {
2453 			bulk = rte_log2_u32(table_attr->nb_flows);
2454 		} else {
2455 			flag |= MLX5DR_ACTION_FLAG_SHARED;
2456 			acts->push_remove->shared = 1;
2457 		}
2458 		acts->push_remove->data_size = push_size;
2459 		memcpy(acts->push_remove->data, push_data, push_size);
2460 		hdr.data = push_data;
2461 		hdr.sz = push_size;
2462 		break;
2463 	case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
2464 		flag |= MLX5DR_ACTION_FLAG_SHARED;
2465 		acts->push_remove->shared = 1;
2466 		break;
2467 	default:
2468 		break;
2469 	}
2470 
2471 	acts->push_remove->action =
2472 		mlx5dr_action_create_reformat_ipv6_ext(priv->dr_ctx,
2473 				recom_type, &hdr, bulk, flag);
2474 	if (!acts->push_remove->action)
2475 		goto err1;
2476 	acts->rule_acts[at->recom_off].action = acts->push_remove->action;
2477 	acts->rule_acts[at->recom_off].ipv6_ext.header = acts->push_remove->data;
2478 	acts->rule_acts[at->recom_off].ipv6_ext.offset = 0;
2479 	acts->push_remove_pos = at->recom_off;
2480 	if (!acts->push_remove->shared) {
2481 		act_data = __flow_hw_act_data_push_append(dev, acts,
2482 				RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH,
2483 				recom_src, at->recom_off, push_size);
2484 		if (!act_data)
2485 			goto err;
2486 	}
2487 	return 0;
2488 err:
2489 	if (acts->push_remove->action)
2490 		mlx5dr_action_destroy(acts->push_remove->action);
2491 err1:
2492 	if (acts->push_remove) {
2493 		mlx5_free(acts->push_remove);
2494 		acts->push_remove = NULL;
2495 	}
2496 	return -EINVAL;
2497 }
2498 
2499 /**
2500  * Translate rte_flow actions to DR action.
2501  *
2502  * The action template has already indicated which actions are used. Translate
2503  * the rte_flow actions to DR actions where possible, so that the flow creation
2504  * stage saves the cycles otherwise spent on organizing the actions.
2505  * Actions with limited information need to be added to a list and are resolved
2506  * at flow creation time.
2507  *
2508  * @param[in] dev
2509  *   Pointer to the rte_eth_dev structure.
2510  * @param[in] cfg
2511  *   Pointer to the table configuration.
2512  * @param[in/out] acts
2513  *   Pointer to the template HW steering DR actions.
2514  * @param[in] at
2515  *   Action template.
2516  * @param[in] nt_mode
2517  *   Non template rule translate.
2518  * @param[out] error
2519  *   Pointer to error structure.
2520  *
2521  * @return
2522  *   0 on success, a negative errno otherwise and rte_errno is set.
2523  */
2524 static int
2525 __flow_hw_translate_actions_template(struct rte_eth_dev *dev,
2526 				     const struct mlx5_flow_template_table_cfg *cfg,
2527 				     struct mlx5_hw_actions *acts,
2528 				     struct rte_flow_actions_template *at,
2529 				     struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2530 				     bool nt_mode,
2531 				     struct rte_flow_error *error)
2532 {
2533 	struct mlx5_priv *priv = dev->data->dev_private;
2534 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2535 	struct mlx5_hca_flex_attr *hca_attr = &priv->sh->cdev->config.hca_attr.flex;
2536 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2537 	struct rte_flow_action *actions = at->actions;
2538 	struct rte_flow_action *masks = at->masks;
2539 	enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
2540 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
2541 	const struct rte_flow_action_raw_encap *raw_encap_data;
2542 	const struct rte_flow_action_ipv6_ext_push *ipv6_ext_data;
2543 	const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
2544 	uint16_t reformat_src = 0, recom_src = 0;
2545 	uint8_t *encap_data = NULL, *encap_data_m = NULL;
2546 	uint8_t *push_data = NULL, *push_data_m = NULL;
2547 	size_t data_size = 0, push_size = 0;
2548 	struct mlx5_hw_modify_header_action mhdr = { 0 };
2549 	bool actions_end = false;
2550 	uint32_t type;
2551 	bool reformat_used = false;
2552 	bool recom_used = false;
2553 	unsigned int of_vlan_offset;
2554 	uint16_t jump_pos;
2555 	uint32_t ct_idx;
2556 	int ret, err;
2557 	uint32_t target_grp = 0;
2558 	int table_type;
2559 
2560 	flow_hw_modify_field_init(&mhdr, at);
2561 	if (attr->transfer)
2562 		type = MLX5DR_TABLE_TYPE_FDB;
2563 	else if (attr->egress)
2564 		type = MLX5DR_TABLE_TYPE_NIC_TX;
2565 	else
2566 		type = MLX5DR_TABLE_TYPE_NIC_RX;
2567 	for (; !actions_end; actions++, masks++) {
2568 		uint64_t pos = actions - at->actions;
2569 		uint16_t src_pos = pos - at->src_off[pos];
2570 		uint16_t dr_pos = at->dr_off[pos];
2571 
2572 		switch ((int)actions->type) {
2573 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2574 			if (!attr->group) {
2575 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2576 				goto err;
2577 			}
2578 			ret = table_template_translate_indirect_list
2579 				(dev, actions, masks, acts, src_pos, dr_pos);
2580 			if (ret)
2581 				goto err;
2582 			break;
2583 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
2584 			if (!attr->group) {
2585 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2586 				goto err;
2587 			}
2588 			if (actions->conf && masks->conf) {
2589 				if (flow_hw_shared_action_translate
2590 				(dev, actions, acts, src_pos, dr_pos))
2591 					goto err;
2592 			} else if (__flow_hw_act_data_indirect_append
2593 					(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
2594 					 masks->type, src_pos, dr_pos)){
2595 				goto err;
2596 			}
2597 			break;
2598 		case RTE_FLOW_ACTION_TYPE_VOID:
2599 			break;
2600 		case RTE_FLOW_ACTION_TYPE_DROP:
2601 			acts->rule_acts[dr_pos].action =
2602 				priv->hw_drop[!!attr->group];
2603 			break;
2604 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
2605 			if (!attr->group) {
2606 				DRV_LOG(ERR, "Port representor is not supported in root table.");
2607 				goto err;
2608 			}
2609 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2610 			break;
2611 		case RTE_FLOW_ACTION_TYPE_FLAG:
2612 			acts->mark = true;
2613 			acts->rule_acts[dr_pos].tag.value =
2614 				mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
2615 			acts->rule_acts[dr_pos].action =
2616 				priv->hw_tag[!!attr->group];
2617 			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
2618 					rte_memory_order_relaxed);
2619 			flow_hw_rxq_flag_set(dev, true);
2620 			break;
2621 		case RTE_FLOW_ACTION_TYPE_MARK:
2622 			acts->mark = true;
2623 			if (masks->conf &&
2624 			    ((const struct rte_flow_action_mark *)
2625 			     masks->conf)->id)
2626 				acts->rule_acts[dr_pos].tag.value =
2627 					mlx5_flow_mark_set
2628 					(((const struct rte_flow_action_mark *)
2629 					(actions->conf))->id);
2630 			else if (__flow_hw_act_data_general_append(priv, acts,
2631 								   actions->type,
2632 								   src_pos, dr_pos))
2633 				goto err;
2634 			acts->rule_acts[dr_pos].action =
2635 				priv->hw_tag[!!attr->group];
2636 			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
2637 					rte_memory_order_relaxed);
2638 			flow_hw_rxq_flag_set(dev, true);
2639 			break;
2640 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2641 			acts->rule_acts[dr_pos].action =
2642 				priv->hw_push_vlan[type];
2643 			if (is_template_masked_push_vlan(masks->conf))
2644 				acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
2645 					vlan_hdr_to_be32(actions);
2646 			else if (__flow_hw_act_data_general_append
2647 					(priv, acts, actions->type,
2648 					 src_pos, dr_pos))
2649 				goto err;
2650 			of_vlan_offset = is_of_vlan_pcp_present(actions) ?
2651 					MLX5_HW_VLAN_PUSH_PCP_IDX :
2652 					MLX5_HW_VLAN_PUSH_VID_IDX;
2653 			actions += of_vlan_offset;
2654 			masks += of_vlan_offset;
2655 			break;
2656 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2657 			acts->rule_acts[dr_pos].action =
2658 				priv->hw_pop_vlan[type];
2659 			break;
2660 		case RTE_FLOW_ACTION_TYPE_JUMP:
2661 			if (masks->conf &&
2662 			    ((const struct rte_flow_action_jump *)
2663 			     masks->conf)->group) {
2664 				uint32_t jump_group =
2665 					((const struct rte_flow_action_jump *)
2666 					actions->conf)->group;
2667 				acts->jump = flow_hw_jump_action_register
2668 						(dev, cfg, jump_group, error);
2669 				if (!acts->jump)
2670 					goto err;
2671 				acts->rule_acts[dr_pos].action = (!!attr->group) ?
2672 								 acts->jump->hws_action :
2673 								 acts->jump->root_action;
2674 			} else if (__flow_hw_act_data_general_append
2675 					(priv, acts, actions->type,
2676 					 src_pos, dr_pos)){
2677 				goto err;
2678 			}
2679 			break;
2680 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2681 			if (masks->conf &&
2682 			    ((const struct rte_flow_action_queue *)
2683 			     masks->conf)->index) {
2684 				acts->tir = flow_hw_tir_action_register
2685 				(dev, mlx5_hw_act_flag[!!attr->group][type],
2686 				 actions);
2687 				if (!acts->tir)
2688 					goto err;
2689 				acts->rule_acts[dr_pos].action =
2690 					acts->tir->action;
2691 			} else if (__flow_hw_act_data_general_append
2692 					(priv, acts, actions->type,
2693 					 src_pos, dr_pos)) {
2694 				goto err;
2695 			}
2696 			break;
2697 		case RTE_FLOW_ACTION_TYPE_RSS:
2698 			if (actions->conf && masks->conf) {
2699 				acts->tir = flow_hw_tir_action_register
2700 				(dev, mlx5_hw_act_flag[!!attr->group][type],
2701 				 actions);
2702 				if (!acts->tir)
2703 					goto err;
2704 				acts->rule_acts[dr_pos].action =
2705 					acts->tir->action;
2706 			} else if (__flow_hw_act_data_general_append
2707 					(priv, acts, actions->type,
2708 					 src_pos, dr_pos)) {
2709 				goto err;
2710 			}
2711 			break;
2712 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2713 			MLX5_ASSERT(!reformat_used);
2714 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2715 							 actions->conf);
2716 			if (masks->conf)
2717 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2718 								   masks->conf);
2719 			reformat_used = true;
2720 			reformat_src = src_pos;
2721 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2722 			break;
2723 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2724 			MLX5_ASSERT(!reformat_used);
2725 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2726 							 actions->conf);
2727 			if (masks->conf)
2728 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2729 								   masks->conf);
2730 			reformat_used = true;
2731 			reformat_src = src_pos;
2732 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2733 			break;
2734 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2735 			raw_encap_data =
2736 				(const struct rte_flow_action_raw_encap *)
2737 				 masks->conf;
2738 			if (raw_encap_data)
2739 				encap_data_m = raw_encap_data->data;
2740 			raw_encap_data =
2741 				(const struct rte_flow_action_raw_encap *)
2742 				 actions->conf;
2743 			encap_data = raw_encap_data->data;
2744 			data_size = raw_encap_data->size;
2745 			if (reformat_used) {
2746 				refmt_type = data_size <
2747 				MLX5_ENCAPSULATION_DECISION_SIZE ?
2748 				MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
2749 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
2750 			} else {
2751 				reformat_used = true;
2752 				refmt_type =
2753 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2754 			}
2755 			reformat_src = src_pos;
2756 			break;
2757 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2758 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2759 			MLX5_ASSERT(!reformat_used);
2760 			reformat_used = true;
2761 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2762 			break;
2763 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2764 			reformat_used = true;
2765 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2766 			break;
2767 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
2768 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2769 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2770 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2771 				goto err;
2772 			}
2773 			MLX5_ASSERT(!recom_used && !recom_type);
2774 			recom_used = true;
2775 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
2776 			ipv6_ext_data =
2777 				(const struct rte_flow_action_ipv6_ext_push *)masks->conf;
2778 			if (ipv6_ext_data)
2779 				push_data_m = ipv6_ext_data->data;
2780 			ipv6_ext_data =
2781 				(const struct rte_flow_action_ipv6_ext_push *)actions->conf;
2782 			if (ipv6_ext_data) {
2783 				push_data = ipv6_ext_data->data;
2784 				push_size = ipv6_ext_data->size;
2785 			}
2786 			recom_src = src_pos;
2787 			break;
2788 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
2789 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2790 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2791 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2792 				goto err;
2793 			}
2794 			recom_used = true;
2795 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
2796 			break;
2797 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
2798 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2799 						&target_grp, error);
2800 			if (ret)
2801 				return ret;
2802 			if (target_grp == 0) {
2803 				__flow_hw_action_template_destroy(dev, acts);
2804 				return rte_flow_error_set(error, ENOTSUP,
2805 						RTE_FLOW_ERROR_TYPE_ACTION,
2806 						NULL,
2807 						"Send to kernel action on root table is not supported in HW steering mode");
2808 			}
2809 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
2810 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
2811 				      MLX5DR_TABLE_TYPE_FDB);
2812 			acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
2813 			break;
2814 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
2815 			err = flow_hw_modify_field_compile(dev, attr, actions,
2816 							   masks, acts, &mhdr,
2817 							   src_pos, error);
2818 			if (err)
2819 				goto err;
2820 			break;
2821 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2822 			if (flow_hw_represented_port_compile
2823 					(dev, attr, actions,
2824 					 masks, acts, src_pos, dr_pos, error))
2825 				goto err;
2826 			break;
2827 		case RTE_FLOW_ACTION_TYPE_METER:
2828 			/*
2829 			 * The METER action is compiled into 2 DR actions - ASO_METER and FT.
2830 			 * The calculated DR offset is stored only for ASO_METER; FT
2831 			 * is assumed to be the next action.
2832 			 */
2833 			jump_pos = dr_pos + 1;
2834 			if (actions->conf && masks->conf &&
2835 			    ((const struct rte_flow_action_meter *)
2836 			     masks->conf)->mtr_id) {
2837 				err = flow_hw_meter_compile(dev, cfg,
2838 							    dr_pos, jump_pos, actions, acts, error);
2839 				if (err)
2840 					goto err;
2841 			} else if (__flow_hw_act_data_general_append(priv, acts,
2842 								     actions->type,
2843 								     src_pos,
2844 								     dr_pos))
2845 				goto err;
2846 			break;
2847 		case RTE_FLOW_ACTION_TYPE_AGE:
2848 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2849 						&target_grp, error);
2850 			if (ret)
2851 				return ret;
2852 			if (target_grp == 0) {
2853 				__flow_hw_action_template_destroy(dev, acts);
2854 				return rte_flow_error_set(error, ENOTSUP,
2855 						RTE_FLOW_ERROR_TYPE_ACTION,
2856 						NULL,
2857 						"Age action on root table is not supported in HW steering mode");
2858 			}
2859 			if (__flow_hw_act_data_general_append(priv, acts,
2860 							      actions->type,
2861 							      src_pos,
2862 							      dr_pos))
2863 				goto err;
2864 			break;
2865 		case RTE_FLOW_ACTION_TYPE_COUNT:
2866 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2867 						&target_grp, error);
2868 			if (ret)
2869 				return ret;
2870 			if (target_grp == 0) {
2871 				__flow_hw_action_template_destroy(dev, acts);
2872 				return rte_flow_error_set(error, ENOTSUP,
2873 						RTE_FLOW_ERROR_TYPE_ACTION,
2874 						NULL,
2875 						"Counter action on root table is not supported in HW steering mode");
2876 			}
2877 			if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
2878 			    (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
2879 				/*
2880 				 * When both COUNT and AGE are requested, it is
2881 				 * saved as an AGE action, which also creates the
2882 				 * counter.
2883 				 */
2884 				break;
2885 			if (masks->conf &&
2886 			    ((const struct rte_flow_action_count *)
2887 			     masks->conf)->id) {
2888 				err = flow_hw_cnt_compile(dev, dr_pos, acts);
2889 				if (err)
2890 					goto err;
2891 			} else if (__flow_hw_act_data_general_append
2892 					(priv, acts, actions->type,
2893 					 src_pos, dr_pos)) {
2894 				goto err;
2895 			}
2896 			break;
2897 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2898 			if (masks->conf) {
2899 				ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(actions->conf);
2900 				if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
2901 						       &acts->rule_acts[dr_pos]))
2902 					goto err;
2903 			} else if (__flow_hw_act_data_general_append
2904 					(priv, acts, actions->type,
2905 					 src_pos, dr_pos)) {
2906 				goto err;
2907 			}
2908 			break;
2909 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
2910 			if (actions->conf && masks->conf &&
2911 			    ((const struct rte_flow_action_meter_mark *)
2912 			     masks->conf)->profile) {
2913 				err = flow_hw_meter_mark_compile(dev,
2914 								 dr_pos, actions,
2915 								 acts->rule_acts,
2916 								 &acts->mtr_id,
2917 								 MLX5_HW_INV_QUEUE,
2918 								 error);
2919 				if (err)
2920 					goto err;
2921 			} else if (__flow_hw_act_data_general_append(priv, acts,
2922 								     actions->type,
2923 								     src_pos,
2924 								     dr_pos))
2925 				goto err;
2926 			break;
2927 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
2928 			/* Internal, can be skipped. */
2929 			if (!!attr->group) {
2930 				DRV_LOG(ERR, "DEFAULT MISS action is only"
2931 					" supported in root table.");
2932 				goto err;
2933 			}
2934 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2935 			break;
2936 		case RTE_FLOW_ACTION_TYPE_NAT64:
2937 			if (masks->conf &&
2938 			    ((const struct rte_flow_action_nat64 *)masks->conf)->type) {
2939 				const struct rte_flow_action_nat64 *nat64_c =
2940 					(const struct rte_flow_action_nat64 *)actions->conf;
2941 
2942 				acts->rule_acts[dr_pos].action =
2943 					priv->action_nat64[type][nat64_c->type];
2944 			} else if (__flow_hw_act_data_general_append(priv, acts,
2945 								     actions->type,
2946 								     src_pos, dr_pos))
2947 				goto err;
2948 			break;
2949 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
2950 			if (masks->conf &&
2951 			    ((const struct rte_flow_action_jump_to_table_index *)
2952 			     masks->conf)->table) {
2953 				struct rte_flow_template_table *jump_table =
2954 					((const struct rte_flow_action_jump_to_table_index *)
2955 					actions->conf)->table;
2956 				acts->rule_acts[dr_pos].jump_to_matcher.offset =
2957 					((const struct rte_flow_action_jump_to_table_index *)
2958 					actions->conf)->index;
2959 				if (likely(!rte_flow_template_table_resizable(dev->data->port_id,
2960 									&jump_table->cfg.attr))) {
2961 					acts->rule_acts[dr_pos].action =
2962 						jump_table->matcher_info[0].jump;
2963 				} else {
2964 					uint32_t selector;
2965 					rte_rwlock_read_lock(&jump_table->matcher_replace_rwlk);
2966 					selector = jump_table->matcher_selector;
2967 					acts->rule_acts[dr_pos].action =
2968 						jump_table->matcher_info[selector].jump;
2969 					rte_rwlock_read_unlock(&jump_table->matcher_replace_rwlk);
2970 				}
2971 			} else if (__flow_hw_act_data_general_append
2972 					(priv, acts, actions->type,
2973 					 src_pos, dr_pos)){
2974 				goto err;
2975 			}
2976 			break;
2977 		case RTE_FLOW_ACTION_TYPE_END:
2978 			actions_end = true;
2979 			break;
2980 		default:
2981 			break;
2982 		}
2983 	}
2984 	if (mhdr.pos != UINT16_MAX) {
2985 		ret = mlx5_tbl_translate_modify_header(dev, cfg, acts, mp_ctx, &mhdr, error);
2986 		if (ret)
2987 			goto err;
2988 		if (!nt_mode && mhdr.shared) {
2989 			ret = mlx5_tbl_ensure_shared_modify_header(dev, cfg, acts, error);
2990 			if (ret)
2991 				goto err;
2992 		}
2993 	}
2994 	if (reformat_used) {
2995 		ret = mlx5_tbl_translate_reformat(priv, acts, at,
2996 						  enc_item, enc_item_m,
2997 						  encap_data, encap_data_m,
2998 						  mp_ctx, data_size,
2999 						  reformat_src,
3000 						  refmt_type, error);
3001 		if (ret)
3002 			goto err;
3003 		if (!nt_mode && acts->encap_decap->shared) {
3004 			ret = mlx5_tbl_create_reformat_action(priv, table_attr, acts, at,
3005 							      encap_data, data_size,
3006 							      refmt_type);
3007 			if (ret)
3008 				goto err;
3009 		}
3010 	}
3011 	if (recom_used) {
3012 		MLX5_ASSERT(at->recom_off != UINT16_MAX);
3013 		ret = mlx5_create_ipv6_ext_reformat(dev, cfg, acts, at, push_data,
3014 						    push_data_m, push_size, recom_src,
3015 						    recom_type);
3016 		if (ret)
3017 			goto err;
3018 	}
3019 	return 0;
3020 err:
3021 	/* If the error state was reached without rte_errno being set, default to EINVAL. */
3022 	if (!rte_errno)
3023 		rte_errno = EINVAL;
3024 	err = rte_errno;
3025 	__flow_hw_action_template_destroy(dev, acts);
3026 	return rte_flow_error_set(error, err,
3027 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3028 				  "failed to create rte table");
3029 }
3030 
3031 /**
3032  * Translate rte_flow actions to DR action.
3033  * The action template has already indicated which actions are used. Translate
3034  * the rte_flow actions to DR actions where possible, so that the flow creation
3035  * stage saves the cycles otherwise spent on organizing the actions.
3036  * Actions with limited information need to be added to a list and are resolved
3037  * at flow creation time.
3038  * list.
3039  *
3040  * @param[in] dev
3041  *   Pointer to the rte_eth_dev structure.
3042  * @param[in] cfg
3043  *   Pointer to the table configuration.
3044  * @param[in/out] acts
3045  *   Pointer to the template HW steering DR actions.
3046  * @param[in] at
3047  *   Action template.
3048  * @param[out] error
3049  *   Pointer to error structure.
3050  *
3051  * @return
3052  *   0 on success, a negative errno otherwise and rte_errno is set.
3053  */
3054 static int
3055 flow_hw_translate_actions_template(struct rte_eth_dev *dev,
3056 			    const struct mlx5_flow_template_table_cfg *cfg,
3057 			    struct mlx5_hw_actions *acts,
3058 			    struct rte_flow_actions_template *at,
3059 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
3060 			    struct rte_flow_error *error)
3061 {
3062 	return __flow_hw_translate_actions_template(dev, cfg, acts, at, mp_ctx, false, error);
3063 }
3064 
3065 static __rte_always_inline struct mlx5dr_rule_action *
3066 flow_hw_get_dr_action_buffer(struct mlx5_priv *priv,
3067 			     struct rte_flow_template_table *table,
3068 			     uint8_t action_template_index,
3069 			     uint32_t queue)
3070 {
3071 	uint32_t offset = action_template_index * priv->nb_queue + queue;
3072 
3073 	return &table->rule_acts[offset].acts[0];
3074 }
3075 
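/*
 * Copy the translated rule actions of an action template into the per-queue
 * rule action buffers of the table, so that each queue can construct flow rules
 * from its own copy.
 */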
3076 static void
3077 flow_hw_populate_rule_acts_caches(struct rte_eth_dev *dev,
3078 				  struct rte_flow_template_table *table,
3079 				  uint8_t at_idx)
3080 {
3081 	struct mlx5_priv *priv = dev->data->dev_private;
3082 	uint32_t q;
3083 
3084 	for (q = 0; q < priv->nb_queue; ++q) {
3085 		struct mlx5dr_rule_action *rule_acts =
3086 				flow_hw_get_dr_action_buffer(priv, table, at_idx, q);
3087 
3088 		rte_memcpy(rule_acts, table->ats[at_idx].acts.rule_acts,
3089 			   sizeof(table->ats[at_idx].acts.rule_acts));
3090 	}
3091 }
3092 
3093 /**
3094  * Translate rte_flow actions to DR action.
3095  *
3096  * @param[in] dev
3097  *   Pointer to the rte_eth_dev structure.
3098  * @param[in] tbl
3099  *   Pointer to the flow template table.
3100  * @param[out] error
3101  *   Pointer to error structure.
3102  *
3103  * @return
3104  *    0 on success, negative value otherwise and rte_errno is set.
3105  */
3106 static int
3107 flow_hw_translate_all_actions_templates(struct rte_eth_dev *dev,
3108 			  struct rte_flow_template_table *tbl,
3109 			  struct rte_flow_error *error)
3110 {
3111 	int ret;
3112 	uint32_t i;
3113 
3114 	for (i = 0; i < tbl->nb_action_templates; i++) {
3115 		if (flow_hw_translate_actions_template(dev, &tbl->cfg,
3116 						&tbl->ats[i].acts,
3117 						tbl->ats[i].action_template,
3118 						&tbl->mpctx, error))
3119 			goto err;
3120 		flow_hw_populate_rule_acts_caches(dev, tbl, i);
3121 	}
3122 	ret = mlx5_tbl_multi_pattern_process(dev, tbl, &tbl->mpctx.segments[0],
3123 					     rte_log2_u32(tbl->cfg.attr.nb_flows),
3124 					     error);
3125 	if (ret)
3126 		goto err;
3127 	return 0;
3128 err:
3129 	while (i--)
3130 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
3131 	return -1;
3132 }
3133 
3134 /**
3135  * Get shared indirect action.
3136  *
3137  * @param[in] dev
3138  *   Pointer to the rte_eth_dev data structure.
3139  * @param[in] act_data
3140  *   Pointer to the recorded action construct data.
3141  * @param[in] item_flags
3142  *   The matcher item_flags used for RSS lookup.
3143  * @param[in] rule_act
3144  *   Pointer to the shared action's destination rule DR action.
3145  *
3146  * @return
3147  *    0 on success, negative value otherwise and rte_errno is set.
3148  */
3149 static __rte_always_inline int
3150 flow_hw_shared_action_get(struct rte_eth_dev *dev,
3151 			  struct mlx5_action_construct_data *act_data,
3152 			  const uint64_t item_flags,
3153 			  struct mlx5dr_rule_action *rule_act)
3154 {
3155 	struct mlx5_priv *priv = dev->data->dev_private;
3156 	struct mlx5_flow_rss_desc rss_desc = { 0 };
3157 	uint64_t hash_fields = 0;
3158 	uint32_t hrxq_idx = 0;
3159 	struct mlx5_hrxq *hrxq = NULL;
3160 	int act_type = act_data->type;
3161 
3162 	switch (act_type) {
3163 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3164 		rss_desc.level = act_data->shared_rss.level;
3165 		rss_desc.types = act_data->shared_rss.types;
3166 		rss_desc.symmetric_hash_function = act_data->shared_rss.symmetric_hash_function;
3167 		flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
3168 		hrxq_idx = flow_dv_action_rss_hrxq_lookup
3169 			(dev, act_data->shared_rss.idx, hash_fields);
3170 		if (hrxq_idx)
3171 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
3172 					      hrxq_idx);
3173 		if (hrxq) {
3174 			rule_act->action = hrxq->action;
3175 			return 0;
3176 		}
3177 		break;
3178 	default:
3179 		DRV_LOG(WARNING, "Unsupported shared action type:%d",
3180 			act_data->type);
3181 		break;
3182 	}
3183 	return -1;
3184 }
3185 
3186 static void
3187 flow_hw_construct_quota(struct mlx5_priv *priv,
3188 			struct mlx5dr_rule_action *rule_act, uint32_t qid)
3189 {
3190 	rule_act->action = priv->quota_ctx.dr_action;
3191 	rule_act->aso_meter.offset = qid - 1;
3192 	rule_act->aso_meter.init_color =
3193 		MLX5DR_ACTION_ASO_METER_COLOR_GREEN;
3194 }
3195 
3196 /**
3197  * Construct shared indirect action.
3198  *
3199  * @param[in] dev
3200  *   Pointer to the rte_eth_dev data structure.
3201  * @param[in] queue
3202  *   The flow creation queue index.
3203  * @param[in] action
3204  *   Pointer to the shared indirect rte_flow action.
3205  * @param[in] table
3206  *   Pointer to the flow table.
3207  * @param[in] item_flags
3208  *   Item flags.
3209  * @param[in] action_flags
3210  *   Actions bit-map detected in this template.
3211  * @param[in, out] flow
3212  *   Pointer to the flow containing the counter.
3213  * @param[in] rule_act
3214  *   Pointer to the shared action's destination rule DR action.
3215  *
3216  * @return
3217  *    0 on success, negative value otherwise and rte_errno is set.
3218  */
3219 static __rte_always_inline int
3220 flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
3221 				const struct rte_flow_action *action,
3222 				struct rte_flow_template_table *table __rte_unused,
3223 				const uint64_t item_flags, uint64_t action_flags,
3224 				struct rte_flow_hw *flow,
3225 				struct mlx5dr_rule_action *rule_act)
3226 {
3227 	struct mlx5_priv *priv = dev->data->dev_private;
3228 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3229 	struct mlx5_action_construct_data act_data;
3230 	struct mlx5_shared_action_rss *shared_rss;
3231 	struct mlx5_aso_mtr *aso_mtr;
3232 	struct mlx5_age_info *age_info;
3233 	struct mlx5_hws_age_param *param;
3234 	struct rte_flow_hw_aux *aux;
3235 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
3236 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
3237 	uint32_t idx = act_idx &
3238 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3239 	cnt_id_t age_cnt;
3240 
3241 	memset(&act_data, 0, sizeof(act_data));
3242 	switch (type) {
3243 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
3244 		act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
3245 		shared_rss = mlx5_ipool_get
3246 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
3247 		if (!shared_rss)
3248 			return -1;
3249 		act_data.shared_rss.idx = idx;
3250 		act_data.shared_rss.level = shared_rss->origin.level;
3251 		act_data.shared_rss.types = !shared_rss->origin.types ?
3252 					    RTE_ETH_RSS_IP :
3253 					    shared_rss->origin.types;
3254 		act_data.shared_rss.symmetric_hash_function =
3255 			MLX5_RSS_IS_SYMM(shared_rss->origin.func);
3256 
3257 		if (flow_hw_shared_action_get
3258 				(dev, &act_data, item_flags, rule_act))
3259 			return -1;
3260 		break;
3261 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
3262 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
3263 				act_idx,
3264 				&rule_act->action,
3265 				&rule_act->counter.offset))
3266 			return -1;
3267 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3268 		flow->cnt_id = act_idx;
3269 		break;
3270 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
3271 		aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3272 		/*
3273 		 * Save the index with the indirect type, to recognize
3274 		 * it in flow destroy.
3275 		 */
3276 		mlx5_flow_hw_aux_set_age_idx(flow, aux, act_idx);
3277 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
3278 		if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
3279 			/*
3280 			 * The mutual update for indirect AGE & COUNT will be
3281 			 * performed later, after we have the IDs for both of them.
3282 			 */
3283 			break;
3284 		age_info = GET_PORT_AGE_INFO(priv);
3285 		param = mlx5_ipool_get(age_info->ages_ipool, idx);
3286 		if (param == NULL)
3287 			return -1;
3288 		if (action_flags & MLX5_FLOW_ACTION_COUNT) {
3289 			if (mlx5_hws_cnt_pool_get(priv->hws_cpool,
3290 						  &param->queue_id, &age_cnt,
3291 						  idx) < 0)
3292 				return -1;
3293 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3294 			flow->cnt_id = age_cnt;
3295 			param->nb_cnts++;
3296 		} else {
3297 			/*
3298 			 * Get the counter of this indirect AGE or create one
3299 			 * if it doesn't exist.
3300 			 */
3301 			age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
3302 			if (age_cnt == 0)
3303 				return -1;
3304 		}
3305 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
3306 						     age_cnt, &rule_act->action,
3307 						     &rule_act->counter.offset))
3308 			return -1;
3309 		break;
3310 	case MLX5_INDIRECT_ACTION_TYPE_CT:
3311 		if (flow_hw_ct_compile(dev, queue, idx, rule_act))
3312 			return -1;
3313 		break;
3314 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
3315 		/* Find ASO object. */
3316 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
3317 		if (!aso_mtr)
3318 			return -1;
3319 		rule_act->action = pool->action;
3320 		rule_act->aso_meter.offset = aso_mtr->offset;
3321 		break;
3322 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
3323 		flow_hw_construct_quota(priv, rule_act, idx);
3324 		break;
3325 	default:
3326 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
3327 		break;
3328 	}
3329 	return 0;
3330 }
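
/*
 * Illustrative sketch only (application side, not part of the driver) of how
 * the indirect action handle decoded above reaches this function: the handle
 * returned by rte_flow_action_handle_create() is passed as the conf of an
 * INDIRECT action in the per-rule action array. The "handle" variable below
 * is assumed to be such a previously created handle.
 *
 * @code{.c}
 * struct rte_flow_action rule_actions[] = {
 *	{
 *		.type = RTE_FLOW_ACTION_TYPE_INDIRECT,
 *		.conf = handle,
 *	},
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */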
3331 
3332 static __rte_always_inline int
3333 flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
3334 {
3335 	struct mlx5_modification_cmd cmd_he = {
3336 		.data0 = rte_be_to_cpu_32(cmd->data0),
3337 		.data1 = 0,
3338 	};
3339 
3340 	return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
3341 }
3342 
3343 /**
3344  * Construct the modify header commands of a flow rule.
3345  *
3346  * For an action template that contains a non-shared MODIFY_FIELD action,
3347  * the modify header commands need to be filled with the values taken
3348  * from the rte_flow action during flow creation.
3349  *
3350  * @param[in] mhdr_cmd
3351  *   Pointer to the array of modify header commands to be filled in.
3352  * @param[in] act_data
3353  *   Pointer to the recorded action construct data.
3354  * @param[in] hw_acts
3355  *   Pointer to translated actions from template.
3356  * @param[in] action
3357  *   Pointer to the rte_flow modify_field action.
3358  *
3359  * @return
3360  *    0 on success, negative value otherwise and rte_errno is set.
3361  */
3367 static __rte_always_inline int
3368 flow_hw_modify_field_construct(struct mlx5_modification_cmd *mhdr_cmd,
3369 			       struct mlx5_action_construct_data *act_data,
3370 			       const struct mlx5_hw_actions *hw_acts,
3371 			       const struct rte_flow_action *action)
3372 {
3373 	const struct rte_flow_action_modify_field *mhdr_action = action->conf;
3374 	uint8_t values[16] = { 0 };
3375 	unaligned_uint32_t *value_p;
3376 	uint32_t i;
3377 	struct field_modify_info *field;
3378 
3379 	if (!hw_acts->mhdr)
3380 		return -1;
3381 	if (hw_acts->mhdr->shared || act_data->modify_header.shared)
3382 		return 0;
3383 	MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
3384 		    mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
3385 	if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
3386 	    mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
3387 		return 0;
3388 	if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
3389 		rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
3390 	else
3391 		rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
3392 	if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
3393 	    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
3394 	    mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
3395 	    mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
3396 		uint8_t tag_index = flow_tag_index_get(&mhdr_action->dst);
3397 
3398 		value_p = (unaligned_uint32_t *)values;
3399 		if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
3400 		    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
3401 			*value_p = rte_cpu_to_be_32(*value_p << 16);
3402 		else
3403 			*value_p = rte_cpu_to_be_32(*value_p);
3404 	} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
3405 		   mhdr_action->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
3406 		uint32_t tmp;
3407 
3408 		/*
3409 		 * Both QFI and Geneve option type are passed as a uint8_t integer,
3410 		 * but they are accessed through the 2nd least significant byte of
3411 		 * a 32-bit field in the modify header command.
3412 		 */
3413 		tmp = values[0];
3414 		value_p = (unaligned_uint32_t *)values;
3415 		*value_p = rte_cpu_to_be_32(tmp << 8);
3416 	}
3417 	i = act_data->modify_header.mhdr_cmds_off;
3418 	field = act_data->modify_header.field;
3419 	do {
3420 		uint32_t off_b;
3421 		uint32_t mask;
3422 		uint32_t data;
3423 		const uint8_t *mask_src;
3424 
3425 		if (i >= act_data->modify_header.mhdr_cmds_end)
3426 			return -1;
3427 		if (flow_hw_mhdr_cmd_is_nop(&mhdr_cmd[i])) {
3428 			++i;
3429 			continue;
3430 		}
3431 		mask_src = (const uint8_t *)act_data->modify_header.mask;
3432 		mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
3433 		if (!mask) {
3434 			++field;
3435 			continue;
3436 		}
3437 		off_b = rte_bsf32(mask);
3438 		data = flow_dv_fetch_field(values + field->offset, field->size);
3439 		/*
3440 		 * IPv6 DSCP uses OUT_IPV6_TRAFFIC_CLASS as its ID, but the DSCP bits
3441 		 * start 2 bits to the left within that field. Shift the data left for IPv6 DSCP.
3442 		 */
3443 		if (field->id == MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS &&
3444 		    mhdr_action->dst.field == RTE_FLOW_FIELD_IPV6_DSCP)
3445 			data <<= MLX5_IPV6_HDR_DSCP_SHIFT;
3446 		data = (data & mask) >> off_b;
3447 		mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
3448 		++field;
3449 	} while (field->size);
3450 	return 0;
3451 }
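
/*
 * A minimal illustrative sketch (not part of the driver) of a non-shared
 * MODIFY_FIELD action that the helper above fills in at flow creation time:
 * writing an immediate 32-bit value into the packet metadata. The value is
 * arbitrary and for illustration only.
 *
 * @code{.c}
 * static const struct rte_flow_action_modify_field set_meta = {
 *	.operation = RTE_FLOW_MODIFY_SET,
 *	.dst = {
 *		.field = RTE_FLOW_FIELD_META,
 *	},
 *	.src = {
 *		.field = RTE_FLOW_FIELD_VALUE,
 *		.value = { 0x00, 0x00, 0x00, 0x01 },
 *	},
 *	.width = 32,
 * };
 * @endcode
 */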
3452 
3453 /**
3454  * Release any actions allocated for the flow rule during actions construction.
3455  *
3456  * @param[in] dev
3457  *   Pointer to the rte_eth_dev structure.
3458  * @param[in] queue
3459  *   The flow creation queue index.
3460  * @param[in] flow
3461  *   Pointer to flow structure.
3462  */
3459 static void
3460 flow_hw_release_actions(struct rte_eth_dev *dev,
3461 			uint32_t queue,
3462 			struct rte_flow_hw *flow)
3463 {
3464 	struct mlx5_priv *priv = dev->data->dev_private;
3465 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3466 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3467 
3468 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
3469 		flow_hw_jump_release(dev, flow->jump);
3470 	else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
3471 		mlx5_hrxq_obj_release(dev, flow->hrxq);
3472 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
3473 		flow_hw_age_count_release(priv, queue, flow, NULL);
3474 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
3475 		mlx5_ipool_free(pool->idx_pool, mlx5_flow_hw_aux_get_mtr_id(flow, aux));
3476 }
3477 
3478 /**
3479  * Construct flow action array. For action templates with dynamic actions,
3480  * these actions are updated according to the rte_flow actions given at
3481  * flow creation time.
3482  *
3483  * @param[in] dev
3484  *   Pointer to the rte_eth_dev structure.
3485  * @param[in] flow
3486  *   Pointer to flow structure.
3487  * @param[in] ap
3488  *   Pointer to container for temporarily constructed actions' parameters.
3489  * @param[in] hw_at
3490  *   Pointer to the action template and its translated actions.
3491  * @param[in] item_flags
3492  *   Item flags.
3493  * @param[in] table
3494  *   Pointer to the template table.
3495  * @param[in] actions
3496  *   Array of rte_flow actions to be checked.
3497  * @param[in] rule_acts
3498  *   Array of DR rule actions to be used during flow creation.
3499  * @param[in] queue
3500  *   The flow creation queue index.
3501  * @param[out] error
3502  *   Pointer to error structure.
3503  * @return
3504  *    0 on success, negative value otherwise and rte_errno is set.
3505  */
3506 static __rte_always_inline int
3507 flow_hw_actions_construct(struct rte_eth_dev *dev,
3508 			  struct rte_flow_hw *flow,
3509 			  struct mlx5_flow_hw_action_params *ap,
3510 			  const struct mlx5_hw_action_template *hw_at,
3511 			  uint64_t item_flags,
3512 			  struct rte_flow_template_table *table,
3513 			  const struct rte_flow_action actions[],
3514 			  struct mlx5dr_rule_action *rule_acts,
3515 			  uint32_t queue,
3516 			  struct rte_flow_error *error)
3517 {
3518 	struct mlx5_priv *priv = dev->data->dev_private;
3519 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3520 	struct mlx5_action_construct_data *act_data;
3521 	const struct rte_flow_actions_template *at = hw_at->action_template;
3522 	const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
3523 	const struct rte_flow_action *action;
3524 	const struct rte_flow_action_raw_encap *raw_encap_data;
3525 	const struct rte_flow_action_ipv6_ext_push *ipv6_push;
3526 	const struct rte_flow_item *enc_item = NULL;
3527 	const struct rte_flow_action_ethdev *port_action = NULL;
3528 	const struct rte_flow_action_meter *meter = NULL;
3529 	const struct rte_flow_action_age *age = NULL;
3530 	const struct rte_flow_action_nat64 *nat64_c = NULL;
3531 	struct rte_flow_attr attr = {
3532 		.ingress = 1,
3533 	};
3534 	uint32_t ft_flag;
3535 	int ret;
3536 	size_t encap_len = 0;
3537 	uint32_t age_idx = 0;
3538 	uint32_t mtr_idx = 0;
3539 	struct mlx5_aso_mtr *aso_mtr;
3540 	struct mlx5_multi_pattern_segment *mp_segment = NULL;
3541 	struct rte_flow_hw_aux *aux;
3542 
3543 	attr.group = table->grp->group_id;
3544 	ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
3545 	if (table->type == MLX5DR_TABLE_TYPE_FDB) {
3546 		attr.transfer = 1;
3547 		attr.ingress = 1;
3548 	} else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
3549 		attr.egress = 1;
3550 		attr.ingress = 0;
3551 	} else {
3552 		attr.ingress = 1;
3553 	}
3554 	if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0 && !hw_acts->mhdr->shared) {
3555 		uint16_t pos = hw_acts->mhdr->pos;
3556 
3557 		mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3558 		if (!mp_segment || !mp_segment->mhdr_action)
3559 			return -1;
3560 		rule_acts[pos].action = mp_segment->mhdr_action;
3561 		/* offset is relative to DR action */
3562 		rule_acts[pos].modify_header.offset =
3563 					flow->res_idx - mp_segment->head_index;
3564 		rule_acts[pos].modify_header.data =
3565 					(uint8_t *)ap->mhdr_cmd;
3566 		MLX5_ASSERT(hw_acts->mhdr->mhdr_cmds_num <= MLX5_MHDR_MAX_CMD);
3567 		rte_memcpy(ap->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
3568 			   sizeof(*ap->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
3569 	}
3570 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
3571 		uint32_t jump_group;
3572 		uint32_t tag;
3573 		struct mlx5_hw_jump_action *jump;
3574 		struct mlx5_hrxq *hrxq;
3575 		uint32_t ct_idx;
3576 		cnt_id_t cnt_id;
3577 		uint32_t *cnt_queue;
3578 		uint32_t mtr_id;
3579 		struct rte_flow_template_table *jump_table;
3580 
3581 		action = &actions[act_data->action_src];
3582 		/*
3583 		 * action template construction replaces
3584 		 * OF_SET_VLAN_VID with MODIFY_FIELD
3585 		 */
3586 		if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3587 			MLX5_ASSERT(act_data->type ==
3588 				    RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
3589 		else
3590 			MLX5_ASSERT(action->type ==
3591 				    RTE_FLOW_ACTION_TYPE_INDIRECT ||
3592 				    (int)action->type == act_data->type);
3593 		switch ((int)act_data->type) {
3594 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
3595 			act_data->indirect_list_cb(dev, act_data, action,
3596 						   &rule_acts[act_data->action_dst]);
3597 			break;
3598 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
3599 			if (flow_hw_shared_action_construct
3600 					(dev, queue, action, table,
3601 					 item_flags, at->action_flags, flow,
3602 					 &rule_acts[act_data->action_dst]))
3603 				goto error;
3604 			break;
3605 		case RTE_FLOW_ACTION_TYPE_VOID:
3606 			break;
3607 		case RTE_FLOW_ACTION_TYPE_MARK:
3608 			tag = mlx5_flow_mark_set
3609 			      (((const struct rte_flow_action_mark *)
3610 			      (action->conf))->id);
3611 			rule_acts[act_data->action_dst].tag.value = tag;
3612 			break;
3613 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3614 			rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
3615 				vlan_hdr_to_be32(action);
3616 			break;
3617 		case RTE_FLOW_ACTION_TYPE_JUMP:
3618 			jump_group = ((const struct rte_flow_action_jump *)
3619 						action->conf)->group;
3620 			jump = flow_hw_jump_action_register
3621 				(dev, &table->cfg, jump_group, NULL);
3622 			if (!jump)
3623 				goto error;
3624 			rule_acts[act_data->action_dst].action =
3625 			(!!attr.group) ? jump->hws_action : jump->root_action;
3626 			flow->jump = jump;
3627 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3628 			break;
3629 		case RTE_FLOW_ACTION_TYPE_RSS:
3630 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3631 			hrxq = flow_hw_tir_action_register(dev, ft_flag, action);
3632 			if (!hrxq)
3633 				goto error;
3634 			rule_acts[act_data->action_dst].action = hrxq->action;
3635 			flow->hrxq = hrxq;
3636 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ;
3637 			break;
3638 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3639 			if (flow_hw_shared_action_get
3640 				(dev, act_data, item_flags,
3641 				 &rule_acts[act_data->action_dst]))
3642 				goto error;
3643 			break;
3644 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3645 			enc_item = ((const struct rte_flow_action_vxlan_encap *)
3646 				   action->conf)->definition;
3647 			if (flow_dv_convert_encap_data(enc_item, ap->encap_data, &encap_len, NULL))
3648 				goto error;
3649 			break;
3650 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3651 			enc_item = ((const struct rte_flow_action_nvgre_encap *)
3652 				   action->conf)->definition;
3653 			if (flow_dv_convert_encap_data(enc_item, ap->encap_data, &encap_len, NULL))
3654 				goto error;
3655 			break;
3656 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3657 			raw_encap_data =
3658 				(const struct rte_flow_action_raw_encap *)
3659 				 action->conf;
3660 			MLX5_ASSERT(raw_encap_data->size == act_data->encap.len);
3661 			if (unlikely(act_data->encap.len > MLX5_ENCAP_MAX_LEN))
3662 				return -1;
3663 			rte_memcpy(ap->encap_data, raw_encap_data->data, act_data->encap.len);
3664 			break;
3665 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
3666 			ipv6_push =
3667 				(const struct rte_flow_action_ipv6_ext_push *)action->conf;
3668 			MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
3669 			if (unlikely(act_data->ipv6_ext.len > MLX5_PUSH_MAX_LEN))
3670 				return -1;
3671 			rte_memcpy(ap->ipv6_push_data, ipv6_push->data,
3672 				   act_data->ipv6_ext.len);
3673 			break;
3674 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3675 			if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3676 				ret = flow_hw_set_vlan_vid_construct(dev, ap->mhdr_cmd,
3677 								     act_data,
3678 								     hw_acts,
3679 								     action);
3680 			else
3681 				ret = flow_hw_modify_field_construct(ap->mhdr_cmd,
3682 								     act_data,
3683 								     hw_acts,
3684 								     action);
3685 			if (ret)
3686 				goto error;
3687 			break;
3688 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3689 			port_action = action->conf;
3690 			if (!priv->hw_vport[port_action->port_id])
3691 				goto error;
3692 			rule_acts[act_data->action_dst].action =
3693 					priv->hw_vport[port_action->port_id];
3694 			break;
3695 		case RTE_FLOW_ACTION_TYPE_QUOTA:
3696 			flow_hw_construct_quota(priv,
3697 						rule_acts + act_data->action_dst,
3698 						act_data->shared_meter.id);
3699 			break;
3700 		case RTE_FLOW_ACTION_TYPE_METER:
3701 			meter = action->conf;
3702 			mtr_id = meter->mtr_id;
3703 			aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
3704 			rule_acts[act_data->action_dst].action =
3705 				priv->mtr_bulk.action;
3706 			rule_acts[act_data->action_dst].aso_meter.offset =
3707 								aso_mtr->offset;
3708 			jump = flow_hw_jump_action_register
3709 				(dev, &table->cfg, aso_mtr->fm.group, NULL);
3710 			if (!jump)
3711 				goto error;
3712 			MLX5_ASSERT
3713 				(!rule_acts[act_data->action_dst + 1].action);
3714 			rule_acts[act_data->action_dst + 1].action =
3715 					(!!attr.group) ? jump->hws_action :
3716 							 jump->root_action;
3717 			flow->jump = jump;
3718 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3719 			if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
3720 				goto error;
3721 			break;
3722 		case RTE_FLOW_ACTION_TYPE_AGE:
3723 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3724 			age = action->conf;
3725 			/*
3726 			 * First, create the AGE parameter, then create its
3727 			 * counter later:
3728 			 * Regular counter - in the next case.
3729 			 * Indirect counter - updated after the loop.
3730 			 */
3731 			age_idx = mlx5_hws_age_action_create(priv, queue, 0,
3732 							     age,
3733 							     flow->res_idx,
3734 							     error);
3735 			if (age_idx == 0)
3736 				goto error;
3737 			mlx5_flow_hw_aux_set_age_idx(flow, aux, age_idx);
3738 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
3739 			if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
3740 				/*
3741 				 * When AGE uses an indirect counter, there is no
3742 				 * need to create a counter, but it has to be updated
3743 				 * with the AGE parameter; this is done after the loop.
3744 				 */
3745 				break;
3746 			/* Fall-through. */
3747 		case RTE_FLOW_ACTION_TYPE_COUNT:
3748 			cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
3749 			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);
3750 			if (ret != 0) {
3751 				rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
3752 						action, "Failed to allocate flow counter");
3753 				goto error;
3754 			}
3755 			ret = mlx5_hws_cnt_pool_get_action_offset
3756 				(priv->hws_cpool,
3757 				 cnt_id,
3758 				 &rule_acts[act_data->action_dst].action,
3759 				 &rule_acts[act_data->action_dst].counter.offset
3760 				 );
3761 			if (ret != 0)
3762 				goto error;
3763 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3764 			flow->cnt_id = cnt_id;
3765 			break;
3766 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
3767 			ret = mlx5_hws_cnt_pool_get_action_offset
3768 				(priv->hws_cpool,
3769 				 act_data->shared_counter.id,
3770 				 &rule_acts[act_data->action_dst].action,
3771 				 &rule_acts[act_data->action_dst].counter.offset
3772 				 );
3773 			if (ret != 0)
3774 				goto error;
3775 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3776 			flow->cnt_id = act_data->shared_counter.id;
3777 			break;
3778 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3779 			ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(action->conf);
3780 			if (flow_hw_ct_compile(dev, queue, ct_idx,
3781 					       &rule_acts[act_data->action_dst]))
3782 				goto error;
3783 			break;
3784 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
3785 			mtr_id = act_data->shared_meter.id &
3786 				((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3787 			/* Find ASO object. */
3788 			aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
3789 			if (!aso_mtr)
3790 				goto error;
3791 			rule_acts[act_data->action_dst].action =
3792 							pool->action;
3793 			rule_acts[act_data->action_dst].aso_meter.offset =
3794 							aso_mtr->offset;
3795 			break;
3796 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
3797 			/*
3798 			 * Allocating the meter directly would slow down the
3799 			 * flow insertion rate.
3800 			 */
3801 			ret = flow_hw_meter_mark_compile(dev,
3802 				act_data->action_dst, action,
3803 				rule_acts, &mtr_idx, MLX5_HW_INV_QUEUE, error);
3804 			if (ret != 0)
3805 				goto error;
3806 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3807 			mlx5_flow_hw_aux_set_mtr_id(flow, aux, mtr_idx);
3808 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MTR_ID;
3809 			break;
3810 		case RTE_FLOW_ACTION_TYPE_NAT64:
3811 			nat64_c = action->conf;
3812 			rule_acts[act_data->action_dst].action =
3813 				priv->action_nat64[table->type][nat64_c->type];
3814 			break;
3815 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
3816 			jump_table = ((const struct rte_flow_action_jump_to_table_index *)
3817 						action->conf)->table;
3818 			if (likely(!rte_flow_template_table_resizable(dev->data->port_id,
3819 								      &table->cfg.attr))) {
3820 				rule_acts[act_data->action_dst].action =
3821 					jump_table->matcher_info[0].jump;
3822 			} else {
3823 				uint32_t selector;
3824 				rte_rwlock_read_lock(&table->matcher_replace_rwlk);
3825 				selector = table->matcher_selector;
3826 				rule_acts[act_data->action_dst].action =
3827 					jump_table->matcher_info[selector].jump;
3828 				rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
3829 			}
3830 			rule_acts[act_data->action_dst].jump_to_matcher.offset =
3831 				((const struct rte_flow_action_jump_to_table_index *)
3832 				action->conf)->index;
3833 			break;
3834 		default:
3835 			break;
3836 		}
3837 	}
3838 	if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
3839 		/* If indirect count is used, then CNT_ID flag should be set. */
3840 		MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
3841 		if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
3842 			/* If indirect AGE is used, then AGE_IDX flag should be set. */
3843 			MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX);
3844 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3845 			age_idx = mlx5_flow_hw_aux_get_age_idx(flow, aux) &
3846 				  MLX5_HWS_AGE_IDX_MASK;
3847 			if (mlx5_hws_cnt_age_get(priv->hws_cpool, flow->cnt_id) != age_idx)
3848 				/*
3849 				 * This is the first use of this indirect counter
3850 				 * for this indirect AGE; the number of counters
3851 				 * needs to be increased.
3852 				 */
3853 				mlx5_hws_age_nb_cnt_increase(priv, age_idx);
3854 		}
3855 		/*
3856 		 * Update this indirect counter with the indirect/direct AGE
3857 		 * which is using it.
3858 		 */
3859 		mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, age_idx);
3860 	}
3861 	if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
3862 		int ix = mlx5_multi_pattern_reformat_to_index(hw_acts->encap_decap->action_type);
3863 		struct mlx5dr_rule_action *ra = &rule_acts[hw_acts->encap_decap_pos];
3864 
3865 		if (ix < 0)
3866 			goto error;
3867 		if (!mp_segment)
3868 			mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3869 		if (!mp_segment || !mp_segment->reformat_action[ix])
3870 			goto error;
3871 		ra->action = mp_segment->reformat_action[ix];
3872 		/* reformat offset is relative to selected DR action */
3873 		ra->reformat.offset = flow->res_idx - mp_segment->head_index;
3874 		ra->reformat.data = ap->encap_data;
3875 	}
3876 	if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
3877 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.offset =
3878 				flow->res_idx - 1;
3879 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.header = ap->ipv6_push_data;
3880 	}
3881 	if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) {
3882 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3883 		flow->cnt_id = hw_acts->cnt_id;
3884 	}
3885 	return 0;
3886 
3887 error:
3888 	flow_hw_release_actions(dev, queue, flow);
3889 	rte_errno = EINVAL;
3890 	return -rte_errno;
3891 }
3892 
3893 static const struct rte_flow_item *
3894 flow_hw_get_rule_items(struct rte_eth_dev *dev,
3895 		       const struct rte_flow_template_table *table,
3896 		       const struct rte_flow_item items[],
3897 		       uint8_t pattern_template_index,
3898 		       struct mlx5_flow_hw_pattern_params *pp)
3899 {
3900 	struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
3901 
3902 	/* Only one implicit item can be added to flow rule pattern. */
3903 	MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
3904 	/* At least one item was allocated in pattern params for items. */
3905 	MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
3906 	if (pt->implicit_port) {
3907 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3908 			rte_errno = ENOMEM;
3909 			return NULL;
3910 		}
3911 		/* Set up represented port item in pattern params. */
3912 		pp->port_spec = (struct rte_flow_item_ethdev){
3913 			.port_id = dev->data->port_id,
3914 		};
3915 		pp->items[0] = (struct rte_flow_item){
3916 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
3917 			.spec = &pp->port_spec,
3918 		};
3919 		rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
3920 		return pp->items;
3921 	} else if (pt->implicit_tag) {
3922 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3923 			rte_errno = ENOMEM;
3924 			return NULL;
3925 		}
3926 		/* Set up tag item in pattern params. */
3927 		pp->tag_spec = (struct rte_flow_item_tag){
3928 			.data = flow_hw_tx_tag_regc_value(dev),
3929 		};
3930 		pp->items[0] = (struct rte_flow_item){
3931 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3932 			.spec = &pp->tag_spec,
3933 		};
3934 		rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
3935 		return pp->items;
3936 	} else {
3937 		return items;
3938 	}
3939 }
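
/*
 * Illustrative only: when the pattern template carries an implicit
 * REPRESENTED_PORT item, the pattern handed to mlx5dr is the user pattern
 * with that item prepended, roughly equivalent to the sketch below for a
 * user pattern of { ETH, END }.
 *
 * @code{.c}
 * struct rte_flow_item_ethdev port_spec = { .port_id = dev->data->port_id };
 * struct rte_flow_item expanded[] = {
 *	{
 *		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
 *		.spec = &port_spec,
 *	},
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 */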
3940 
3941 /**
3942  * Enqueue HW steering flow creation.
3943  *
3944  * The flow will be applied to the HW only if the postpone bit is not set or
3945  * the extra push function is called.
3946  * The flow creation status should be checked from dequeue result.
3947  *
3948  * @param[in] dev
3949  *   Pointer to the rte_eth_dev structure.
3950  * @param[in] queue
3951  *   The queue to create the flow.
3952  * @param[in] attr
3953  *   Pointer to the flow operation attributes.
3954  * @param[in] table
3955  *   Pointer to the template table.
3956  * @param[in] insertion_type
3957  *   Insertion type for flow rules.
3958  * @param[in] rule_index
3959  *   Rule index in the table, used for insertion by index.
3960  * @param[in] items
3961  *   Items with flow spec value.
3962  * @param[in] pattern_template_index
3963  *   The item pattern flow follows from the table.
3964  * @param[in] actions
3965  *   Action with flow spec value.
3966  * @param[in] action_template_index
3967  *   The action pattern flow follows from the table.
3968  * @param[in] user_data
3969  *   Pointer to the user_data.
3970  * @param[out] error
3971  *   Pointer to error structure.
3972  *
3973  * @return
3974  *    Flow pointer on success, NULL otherwise and rte_errno is set.
3975  */
3976 static __rte_always_inline struct rte_flow *
3977 flow_hw_async_flow_create_generic(struct rte_eth_dev *dev,
3978 				  uint32_t queue,
3979 				  const struct rte_flow_op_attr *attr,
3980 				  struct rte_flow_template_table *table,
3981 				  enum rte_flow_table_insertion_type insertion_type,
3982 				  uint32_t rule_index,
3983 				  const struct rte_flow_item items[],
3984 				  uint8_t pattern_template_index,
3985 				  const struct rte_flow_action actions[],
3986 				  uint8_t action_template_index,
3987 				  void *user_data,
3988 				  struct rte_flow_error *error)
3989 {
3990 	struct mlx5_priv *priv = dev->data->dev_private;
3991 	struct mlx5dr_rule_attr rule_attr = {
3992 		.queue_id = queue,
3993 		.user_data = user_data,
3994 		.burst = attr->postpone,
3995 	};
3996 	struct mlx5dr_rule_action *rule_acts;
3997 	struct rte_flow_hw *flow = NULL;
3998 	const struct rte_flow_item *rule_items;
3999 	struct rte_flow_error sub_error = { 0 };
4000 	uint32_t flow_idx = 0;
4001 	uint32_t res_idx = 0;
4002 	int ret;
4003 
4004 	if (mlx5_fp_debug_enabled()) {
4005 		if (flow_hw_async_create_validate(dev, queue, table, insertion_type, rule_index,
4006 			items, pattern_template_index, actions, action_template_index, error))
4007 			return NULL;
4008 	}
4009 	flow = mlx5_ipool_malloc(table->flow, &flow_idx);
4010 	if (!flow) {
4011 		rte_errno = ENOMEM;
4012 		goto error;
4013 	}
4014 	rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
4015 	/*
4016 	 * Set the table here in order to know the destination table
4017 	 * when freeing the flow afterwards.
4018 	 */
4019 	flow->table = table;
4020 	flow->mt_idx = pattern_template_index;
4021 	flow->idx = flow_idx;
4022 	if (table->resource) {
4023 		mlx5_ipool_malloc(table->resource, &res_idx);
4024 		if (!res_idx) {
4025 			rte_errno = ENOMEM;
4026 			goto error;
4027 		}
4028 		flow->res_idx = res_idx;
4029 	} else {
4030 		flow->res_idx = flow_idx;
4031 	}
4032 	flow->flags = 0;
4033 	/*
4034 	 * Set the flow operation type here in order to know if the flow memory
4035 	 * should be freed or not when getting the result from dequeue.
4036 	 */
4037 	flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4038 	flow->user_data = user_data;
4039 	rule_attr.user_data = flow;
4040 	/*
4041 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
4042 	 * for rule insertion hints.
4043 	 */
4044 	flow->rule_idx = (rule_index == UINT32_MAX) ? flow->res_idx - 1 : rule_index;
4045 	rule_attr.rule_idx = flow->rule_idx;
4046 	/*
4047 	 * Construct the flow actions based on the input actions.
4048 	 * The implicitly appended action is always fixed, like metadata
4049 	 * copy action from FDB to NIC Rx.
4050 	 * There is no need to copy and construct a new "actions" list based on
4051 	 * the user's input, in order to save the cost.
4052 	 */
4053 	if (flow_hw_actions_construct(dev, flow, &priv->hw_q[queue].ap,
4054 				      &table->ats[action_template_index],
4055 				      table->its[pattern_template_index]->item_flags,
4056 				      flow->table, actions,
4057 				      rule_acts, queue, &sub_error))
4058 		goto error;
4059 	rule_items = flow_hw_get_rule_items(dev, table, items,
4060 					    pattern_template_index, &priv->hw_q[queue].pp);
4061 	if (!rule_items)
4062 		goto error;
4063 	if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
4064 		ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
4065 					 pattern_template_index, rule_items,
4066 					 action_template_index, rule_acts,
4067 					 &rule_attr,
4068 					 (struct mlx5dr_rule *)flow->rule);
4069 	} else {
4070 		struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4071 		uint32_t selector;
4072 
4073 		flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;
4074 		rte_rwlock_read_lock(&table->matcher_replace_rwlk);
4075 		selector = table->matcher_selector;
4076 		ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
4077 					 pattern_template_index, rule_items,
4078 					 action_template_index, rule_acts,
4079 					 &rule_attr,
4080 					 (struct mlx5dr_rule *)flow->rule);
4081 		rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
4082 		aux->matcher_selector = selector;
4083 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR;
4084 	}
4085 	if (likely(!ret)) {
4086 		flow_hw_q_inc_flow_ops(priv, queue);
4087 		return (struct rte_flow *)flow;
4088 	}
4089 error:
4090 	if (table->resource && res_idx)
4091 		mlx5_ipool_free(table->resource, res_idx);
4092 	if (flow_idx)
4093 		mlx5_ipool_free(table->flow, flow_idx);
4094 	if (sub_error.cause != RTE_FLOW_ERROR_TYPE_NONE && error != NULL)
4095 		*error = sub_error;
4096 	else
4097 		rte_flow_error_set(error, rte_errno,
4098 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4099 				   "fail to create rte flow");
4100 	return NULL;
4101 }
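
/*
 * A hedged application-side usage sketch (not part of the driver) of the
 * async path implemented above: enqueue a rule on a queue, push the queue,
 * then poll for the completion. The port_id, queue, table, pattern, actions
 * and user_data variables are assumed to exist and the table and templates
 * to have been created beforehand.
 *
 * @code{.c}
 * struct rte_flow_op_attr op_attr = { .postpone = 1 };
 * struct rte_flow_op_result res[32];
 * struct rte_flow_error err;
 * struct rte_flow *f;
 * int n;
 *
 * f = rte_flow_async_create(port_id, queue, &op_attr, table,
 *			     pattern, 0, actions, 0, user_data, &err);
 * if (f == NULL)
 *	return -1;
 * rte_flow_push(port_id, queue, &err);
 * do {
 *	n = rte_flow_pull(port_id, queue, res, RTE_DIM(res), &err);
 * } while (n == 0);
 * @endcode
 */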
4102 
4103 static struct rte_flow *
4104 flow_hw_async_flow_create(struct rte_eth_dev *dev,
4105 			  uint32_t queue,
4106 			  const struct rte_flow_op_attr *attr,
4107 			  struct rte_flow_template_table *table,
4108 			  const struct rte_flow_item items[],
4109 			  uint8_t pattern_template_index,
4110 			  const struct rte_flow_action actions[],
4111 			  uint8_t action_template_index,
4112 			  void *user_data,
4113 			  struct rte_flow_error *error)
4114 {
4115 	uint32_t rule_index = UINT32_MAX;
4116 
4117 	return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4118 		RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN, rule_index,
4119 		items, pattern_template_index, actions, action_template_index,
4120 		user_data, error);
4121 }
4122 
4123 static struct rte_flow *
4124 flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
4125 			  uint32_t queue,
4126 			  const struct rte_flow_op_attr *attr,
4127 			  struct rte_flow_template_table *table,
4128 			  uint32_t rule_index,
4129 			  const struct rte_flow_action actions[],
4130 			  uint8_t action_template_index,
4131 			  void *user_data,
4132 			  struct rte_flow_error *error)
4133 {
4134 	struct rte_flow_item items[] = {{.type = RTE_FLOW_ITEM_TYPE_END,}};
4135 	uint8_t pattern_template_index = 0;
4136 
4137 	return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4138 		RTE_FLOW_TABLE_INSERTION_TYPE_INDEX, rule_index,
4139 		items, pattern_template_index, actions, action_template_index,
4140 		user_data, error);
4141 }
4142 
4143 static struct rte_flow *
4144 flow_hw_async_flow_create_by_index_with_pattern(struct rte_eth_dev *dev,
4145 						uint32_t queue,
4146 						const struct rte_flow_op_attr *attr,
4147 						struct rte_flow_template_table *table,
4148 						uint32_t rule_index,
4149 						const struct rte_flow_item items[],
4150 						uint8_t pattern_template_index,
4151 						const struct rte_flow_action actions[],
4152 						uint8_t action_template_index,
4153 						void *user_data,
4154 						struct rte_flow_error *error)
4155 {
4156 	return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4157 		RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN, rule_index,
4158 		items, pattern_template_index, actions, action_template_index,
4159 		user_data, error);
4160 }
4161 
4162 /**
4163  * Enqueue HW steering flow update.
4164  *
4165  * The flow will be applied to the HW only if the postpone bit is not set or
4166  * the extra push function is called.
4167  * The flow update status should be checked from the dequeue result.
4168  *
4169  * @param[in] dev
4170  *   Pointer to the rte_eth_dev structure.
4171  * @param[in] queue
4172  *   The queue to update the flow.
4173  * @param[in] attr
4174  *   Pointer to the flow operation attributes.
4175  * @param[in] flow
4176  *   Pointer to the flow to be updated.
4177  * @param[in] actions
4178  *   Action with flow spec value.
4179  * @param[in] action_template_index
4180  *   The action pattern flow follows from the table.
4181  * @param[in] user_data
4182  *   Pointer to the user_data.
4183  * @param[out] error
4184  *   Pointer to error structure.
4185  *
4186  * @return
4187  *    0 on success, negative value otherwise and rte_errno is set.
4188  */
4189 static int
4190 flow_hw_async_flow_update(struct rte_eth_dev *dev,
4191 			   uint32_t queue,
4192 			   const struct rte_flow_op_attr *attr,
4193 			   struct rte_flow *flow,
4194 			   const struct rte_flow_action actions[],
4195 			   uint8_t action_template_index,
4196 			   void *user_data,
4197 			   struct rte_flow_error *error)
4198 {
4199 	struct mlx5_priv *priv = dev->data->dev_private;
4200 	struct mlx5dr_rule_attr rule_attr = {
4201 		.queue_id = queue,
4202 		.user_data = user_data,
4203 		.burst = attr->postpone,
4204 	};
4205 	struct mlx5dr_rule_action *rule_acts;
4206 	struct rte_flow_hw *of = (struct rte_flow_hw *)flow;
4207 	struct rte_flow_hw *nf;
4208 	struct rte_flow_hw_aux *aux;
4209 	struct rte_flow_template_table *table = of->table;
4210 	uint32_t res_idx = 0;
4211 	int ret;
4212 
4213 	if (mlx5_fp_debug_enabled()) {
4214 		if (flow_hw_async_update_validate(dev, queue, of, actions, action_template_index,
4215 						  error))
4216 			return -rte_errno;
4217 	}
4218 	aux = mlx5_flow_hw_aux(dev->data->port_id, of);
4219 	nf = &aux->upd_flow;
4220 	memset(nf, 0, sizeof(struct rte_flow_hw));
4221 	rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
4222 	/*
4223 	 * Set the table here in order to know the destination table
4224 	 * when freeing the flow afterwards.
4225 	 */
4226 	nf->table = table;
4227 	nf->mt_idx = of->mt_idx;
4228 	nf->idx = of->idx;
4229 	if (table->resource) {
4230 		mlx5_ipool_malloc(table->resource, &res_idx);
4231 		if (!res_idx) {
4232 			rte_errno = ENOMEM;
4233 			goto error;
4234 		}
4235 		nf->res_idx = res_idx;
4236 	} else {
4237 		nf->res_idx = of->res_idx;
4238 	}
4239 	nf->flags = 0;
4240 	/* Indicate to the construction function that it should set the proper fields. */
4241 	nf->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
4242 	/*
4243 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
4244 	 * for rule insertion hints.
4245 	 * If there is only one STE, the update will be atomic by nature.
4246 	 */
4247 	nf->rule_idx = nf->res_idx - 1;
4248 	rule_attr.rule_idx = nf->rule_idx;
4249 	/*
4250 	 * Construct the flow actions based on the input actions.
4251 	 * The implicitly appended action is always fixed, like metadata
4252 	 * copy action from FDB to NIC Rx.
4253 	 * There is no need to copy and construct a new "actions" list based on
4254 	 * the user's input, in order to save the cost.
4255 	 */
4256 	if (flow_hw_actions_construct(dev, nf, &priv->hw_q[queue].ap,
4257 				      &table->ats[action_template_index],
4258 				      table->its[nf->mt_idx]->item_flags,
4259 				      table, actions,
4260 				      rule_acts, queue, error)) {
4261 		rte_errno = EINVAL;
4262 		goto error;
4263 	}
4264 	/*
4265 	 * Set the flow operation type here in order to know if the flow memory
4266 	 * should be freed or not when getting the result from dequeue.
4267 	 */
4268 	of->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
4269 	of->user_data = user_data;
4270 	of->flags |= MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW;
4271 	rule_attr.user_data = of;
4272 	ret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,
4273 					action_template_index, rule_acts, &rule_attr);
4274 	if (likely(!ret)) {
4275 		flow_hw_q_inc_flow_ops(priv, queue);
4276 		return 0;
4277 	}
4278 error:
4279 	if (table->resource && res_idx)
4280 		mlx5_ipool_free(table->resource, res_idx);
4281 	return rte_flow_error_set(error, rte_errno,
4282 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4283 				  "fail to update rte flow");
4284 }
4285 
4286 /**
4287  * Enqueue HW steering flow destruction.
4288  *
4289  * The flow will be applied to the HW only if the postpone bit is not set or
4290  * the extra push function is called.
4291  * The flow destruction status should be checked from dequeue result.
4292  *
4293  * @param[in] dev
4294  *   Pointer to the rte_eth_dev structure.
4295  * @param[in] queue
4296  *   The queue to destroy the flow.
4297  * @param[in] attr
4298  *   Pointer to the flow operation attributes.
4299  * @param[in] flow
4300  *   Pointer to the flow to be destroyed.
4301  * @param[in] user_data
4302  *   Pointer to the user_data.
4303  * @param[out] error
4304  *   Pointer to error structure.
4305  *
4306  * @return
4307  *    0 on success, negative value otherwise and rte_errno is set.
4308  */
4309 static int
4310 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
4311 			   uint32_t queue,
4312 			   const struct rte_flow_op_attr *attr,
4313 			   struct rte_flow *flow,
4314 			   void *user_data,
4315 			   struct rte_flow_error *error)
4316 {
4317 	struct mlx5_priv *priv = dev->data->dev_private;
4318 	struct mlx5dr_rule_attr rule_attr = {
4319 		.queue_id = queue,
4320 		.user_data = user_data,
4321 		.burst = attr->postpone,
4322 	};
4323 	struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
4324 	bool resizable = rte_flow_template_table_resizable(dev->data->port_id,
4325 							   &fh->table->cfg.attr);
4326 	int ret;
4327 
4328 	if (mlx5_fp_debug_enabled()) {
4329 		if (flow_hw_async_destroy_validate(dev, queue, fh, error))
4330 			return -rte_errno;
4331 	}
4332 	fh->operation_type = !resizable ?
4333 			     MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY :
4334 			     MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY;
4335 	fh->user_data = user_data;
4336 	rule_attr.user_data = fh;
4337 	rule_attr.rule_idx = fh->rule_idx;
4338 	ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
4339 	if (ret) {
4340 		return rte_flow_error_set(error, rte_errno,
4341 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4342 					  "fail to destroy rte flow");
4343 	}
4344 	flow_hw_q_inc_flow_ops(priv, queue);
4345 	return 0;
4346 }
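
/*
 * A hedged application-side sketch (not part of the driver) of enqueueing a
 * rule destruction on the same queue that created it; as with creation, the
 * completion has to be collected with rte_flow_pull() (after rte_flow_push()
 * if the operation was postponed). The port_id, queue, f and user_data
 * variables are assumed to exist.
 *
 * @code{.c}
 * struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * struct rte_flow_error err;
 *
 * if (rte_flow_async_destroy(port_id, queue, &op_attr, f, user_data, &err))
 *	return -1;
 * @endcode
 */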
4347 
4348 /**
4349  * Release the AGE and counter for given flow.
4350  *
4351  * @param[in] priv
4352  *   Pointer to the port private data structure.
4353  * @param[in] queue
4354  *   The queue to release the counter.
4355  * @param[in, out] flow
4356  *   Pointer to the flow containing the counter.
4357  * @param[out] error
4358  *   Pointer to error structure.
4359  */
4360 static void
4361 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
4362 			  struct rte_flow_hw *flow,
4363 			  struct rte_flow_error *error)
4364 {
4365 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(priv->dev_data->port_id, flow);
4366 	uint32_t *cnt_queue;
4367 	uint32_t age_idx = aux->orig.age_idx;
4368 
4369 	MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
4370 	if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
4371 		if ((flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) &&
4372 		    !mlx5_hws_age_is_indirect(age_idx)) {
4373 			/* Remove this AGE parameter from indirect counter. */
4374 			mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
4375 			/* Release the AGE parameter. */
4376 			mlx5_hws_age_action_destroy(priv, age_idx, error);
4377 		}
4378 		return;
4379 	}
4380 	cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
4381 	/* Put the counter first to reduce the risk of a race with the background thread. */
4382 	mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);
4383 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) {
4384 		if (mlx5_hws_age_is_indirect(age_idx)) {
4385 			uint32_t idx = age_idx & MLX5_HWS_AGE_IDX_MASK;
4386 
4387 			mlx5_hws_age_nb_cnt_decrease(priv, idx);
4388 		} else {
4389 			/* Release the AGE parameter. */
4390 			mlx5_hws_age_action_destroy(priv, age_idx, error);
4391 		}
4392 	}
4393 }
4394 
4395 static __rte_always_inline void
4396 flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job *job,
4397 				  uint32_t queue)
4398 {
4399 	struct mlx5_priv *priv = dev->data->dev_private;
4400 	struct mlx5_aso_ct_action *aso_ct;
4401 	struct mlx5_aso_mtr *aso_mtr;
4402 	uint32_t type, idx;
4403 
4404 	if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
4405 	    MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
4406 		mlx5_quota_async_completion(dev, queue, job);
4407 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
4408 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4409 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4410 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4411 			mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
4412 		}
4413 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
4414 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4415 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4416 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4417 			aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
4418 			aso_mtr->state = ASO_METER_READY;
4419 		} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4420 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4421 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4422 			aso_ct->state = ASO_CONNTRACK_READY;
4423 		}
4424 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
4425 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4426 		if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4427 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4428 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4429 			mlx5_aso_ct_obj_analyze(job->query.user,
4430 						job->query.hw);
4431 			aso_ct->state = ASO_CONNTRACK_READY;
4432 		}
4433 	}
4434 }
4435 
4436 static __rte_always_inline int
4437 mlx5_hw_pull_flow_transfer_comp(struct rte_eth_dev *dev,
4438 				uint32_t queue, struct rte_flow_op_result res[],
4439 				uint16_t n_res)
4440 {
4441 	uint32_t size, i;
4442 	struct rte_flow_hw *flow = NULL;
4443 	struct mlx5_priv *priv = dev->data->dev_private;
4444 	struct rte_ring *ring = priv->hw_q[queue].flow_transfer_completed;
4445 
4446 	size = RTE_MIN(rte_ring_count(ring), n_res);
4447 	for (i = 0; i < size; i++) {
4448 		res[i].status = RTE_FLOW_OP_SUCCESS;
4449 		rte_ring_dequeue(ring, (void **)&flow);
4450 		res[i].user_data = flow->user_data;
4451 		flow_hw_q_dec_flow_ops(priv, queue);
4452 	}
4453 	return (int)size;
4454 }
4455 
4456 static inline int
4457 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
4458 				 uint32_t queue,
4459 				 struct rte_flow_op_result res[],
4460 				 uint16_t n_res)
4461 
4462 {
4463 	struct mlx5_priv *priv = dev->data->dev_private;
4464 	struct rte_ring *r = priv->hw_q[queue].indir_cq;
4465 	void *user_data = NULL;
4466 	int ret_comp, i;
4467 
4468 	ret_comp = (int)rte_ring_count(r);
4469 	if (ret_comp > n_res)
4470 		ret_comp = n_res;
4471 	for (i = 0; i < ret_comp; i++) {
4472 		rte_ring_dequeue(r, &user_data);
4473 		res[i].user_data = user_data;
4474 		res[i].status = RTE_FLOW_OP_SUCCESS;
4475 	}
4476 	if (!priv->shared_host) {
4477 		if (ret_comp < n_res && priv->hws_mpool)
4478 			ret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],
4479 					&res[ret_comp], n_res - ret_comp);
4480 		if (ret_comp < n_res && priv->hws_ctpool)
4481 			ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
4482 					&res[ret_comp], n_res - ret_comp);
4483 	}
4484 	if (ret_comp < n_res && priv->quota_ctx.sq)
4485 		ret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],
4486 						     &res[ret_comp],
4487 						     n_res - ret_comp);
4488 	for (i = 0; i < ret_comp; i++) {
4489 		struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)res[i].user_data;
4490 
4491 		/* Restore user data. */
4492 		res[i].user_data = job->user_data;
4493 		if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
4494 			flow_hw_pull_legacy_indirect_comp(dev, job, queue);
4495 		/*
4496 		 * Current PMD supports 2 indirect action list types - MIRROR and REFORMAT.
4497 		 * These indirect list types do not post WQE to create action.
4498 		 * Future indirect list types that do post WQE will add
4499 		 * completion handlers here.
4500 		 */
4501 		flow_hw_job_put(priv, job, queue);
4502 	}
4503 	return ret_comp;
4504 }
4505 
4506 static __rte_always_inline void
4507 hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
4508 			       struct rte_flow_hw *flow,
4509 			       uint32_t queue, struct rte_flow_error *error)
4510 {
4511 	struct mlx5_priv *priv = dev->data->dev_private;
4512 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
4513 	struct rte_flow_template_table *table = flow->table;
4514 	/* Release the original resource index in case of update. */
4515 	uint32_t res_idx = flow->res_idx;
4516 
4517 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAGS_ALL) {
4518 		struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4519 
4520 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
4521 			flow_hw_jump_release(dev, flow->jump);
4522 		else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
4523 			mlx5_hrxq_obj_release(dev, flow->hrxq);
4524 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
4525 			flow_hw_age_count_release(priv, queue, flow, error);
4526 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
4527 			mlx5_ipool_free(pool->idx_pool, aux->orig.mtr_id);
4528 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW) {
4529 			struct rte_flow_hw *upd_flow = &aux->upd_flow;
4530 
4531 			rte_memcpy(flow, upd_flow, offsetof(struct rte_flow_hw, rule));
4532 			aux->orig = aux->upd;
4533 			flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4534 			if (!flow->nt_rule && table->resource)
4535 				mlx5_ipool_free(table->resource, res_idx);
4536 		}
4537 	}
4538 	if (flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY ||
4539 	    flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY) {
4540 		if (!flow->nt_rule) {
4541 			if (table->resource)
4542 				mlx5_ipool_free(table->resource, res_idx);
4543 			mlx5_ipool_free(table->flow, flow->idx);
4544 		}
4545 	}
4546 }
4547 
4548 static __rte_always_inline void
4549 hw_cmpl_resizable_tbl(struct rte_eth_dev *dev,
4550 		      struct rte_flow_hw *flow,
4551 		      uint32_t queue, enum rte_flow_op_status status,
4552 		      struct rte_flow_error *error)
4553 {
4554 	struct rte_flow_template_table *table = flow->table;
4555 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4556 	uint32_t selector = aux->matcher_selector;
4557 	uint32_t other_selector = (selector + 1) & 1;
4558 
4559 	MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
4560 	switch (flow->operation_type) {
4561 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4562 		rte_atomic_fetch_add_explicit
4563 			(&table->matcher_info[selector].refcnt, 1,
4564 			 rte_memory_order_relaxed);
4565 		break;
4566 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4567 		rte_atomic_fetch_sub_explicit
4568 			(&table->matcher_info[selector].refcnt, 1,
4569 			 rte_memory_order_relaxed);
4570 		hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4571 		break;
4572 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4573 		if (status == RTE_FLOW_OP_SUCCESS) {
4574 			rte_atomic_fetch_sub_explicit
4575 				(&table->matcher_info[selector].refcnt, 1,
4576 				 rte_memory_order_relaxed);
4577 			rte_atomic_fetch_add_explicit
4578 				(&table->matcher_info[other_selector].refcnt, 1,
4579 				 rte_memory_order_relaxed);
4580 			aux->matcher_selector = other_selector;
4581 		}
4582 		break;
4583 	default:
4584 		break;
4585 	}
4586 }
4587 
4588 /**
4589  * Pull the enqueued flows.
4590  *
4591  * For flows enqueued from creation/destruction, the status should be
4592  * checked from the dequeue result.
4593  *
4594  * @param[in] dev
4595  *   Pointer to the rte_eth_dev structure.
4596  * @param[in] queue
4597  *   The queue to pull the result.
4598  * @param[in, out] res
4599  *   Array to save the results.
4600  * @param[in] n_res
4601  *   Number of result entries available in the array.
4602  * @param[out] error
4603  *   Pointer to error structure.
4604  *
4605  * @return
4606  *    Result number on success, negative value otherwise and rte_errno is set.
4607  */
4608 static int
4609 flow_hw_pull(struct rte_eth_dev *dev,
4610 	     uint32_t queue,
4611 	     struct rte_flow_op_result res[],
4612 	     uint16_t n_res,
4613 	     struct rte_flow_error *error)
4614 {
4615 	struct mlx5_priv *priv = dev->data->dev_private;
4616 	int ret, i;
4617 
4618 	/* 1. Pull the flow completion. */
4619 	ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
4620 	if (ret < 0)
4621 		return rte_flow_error_set(error, rte_errno,
4622 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4623 				"fail to query flow queue");
4624 	for (i = 0; i < ret; i++) {
4625 		struct rte_flow_hw *flow = res[i].user_data;
4626 
4627 		/* Restore user data. */
4628 		res[i].user_data = flow->user_data;
4629 		switch (flow->operation_type) {
4630 		case MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY:
4631 		case MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE:
4632 			hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4633 			break;
4634 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4635 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4636 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4637 			hw_cmpl_resizable_tbl(dev, flow, queue, res[i].status, error);
4638 			break;
4639 		default:
4640 			break;
4641 		}
4642 		flow_hw_q_dec_flow_ops(priv, queue);
4643 	}
4644 	/* 2. Pull indirect action comp. */
4645 	if (ret < n_res)
4646 		ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
4647 							n_res - ret);
4648 	if (ret < n_res)
4649 		ret += mlx5_hw_pull_flow_transfer_comp(dev, queue, &res[ret],
4650 						       n_res - ret);
4651 
4652 	return ret;
4653 }
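
/*
 * A hedged application-side sketch (not part of the driver) of draining a
 * queue's completions and checking the per-operation status, matching the
 * semantics implemented above. handle_failure() is a hypothetical
 * application callback; port_id and queue are assumed to exist.
 *
 * @code{.c}
 * struct rte_flow_op_result res[32];
 * struct rte_flow_error err;
 * int n, i;
 *
 * n = rte_flow_pull(port_id, queue, res, RTE_DIM(res), &err);
 * for (i = 0; i < n; i++) {
 *	if (res[i].status == RTE_FLOW_OP_ERROR)
 *		handle_failure(res[i].user_data);
 * }
 * @endcode
 */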
4654 
4655 static uint32_t
4656 mlx5_hw_push_queue(struct rte_ring *pending_q, struct rte_ring *cmpl_q)
4657 {
4658 	void *job = NULL;
4659 	uint32_t i, size = rte_ring_count(pending_q);
4660 
4661 	for (i = 0; i < size; i++) {
4662 		rte_ring_dequeue(pending_q, &job);
4663 		rte_ring_enqueue(cmpl_q, job);
4664 	}
4665 	return size;
4666 }
4667 
4668 static inline uint32_t
4669 __flow_hw_push_action(struct rte_eth_dev *dev,
4670 		    uint32_t queue)
4671 {
4672 	struct mlx5_priv *priv = dev->data->dev_private;
4673 	struct mlx5_hw_q *hw_q = &priv->hw_q[queue];
4674 
4675 	mlx5_hw_push_queue(hw_q->indir_iq, hw_q->indir_cq);
4676 	mlx5_hw_push_queue(hw_q->flow_transfer_pending,
4677 			   hw_q->flow_transfer_completed);
4678 	if (!priv->shared_host) {
4679 		if (priv->hws_ctpool)
4680 			mlx5_aso_push_wqe(priv->sh,
4681 					  &priv->ct_mng->aso_sqs[queue]);
4682 		if (priv->hws_mpool)
4683 			mlx5_aso_push_wqe(priv->sh,
4684 					  &priv->hws_mpool->sq[queue]);
4685 	}
4686 	return flow_hw_q_pending(priv, queue);
4687 }
4688 
4689 static int
4690 __flow_hw_push(struct rte_eth_dev *dev,
4691 	       uint32_t queue,
4692 	       struct rte_flow_error *error)
4693 {
4694 	struct mlx5_priv *priv = dev->data->dev_private;
4695 	int ret, num;
4696 
4697 	num = __flow_hw_push_action(dev, queue);
4698 	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
4699 				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
4700 	if (ret) {
4701 		rte_flow_error_set(error, rte_errno,
4702 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4703 				   "fail to push flows");
4704 		return ret;
4705 	}
4706 	return num;
4707 }
4708 
4709 /**
4710  * Push the enqueued flows to HW.
4711  *
4712  * Force apply all the enqueued flows to the HW.
4713  *
4714  * @param[in] dev
4715  *   Pointer to the rte_eth_dev structure.
4716  * @param[in] queue
4717  *   The queue to push the flow.
4718  * @param[out] error
4719  *   Pointer to error structure.
4720  *
4721  * @return
4722  *    0 on success, negative value otherwise and rte_errno is set.
4723  */
4724 static int
4725 flow_hw_push(struct rte_eth_dev *dev,
4726 	     uint32_t queue, struct rte_flow_error *error)
4727 {
4728 	int ret = __flow_hw_push(dev, queue, error);
4729 
4730 	return ret >= 0 ? 0 : ret;
4731 }
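
/*
 * A minimal application-side sketch of how the async flow API backed by
 * flow_hw_push() and flow_hw_pull() is typically driven. The port_id
 * variable, queue index 0 and the result array size are assumptions made
 * for illustration only.
 *
 * @code{.c}
 * uint16_t port_id = 0;
 * struct rte_flow_op_result res[32];
 * struct rte_flow_error err;
 * int n;
 *
 * // Apply all operations enqueued on queue 0 to the HW.
 * if (rte_flow_push(port_id, 0, &err) < 0)
 *         rte_panic("push failed: %s\n", err.message);
 * // Drain completions until nothing more is returned.
 * do {
 *         n = rte_flow_pull(port_id, 0, res, RTE_DIM(res), &err);
 * } while (n > 0);
 * @endcode
 */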
4732 
4733 /**
4734  * Drain the enqueued flows' completion.
4735  *
4736  * @param[in] dev
4737  *   Pointer to the rte_eth_dev structure.
4738  * @param[in] queue
4739  *   The queue to pull the flow.
4740  * @param[out] error
4741  *   Pointer to error structure.
4742  *
4743  * @return
4744  *    0 on success, negative value otherwise and rte_errno is set.
4745  */
4746 static int
4747 __flow_hw_pull_comp(struct rte_eth_dev *dev,
4748 		    uint32_t queue, struct rte_flow_error *error)
4749 {
4750 	struct rte_flow_op_result comp[BURST_THR];
4751 	int ret, i, empty_loop = 0;
4752 	uint32_t pending_rules;
4753 
4754 	ret = __flow_hw_push(dev, queue, error);
4755 	if (ret < 0)
4756 		return ret;
4757 	pending_rules = ret;
4758 	while (pending_rules) {
4759 		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
4760 		if (ret < 0)
4761 			return -1;
4762 		if (!ret) {
4763 			rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
4764 			if (++empty_loop > 5) {
4765 				DRV_LOG(WARNING, "No completion dequeued, %u rules pending, quit.", pending_rules);
4766 				break;
4767 			}
4768 			continue;
4769 		}
4770 		for (i = 0; i < ret; i++) {
4771 			if (comp[i].status == RTE_FLOW_OP_ERROR)
4772 				DRV_LOG(WARNING, "Flow flush got an error CQE.");
4773 		}
4774 		/*
4775 		 * Indirect **SYNC** METER_MARK and CT actions do not
4776 		 * remove their completion right after the WQE is posted.
4777 		 * That implementation avoids an HW timeout; the completion
4778 		 * is removed only before the following WQE is posted.
4779 		 * However, HWS queue accounting does not reflect that
4780 		 * behaviour, so during port destruction the sync queue
4781 		 * may still have pending completions.
4782 		 */
4783 		pending_rules -= RTE_MIN(pending_rules, (uint32_t)ret);
4784 		empty_loop = 0;
4785 	}
4786 	return 0;
4787 }
4788 
4789 /**
4790  * Flush created flows.
4791  *
4792  * @param[in] dev
4793  *   Pointer to the rte_eth_dev structure.
4794  * @param[out] error
4795  *   Pointer to error structure.
4796  *
4797  * @return
4798  *    0 on success, negative value otherwise and rte_errno is set.
4799  */
4800 int
4801 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
4802 		     struct rte_flow_error *error)
4803 {
4804 	struct mlx5_priv *priv = dev->data->dev_private;
4805 	struct mlx5_hw_q *hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
4806 	struct rte_flow_template_table *tbl;
4807 	struct rte_flow_hw *flow;
4808 	struct rte_flow_op_attr attr = {
4809 		.postpone = 0,
4810 	};
4811 	uint32_t pending_rules = 0;
4812 	uint32_t queue;
4813 	uint32_t fidx;
4814 
4815 	/*
4816 	 * Push and dequeue all the enqueued flow creation and
4817 	 * destruction jobs in case the user forgot to dequeue
4818 	 * them; otherwise the enqueued flows would be leaked.
4819 	 * Forgotten dequeues would also make the flow flush
4820 	 * receive unexpected extra CQEs and turn pending_rules
4821 	 * into a negative value.
4822 	 */
4823 	for (queue = 0; queue < priv->nb_queue; queue++) {
4824 		if (__flow_hw_pull_comp(dev, queue, error))
4825 			return -1;
4826 	}
4827 	/* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
4828 	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
4829 		if (!tbl->cfg.external)
4830 			continue;
4831 		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
4832 			if (flow_hw_async_flow_destroy(dev,
4833 						MLX5_DEFAULT_FLUSH_QUEUE,
4834 						&attr,
4835 						(struct rte_flow *)flow,
4836 						NULL,
4837 						error))
4838 				return -1;
4839 			pending_rules++;
4840 			/* Drain completion with queue size. */
4841 			if (pending_rules >= hw_q->size) {
4842 				if (__flow_hw_pull_comp(dev,
4843 							MLX5_DEFAULT_FLUSH_QUEUE,
4844 							error))
4845 					return -1;
4846 				pending_rules = 0;
4847 			}
4848 		}
4849 	}
4850 	/* Drain the remaining completions. */
4851 	if (pending_rules &&
4852 	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, error))
4853 		return -1;
4854 	return 0;
4855 }
4856 
4857 static int
4858 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
4859 			       struct rte_flow_template_table *tbl,
4860 			       struct mlx5_multi_pattern_segment *segment,
4861 			       uint32_t bulk_size,
4862 			       struct rte_flow_error *error)
4863 {
4864 	int ret = 0;
4865 	uint32_t i;
4866 	struct mlx5_priv *priv = dev->data->dev_private;
4867 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &tbl->mpctx;
4868 	const struct rte_flow_template_table_attr *table_attr = &tbl->cfg.attr;
4869 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
4870 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
4871 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][type];
4872 	struct mlx5dr_action *dr_action = NULL;
4873 
4874 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
4875 		typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + i;
4876 		enum mlx5dr_action_type reformat_type =
4877 			mlx5_multi_pattern_reformat_index_to_type(i);
4878 
4879 		if (!reformat->elements_num)
4880 			continue;
4881 		dr_action = reformat_type == MLX5DR_ACTION_TYP_INSERT_HEADER ?
4882 			mlx5dr_action_create_insert_header
4883 			(priv->dr_ctx, reformat->elements_num,
4884 			 reformat->insert_hdr, bulk_size, flags) :
4885 			mlx5dr_action_create_reformat
4886 			(priv->dr_ctx, reformat_type, reformat->elements_num,
4887 			 reformat->reformat_hdr, bulk_size, flags);
4888 		if (!dr_action) {
4889 			ret = rte_flow_error_set(error, rte_errno,
4890 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4891 						 NULL,
4892 						 "failed to create multi-pattern encap action");
4893 			goto error;
4894 		}
4895 		segment->reformat_action[i] = dr_action;
4896 	}
4897 	if (mpctx->mh.elements_num) {
4898 		typeof(mpctx->mh) *mh = &mpctx->mh;
4899 		dr_action = mlx5dr_action_create_modify_header
4900 			(priv->dr_ctx, mpctx->mh.elements_num, mh->pattern,
4901 			 bulk_size, flags);
4902 		if (!dr_action) {
4903 			ret = rte_flow_error_set(error, rte_errno,
4904 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4905 						  NULL, "failed to create multi-pattern header modify action");
4906 			goto error;
4907 		}
4908 		segment->mhdr_action = dr_action;
4909 	}
4910 	if (dr_action) {
4911 		segment->capacity = RTE_BIT32(bulk_size);
4912 		if (segment != &mpctx->segments[MLX5_MAX_TABLE_RESIZE_NUM - 1])
4913 			segment[1].head_index = segment->head_index + segment->capacity;
4914 	}
4915 	return 0;
4916 error:
4917 	mlx5_destroy_multi_pattern_segment(segment);
4918 	return ret;
4919 }
4920 
4921 static int
4922 mlx5_hw_build_template_table(struct rte_eth_dev *dev,
4923 			     uint8_t nb_action_templates,
4924 			     struct rte_flow_actions_template *action_templates[],
4925 			     struct mlx5dr_action_template *at[],
4926 			     struct rte_flow_template_table *tbl,
4927 			     struct rte_flow_error *error)
4928 {
4929 	int ret;
4930 	uint8_t i;
4931 
4932 	for (i = 0; i < nb_action_templates; i++) {
4933 		uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
4934 						     rte_memory_order_relaxed) + 1;
4935 
4936 		if (refcnt <= 1) {
4937 			rte_flow_error_set(error, EINVAL,
4938 					   RTE_FLOW_ERROR_TYPE_ACTION,
4939 					   &action_templates[i], "invalid AT refcount");
4940 			goto at_error;
4941 		}
4942 		at[i] = action_templates[i]->tmpl;
4943 		tbl->ats[i].action_template = action_templates[i];
4944 		LIST_INIT(&tbl->ats[i].acts.act_list);
4945 		/* Do NOT translate table actions if `dev` has not been started. */
4946 		if (!dev->data->dev_started)
4947 			continue;
4948 		ret = flow_hw_translate_actions_template(dev, &tbl->cfg,
4949 						  &tbl->ats[i].acts,
4950 						  action_templates[i],
4951 						  &tbl->mpctx, error);
4952 		if (ret) {
4953 			i++;
4954 			goto at_error;
4955 		}
4956 		flow_hw_populate_rule_acts_caches(dev, tbl, i);
4957 	}
4958 	tbl->nb_action_templates = nb_action_templates;
4959 	if (mlx5_is_multi_pattern_active(&tbl->mpctx)) {
4960 		ret = mlx5_tbl_multi_pattern_process(dev, tbl,
4961 						     &tbl->mpctx.segments[0],
4962 						     rte_log2_u32(tbl->cfg.attr.nb_flows),
4963 						     error);
4964 		if (ret)
4965 			goto at_error;
4966 	}
4967 	return 0;
4968 
4969 at_error:
4970 	while (i--) {
4971 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4972 		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
4973 				   1, rte_memory_order_relaxed);
4974 	}
4975 	return rte_errno;
4976 }
4977 
4978 static bool
4979 flow_hw_validate_template_domain(const struct rte_flow_attr *table_attr,
4980 				 uint32_t ingress, uint32_t egress, uint32_t transfer)
4981 {
4982 	if (table_attr->ingress)
4983 		return ingress != 0;
4984 	else if (table_attr->egress)
4985 		return egress != 0;
4986 	else
4987 		return transfer;
4988 }
4989 
4990 static bool
4991 flow_hw_validate_table_domain(const struct rte_flow_attr *table_attr)
4992 {
4993 	return table_attr->ingress + table_attr->egress + table_attr->transfer
4994 		== 1;
4995 }
4996 
4997 /**
4998  * Create flow table.
4999  *
5000  * The input item and action templates will be bound to the table.
5001  * Flow memory will also be allocated. The matcher will be created
5002  * based on the item templates. Actions will be translated to dedicated
5003  * DR actions when possible.
5004  *
5005  * @param[in] dev
5006  *   Pointer to the rte_eth_dev structure.
5007  * @param[in] table_cfg
5008  *   Pointer to the table configuration.
5009  * @param[in] item_templates
5010  *   Item template array to be bound to the table.
5011  * @param[in] nb_item_templates
5012  *   Number of item templates.
5013  * @param[in] action_templates
5014  *   Action template array to be bound to the table.
5015  * @param[in] nb_action_templates
5016  *   Number of action templates.
5017  * @param[out] error
5018  *   Pointer to error structure.
5019  *
5020  * @return
5021  *    Table on success, NULL otherwise and rte_errno is set.
5022  */
5023 static struct rte_flow_template_table *
5024 flow_hw_table_create(struct rte_eth_dev *dev,
5025 		     const struct mlx5_flow_template_table_cfg *table_cfg,
5026 		     struct rte_flow_pattern_template *item_templates[],
5027 		     uint8_t nb_item_templates,
5028 		     struct rte_flow_actions_template *action_templates[],
5029 		     uint8_t nb_action_templates,
5030 		     struct rte_flow_error *error)
5031 {
5032 	struct rte_flow_error sub_error = {
5033 		.type = RTE_FLOW_ERROR_TYPE_NONE,
5034 		.cause = NULL,
5035 		.message = NULL,
5036 	};
5037 	struct mlx5_priv *priv = dev->data->dev_private;
5038 	struct mlx5dr_matcher_attr matcher_attr = {0};
5039 	struct mlx5dr_action_jump_to_matcher_attr jump_attr = {
5040 		.type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX,
5041 		.matcher = NULL,
5042 	};
5043 	struct rte_flow_template_table *tbl = NULL;
5044 	struct mlx5_flow_group *grp;
5045 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
5046 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
5047 	const struct rte_flow_template_table_attr *attr = &table_cfg->attr;
5048 	struct rte_flow_attr flow_attr = attr->flow_attr;
5049 	struct mlx5_flow_cb_ctx ctx = {
5050 		.dev = dev,
5051 		.error = &sub_error,
5052 		.data = &flow_attr,
5053 	};
5054 	struct mlx5_indexed_pool_config cfg = {
5055 		.trunk_size = 1 << 12,
5056 		.per_core_cache = 1 << 13,
5057 		.need_lock = 1,
5058 		.release_mem_en = !!priv->sh->config.reclaim_mode,
5059 		.malloc = mlx5_malloc,
5060 		.free = mlx5_free,
5061 		.type = "mlx5_hw_table_flow",
5062 	};
5063 	struct mlx5_list_entry *ge;
5064 	uint32_t i = 0, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
5065 	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
5066 	bool port_started = !!dev->data->dev_started;
5067 	bool rpool_needed;
5068 	size_t tbl_mem_size;
5069 	int err;
5070 
5071 	if (!flow_hw_validate_table_domain(&attr->flow_attr)) {
5072 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
5073 				   NULL, "invalid table domain attributes");
5074 		return NULL;
5075 	}
5076 	for (i = 0; i < nb_item_templates; i++) {
5077 		const struct rte_flow_pattern_template_attr *pt_attr =
5078 			&item_templates[i]->attr;
5079 		bool match = flow_hw_validate_template_domain(&attr->flow_attr,
5080 							      pt_attr->ingress,
5081 							      pt_attr->egress,
5082 							      pt_attr->transfer);
5083 		if (!match) {
5084 			rte_flow_error_set(error, EINVAL,
5085 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5086 					   NULL, "pattern template domain does not match table");
5087 			return NULL;
5088 		}
5089 	}
5090 	for (i = 0; i < nb_action_templates; i++) {
5091 		const struct rte_flow_actions_template *at = action_templates[i];
5092 		bool match = flow_hw_validate_template_domain(&attr->flow_attr,
5093 							      at->attr.ingress,
5094 							      at->attr.egress,
5095 							      at->attr.transfer);
5096 		if (!match) {
5097 			rte_flow_error_set(error, EINVAL,
5098 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5099 					   NULL, "action template domain does not match table");
5100 			return NULL;
5101 		}
5102 	}
5103 	/* HWS layer accepts only 1 item template with root table. */
5104 	if (!attr->flow_attr.group)
5105 		max_tpl = 1;
5106 	cfg.max_idx = nb_flows;
5107 	cfg.size = !rte_flow_template_table_resizable(dev->data->port_id, attr) ?
5108 		   mlx5_flow_hw_entry_size() :
5109 		   mlx5_flow_hw_auxed_entry_size();
5110 	/* For table has very limited flows, disable cache. */
5111 	/* For tables with a very limited number of flows, disable the cache. */
5112 		cfg.per_core_cache = 0;
5113 		cfg.trunk_size = nb_flows;
5114 	} else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
5115 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
5116 	}
5117 	/* Check if we requires too many templates. */
5118 	/* Check if too many templates are requested. */
5119 	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
5120 		rte_errno = EINVAL;
5121 		goto error;
5122 	}
5123 	/*
5124 	 * Amount of memory required for rte_flow_template_table struct:
5125 	 * - Size of the struct itself.
5126 	 * - VLA of DR rule action containers at the end =
5127 	 *     number of actions templates * number of queues * size of DR rule actions container.
5128 	 */
5129 	tbl_mem_size = sizeof(*tbl);
5130 	tbl_mem_size += nb_action_templates * priv->nb_queue * sizeof(tbl->rule_acts[0]);
5131 	/* Allocate the table memory. */
5132 	tbl = mlx5_malloc(MLX5_MEM_ZERO, tbl_mem_size, RTE_CACHE_LINE_SIZE, rte_socket_id());
5133 	if (!tbl)
5134 		goto error;
5135 	tbl->cfg = *table_cfg;
5136 	/* Allocate flow indexed pool. */
5137 	tbl->flow = mlx5_ipool_create(&cfg);
5138 	if (!tbl->flow)
5139 		goto error;
5140 	/* Allocate table of auxiliary flow rule structs. */
5141 	tbl->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux) * nb_flows,
5142 				    RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
5143 	if (!tbl->flow_aux)
5144 		goto error;
5145 	/* Register the flow group. */
5146 	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
5147 	if (!ge)
5148 		goto error;
5149 	grp = container_of(ge, struct mlx5_flow_group, entry);
5150 	tbl->grp = grp;
5151 	/* Prepare matcher information. */
5152 	matcher_attr.resizable = !!rte_flow_template_table_resizable
5153 					(dev->data->port_id, &table_cfg->attr);
5154 	matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
5155 	matcher_attr.priority = attr->flow_attr.priority;
5156 	matcher_attr.optimize_using_rule_idx = true;
5157 	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
5158 	matcher_attr.insert_mode = flow_hw_matcher_insert_mode_get(attr->insertion_type);
5159 	if (matcher_attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
5160 		if (attr->insertion_type == RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN) {
5161 			matcher_attr.isolated = true;
5162 			matcher_attr.match_mode = MLX5DR_MATCHER_MATCH_MODE_DEFAULT;
5163 		} else {
5164 			matcher_attr.isolated = false;
5165 			matcher_attr.match_mode = MLX5DR_MATCHER_MATCH_MODE_ALWAYS_HIT;
5166 		}
5167 	}
5168 	if (attr->hash_func == RTE_FLOW_TABLE_HASH_FUNC_CRC16) {
5169 		DRV_LOG(ERR, "16-bit checksum hash type is not supported");
5170 		rte_errno = ENOTSUP;
5171 		goto it_error;
5172 	}
5173 	matcher_attr.distribute_mode = flow_hw_matcher_distribute_mode_get(attr->hash_func);
5174 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
5175 	/* Parse hints information. */
5176 	if (attr->specialize) {
5177 		uint32_t val = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
5178 			       RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
5179 
5180 		if ((attr->specialize & val) == val) {
5181 			DRV_LOG(ERR, "Invalid hint value %x",
5182 				attr->specialize);
5183 			rte_errno = EINVAL;
5184 			goto it_error;
5185 		}
5186 		if (attr->specialize &
5187 		    RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
5188 			matcher_attr.optimize_flow_src =
5189 				MLX5DR_MATCHER_FLOW_SRC_WIRE;
5190 		else if (attr->specialize &
5191 			 RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
5192 			matcher_attr.optimize_flow_src =
5193 				MLX5DR_MATCHER_FLOW_SRC_VPORT;
5194 	}
5195 	/* Build the item template. */
5196 	for (i = 0; i < nb_item_templates; i++) {
5197 		uint32_t ret;
5198 
5199 		if ((flow_attr.ingress && !item_templates[i]->attr.ingress) ||
5200 		    (flow_attr.egress && !item_templates[i]->attr.egress) ||
5201 		    (flow_attr.transfer && !item_templates[i]->attr.transfer)) {
5202 			DRV_LOG(ERR, "pattern template and template table attribute mismatch");
5203 			rte_errno = EINVAL;
5204 			goto it_error;
5205 		}
5206 		if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
5207 			matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
5208 		ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
5209 					 rte_memory_order_relaxed) + 1;
5210 		if (ret <= 1) {
5211 			rte_errno = EINVAL;
5212 			goto it_error;
5213 		}
5214 		mt[i] = item_templates[i]->mt;
5215 		tbl->its[i] = item_templates[i];
5216 	}
5217 	tbl->nb_item_templates = nb_item_templates;
5218 	/* Build the action template. */
5219 	err = mlx5_hw_build_template_table(dev, nb_action_templates,
5220 					   action_templates, at, tbl, &sub_error);
5221 	if (err) {
5222 		i = nb_item_templates;
5223 		goto it_error;
5224 	}
5225 	tbl->matcher_info[0].matcher = mlx5dr_matcher_create
5226 		(tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
5227 	if (!tbl->matcher_info[0].matcher)
5228 		goto at_error;
5229 	tbl->matcher_attr = matcher_attr;
5230 	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
5231 		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
5232 		    MLX5DR_TABLE_TYPE_NIC_RX);
5233 	if (matcher_attr.isolated) {
5234 		jump_attr.matcher = tbl->matcher_info[0].matcher;
5235 		tbl->matcher_info[0].jump = mlx5dr_action_create_jump_to_matcher(priv->dr_ctx,
5236 				&jump_attr, mlx5_hw_act_flag[!!attr->flow_attr.group][tbl->type]);
5237 		if (!tbl->matcher_info[0].jump)
5238 			goto jtm_error;
5239 	}
5240 	/*
5241 	 * An additional index is needed only if the matcher supports update and
5242 	 * needs more than 1 WQE per rule. Otherwise, the flow index can be reused.
5243 	 */
5244 	rpool_needed = mlx5dr_matcher_is_updatable(tbl->matcher_info[0].matcher) &&
5245 		       mlx5dr_matcher_is_dependent(tbl->matcher_info[0].matcher);
5246 	if (rpool_needed) {
5247 		/* Allocate rule indexed pool. */
5248 		cfg.size = 0;
5249 		cfg.type = "mlx5_hw_table_rule";
5250 		cfg.max_idx += priv->hw_q[0].size;
5251 		tbl->resource = mlx5_ipool_create(&cfg);
5252 		if (!tbl->resource)
5253 			goto res_error;
5254 	}
5255 	if (port_started)
5256 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
5257 	else
5258 		LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
5259 	rte_rwlock_init(&tbl->matcher_replace_rwlk);
5260 	return tbl;
5261 res_error:
5262 	if (tbl->matcher_info[0].jump)
5263 		mlx5dr_action_destroy(tbl->matcher_info[0].jump);
5264 jtm_error:
5265 	if (tbl->matcher_info[0].matcher)
5266 		(void)mlx5dr_matcher_destroy(tbl->matcher_info[0].matcher);
5267 at_error:
5268 	for (i = 0; i < nb_action_templates; i++) {
5269 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
5270 		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
5271 				   1, rte_memory_order_relaxed);
5272 	}
5273 	i = nb_item_templates;
5274 it_error:
5275 	while (i--)
5276 		rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
5277 				   1, rte_memory_order_relaxed);
5278 error:
5279 	err = rte_errno;
5280 	if (tbl) {
5281 		if (tbl->grp)
5282 			mlx5_hlist_unregister(priv->sh->groups,
5283 					      &tbl->grp->entry);
5284 		if (tbl->flow_aux)
5285 			mlx5_free(tbl->flow_aux);
5286 		if (tbl->flow)
5287 			mlx5_ipool_destroy(tbl->flow);
5288 		mlx5_free(tbl);
5289 	}
5290 	if (error != NULL) {
5291 		if (sub_error.type == RTE_FLOW_ERROR_TYPE_NONE)
5292 			rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5293 					   "Failed to create template table");
5294 		else
5295 			rte_memcpy(error, &sub_error, sizeof(sub_error));
5296 	}
5297 	return NULL;
5298 }
5299 
5300 /**
5301  * Update flow template table.
5302  *
5303  * @param[in] dev
5304  *   Pointer to the rte_eth_dev structure.
5305  * @param[out] error
5306  *   Pointer to error structure.
5307  *
5308  * @return
5309  *    0 on success, negative value otherwise and rte_errno is set.
5310  */
5311 int
5312 flow_hw_table_update(struct rte_eth_dev *dev,
5313 		     struct rte_flow_error *error)
5314 {
5315 	struct mlx5_priv *priv = dev->data->dev_private;
5316 	struct rte_flow_template_table *tbl;
5317 
5318 	while ((tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo)) != NULL) {
5319 		if (flow_hw_translate_all_actions_templates(dev, tbl, error))
5320 			return -1;
5321 		LIST_REMOVE(tbl, next);
5322 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
5323 	}
5324 	return 0;
5325 }
5326 
5327 static inline int
5328 __translate_group(struct rte_eth_dev *dev,
5329 			const struct rte_flow_attr *flow_attr,
5330 			bool external,
5331 			uint32_t group,
5332 			uint32_t *table_group,
5333 			struct rte_flow_error *error)
5334 {
5335 	struct mlx5_priv *priv = dev->data->dev_private;
5336 	struct mlx5_sh_config *config = &priv->sh->config;
5337 
5338 	if (config->dv_esw_en &&
5339 	    priv->fdb_def_rule &&
5340 	    external &&
5341 	    flow_attr->transfer) {
5342 		if (group > MLX5_HW_MAX_TRANSFER_GROUP)
5343 			return rte_flow_error_set(error, EINVAL,
5344 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5345 						  NULL,
5346 						  "group index not supported");
5347 		*table_group = group + 1;
5348 	} else if (config->dv_esw_en &&
5349 		   (config->repr_matching || config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) &&
5350 		   external &&
5351 		   flow_attr->egress) {
5352 		/*
5353 		 * On E-Switch setups, default egress flow rules are inserted to allow
5354 		 * representor matching and/or preserving metadata across steering domains.
5355 		 * These flow rules are inserted in group 0 and this group is reserved by PMD
5356 		 * for these purposes.
5357 		 *
5358 		 * As a result, if representor matching or extended metadata mode is enabled,
5359 		 * group provided by the user must be incremented to avoid inserting flow rules
5360 		 * in group 0.
5361 		 */
5362 		if (group > MLX5_HW_MAX_EGRESS_GROUP)
5363 			return rte_flow_error_set(error, EINVAL,
5364 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5365 						  NULL,
5366 						  "group index not supported");
5367 		*table_group = group + 1;
5368 	} else {
5369 		*table_group = group;
5370 	}
5371 	return 0;
5372 }
5373 
5374 /**
5375  * Translates the group index specified by the user into the internal
5376  * group index.
5377  *
5378  * Translation is done by incrementing group index, so group n becomes n + 1.
5379  *
5380  * @param[in] dev
5381  *   Pointer to Ethernet device.
5382  * @param[in] cfg
5383  *   Pointer to the template table configuration.
5384  * @param[in] group
5385  *   Currently used group index (table group or jump destination).
5386  * @param[out] table_group
5387  *   Pointer to output group index.
5388  * @param[out] error
5389  *   Pointer to error structure.
5390  *
5391  * @return
5392  *   0 on success. Otherwise, returns negative error code, rte_errno is set
5393  *   and error structure is filled.
5394  */
5395 static int
5396 flow_hw_translate_group(struct rte_eth_dev *dev,
5397 			const struct mlx5_flow_template_table_cfg *cfg,
5398 			uint32_t group,
5399 			uint32_t *table_group,
5400 			struct rte_flow_error *error)
5401 {
5402 	const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
5403 
5404 	return __translate_group(dev, flow_attr, cfg->external, group, table_group, error);
5405 }
5406 
5407 /**
5408  * Create flow table.
5409  *
5410  * This function is a wrapper over @ref flow_hw_table_create(), which translates parameters
5411  * provided by user to proper internal values.
5412  *
5413  * @param[in] dev
5414  *   Pointer to Ethernet device.
5415  * @param[in] attr
5416  *   Pointer to the table attributes.
5417  * @param[in] item_templates
5418  *   Item template array to be bound to the table.
5419  * @param[in] nb_item_templates
5420  *   Number of item templates.
5421  * @param[in] action_templates
5422  *   Action template array to be bound to the table.
5423  * @param[in] nb_action_templates
5424  *   Number of action templates.
5425  * @param[out] error
5426  *   Pointer to error structure.
5427  *
5428  * @return
5429  *   Table on success. Otherwise NULL is returned, rte_errno is set
5430  *   and the error structure is filled.
5431  */
5432 static struct rte_flow_template_table *
5433 flow_hw_template_table_create(struct rte_eth_dev *dev,
5434 			      const struct rte_flow_template_table_attr *attr,
5435 			      struct rte_flow_pattern_template *item_templates[],
5436 			      uint8_t nb_item_templates,
5437 			      struct rte_flow_actions_template *action_templates[],
5438 			      uint8_t nb_action_templates,
5439 			      struct rte_flow_error *error)
5440 {
5441 	struct mlx5_flow_template_table_cfg cfg = {
5442 		.attr = *attr,
5443 		.external = true,
5444 	};
5445 	uint32_t group = attr->flow_attr.group;
5446 
5447 	if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
5448 		return NULL;
5449 	if (!cfg.attr.flow_attr.group &&
5450 	    rte_flow_template_table_resizable(dev->data->port_id, attr)) {
5451 		rte_flow_error_set(error, EINVAL,
5452 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5453 				   "table cannot be resized: invalid group");
5454 		return NULL;
5455 	}
5456 	return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
5457 				    action_templates, nb_action_templates, error);
5458 }
5459 
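
/*
 * A minimal application-side sketch of the template table creation path
 * handled above. The port_id variable and the previously created
 * pattern_tmpl/actions_tmpl handles are assumed to exist; the group,
 * priority and nb_flows values are arbitrary.
 *
 * @code{.c}
 * const struct rte_flow_template_table_attr tbl_attr = {
 *         .flow_attr = {
 *                 .group = 1,
 *                 .priority = 0,
 *                 .ingress = 1,
 *         },
 *         .nb_flows = 1024,
 * };
 * struct rte_flow_error err;
 * struct rte_flow_template_table *tbl;
 *
 * tbl = rte_flow_template_table_create(port_id, &tbl_attr,
 *                                      &pattern_tmpl, 1,
 *                                      &actions_tmpl, 1, &err);
 * if (tbl == NULL)
 *         rte_panic("table creation failed: %s\n", err.message);
 * @endcode
 */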
5460 static void
5461 mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment)
5462 {
5463 	int i;
5464 
5465 	if (segment->mhdr_action)
5466 		mlx5dr_action_destroy(segment->mhdr_action);
5467 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
5468 		if (segment->reformat_action[i])
5469 			mlx5dr_action_destroy(segment->reformat_action[i]);
5470 	}
5471 	segment->capacity = 0;
5472 }
5473 
5474 static void
5475 flow_hw_destroy_table_multi_pattern_ctx(struct rte_flow_template_table *table)
5476 {
5477 	int sx;
5478 
5479 	for (sx = 0; sx < MLX5_MAX_TABLE_RESIZE_NUM; sx++)
5480 		mlx5_destroy_multi_pattern_segment(table->mpctx.segments + sx);
5481 }
5482 /**
5483  * Destroy flow table.
5484  *
5485  * @param[in] dev
5486  *   Pointer to the rte_eth_dev structure.
5487  * @param[in] table
5488  *   Pointer to the table to be destroyed.
5489  * @param[out] error
5490  *   Pointer to error structure.
5491  *
5492  * @return
5493  *   0 on success, a negative errno value otherwise and rte_errno is set.
5494  */
5495 static int
5496 flow_hw_table_destroy(struct rte_eth_dev *dev,
5497 		      struct rte_flow_template_table *table,
5498 		      struct rte_flow_error *error)
5499 {
5500 	struct mlx5_priv *priv = dev->data->dev_private;
5501 	int i;
5502 	uint32_t fidx = 1;
5503 	uint32_t ridx = 1;
5504 
5505 	/* Build ipool allocated object bitmap. */
5506 	if (table->resource)
5507 		mlx5_ipool_flush_cache(table->resource);
5508 	mlx5_ipool_flush_cache(table->flow);
5509 	/* Check if ipool has allocated objects. */
5510 	if (table->refcnt ||
5511 	    mlx5_ipool_get_next(table->flow, &fidx) ||
5512 	    (table->resource && mlx5_ipool_get_next(table->resource, &ridx))) {
5513 		DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
5514 		return rte_flow_error_set(error, EBUSY,
5515 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5516 				   NULL,
5517 				   "table is in use");
5518 	}
5519 	LIST_REMOVE(table, next);
5520 	for (i = 0; i < table->nb_item_templates; i++)
5521 		rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
5522 				   1, rte_memory_order_relaxed);
5523 	for (i = 0; i < table->nb_action_templates; i++) {
5524 		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
5525 		rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
5526 				   1, rte_memory_order_relaxed);
5527 	}
5528 	flow_hw_destroy_table_multi_pattern_ctx(table);
5529 	if (table->matcher_info[0].jump)
5530 		mlx5dr_action_destroy(table->matcher_info[0].jump);
5531 	if (table->matcher_info[0].matcher)
5532 		mlx5dr_matcher_destroy(table->matcher_info[0].matcher);
5533 	if (table->matcher_info[1].jump)
5534 		mlx5dr_action_destroy(table->matcher_info[1].jump);
5535 	if (table->matcher_info[1].matcher)
5536 		mlx5dr_matcher_destroy(table->matcher_info[1].matcher);
5537 	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
5538 	if (table->resource)
5539 		mlx5_ipool_destroy(table->resource);
5540 	mlx5_free(table->flow_aux);
5541 	mlx5_ipool_destroy(table->flow);
5542 	mlx5_free(table);
5543 	return 0;
5544 }
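
/*
 * The application-side counterpart sketch of the destroy path above. The
 * port_id variable and the tbl handle (with no flow rules left in it) are
 * assumed to exist.
 *
 * @code{.c}
 * struct rte_flow_error err;
 *
 * if (rte_flow_template_table_destroy(port_id, tbl, &err))
 *         rte_panic("table destroy failed: %s\n", err.message);
 * @endcode
 */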
5545 
5546 /**
5547  * Parse group's miss actions.
5548  *
5549  * @param[in] dev
5550  *   Pointer to the rte_eth_dev structure.
5551  * @param[in] cfg
5552  *   Pointer to the table_cfg structure.
5553  * @param[in] actions
5554  *   Array of actions to perform on group miss. Supported types:
5555  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5556  * @param[out] dst_group_id
5557  *   Pointer to the destination group id output. It is set to 0 if the actions
5558  *   list contains only END, otherwise it is set to the destination group id.
5559  * @param[out] error
5560  *   Pointer to error structure.
5561  *
5562  * @return
5563  *   0 on success, a negative errno value otherwise and rte_errno is set.
5564  */
5565 
5566 static int
5567 flow_hw_group_parse_miss_actions(struct rte_eth_dev *dev,
5568 				 struct mlx5_flow_template_table_cfg *cfg,
5569 				 const struct rte_flow_action actions[],
5570 				 uint32_t *dst_group_id,
5571 				 struct rte_flow_error *error)
5572 {
5573 	const struct rte_flow_action_jump *jump_conf;
5574 	uint32_t temp = 0;
5575 	uint32_t i;
5576 
5577 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
5578 		switch (actions[i].type) {
5579 		case RTE_FLOW_ACTION_TYPE_VOID:
5580 			continue;
5581 		case RTE_FLOW_ACTION_TYPE_JUMP:
5582 			if (temp)
5583 				return rte_flow_error_set(error, ENOTSUP,
5584 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, actions,
5585 							  "Miss actions can contain only a single JUMP");
5586 
5587 			jump_conf = (const struct rte_flow_action_jump *)actions[i].conf;
5588 			if (!jump_conf)
5589 				return rte_flow_error_set(error, EINVAL,
5590 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5591 							  jump_conf, "Jump conf must not be NULL");
5592 
5593 			if (flow_hw_translate_group(dev, cfg, jump_conf->group, &temp, error))
5594 				return -rte_errno;
5595 
5596 			if (!temp)
5597 				return rte_flow_error_set(error, EINVAL,
5598 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5599 							  "Failed to set group miss actions - Invalid target group");
5600 			break;
5601 		default:
5602 			return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5603 						  &actions[i], "Unsupported default miss action type");
5604 		}
5605 	}
5606 
5607 	*dst_group_id = temp;
5608 	return 0;
5609 }
5610 
5611 /**
5612  * Set group's miss group.
5613  *
5614  * @param[in] dev
5615  *   Pointer to the rte_eth_dev structure.
5616  * @param[in] cfg
5617  *   Pointer to the table_cfg structure.
5618  * @param[in] src_grp
5619  *   Pointer to source group structure.
5620  *   If NULL, a new group will be created based on the group id from cfg->attr.flow_attr.group.
5621  * @param[in] dst_grp
5622  *   Pointer to destination group structure.
5623  * @param[out] error
5624  *   Pointer to error structure.
5625  *
5626  * @return
5627  *   0 on success, a negative errno value otherwise and rte_errno is set.
5628  */
5629 
5630 static int
5631 flow_hw_group_set_miss_group(struct rte_eth_dev *dev,
5632 			     struct mlx5_flow_template_table_cfg *cfg,
5633 			     struct mlx5_flow_group *src_grp,
5634 			     struct mlx5_flow_group *dst_grp,
5635 			     struct rte_flow_error *error)
5636 {
5637 	struct rte_flow_error sub_error = {
5638 		.type = RTE_FLOW_ERROR_TYPE_NONE,
5639 		.cause = NULL,
5640 		.message = NULL,
5641 	};
5642 	struct mlx5_flow_cb_ctx ctx = {
5643 		.dev = dev,
5644 		.error = &sub_error,
5645 		.data = &cfg->attr.flow_attr,
5646 	};
5647 	struct mlx5_priv *priv = dev->data->dev_private;
5648 	struct mlx5_list_entry *ge;
5649 	bool ref = false;
5650 	int ret;
5651 
5652 	if (!dst_grp)
5653 		return -EINVAL;
5654 
5655 	/* If group doesn't exist - needs to be created. */
5656 	if (!src_grp) {
5657 		ge = mlx5_hlist_register(priv->sh->groups, cfg->attr.flow_attr.group, &ctx);
5658 		if (!ge)
5659 			return -rte_errno;
5660 
5661 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
5662 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5663 		ref = true;
5664 	} else if (!src_grp->miss_group) {
5665 		/* If group exists, but has no miss actions - need to increase ref_cnt. */
5666 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5667 		src_grp->entry.ref_cnt++;
5668 		ref = true;
5669 	}
5670 
5671 	ret = mlx5dr_table_set_default_miss(src_grp->tbl, dst_grp->tbl);
5672 	if (ret)
5673 		goto mlx5dr_error;
5674 
5675 	/* If the group already existed and had old miss actions, its ref_cnt is
5676 	 * already correct. However, the ref counter of the old miss group must be reduced.
5677 	 */
5678 	if (src_grp->miss_group)
5679 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->miss_group->entry);
5680 
5681 	src_grp->miss_group = dst_grp;
5682 	return 0;
5683 
5684 mlx5dr_error:
5685 	/* Reduce src_grp ref_cnt back & remove from grp list in case of mlx5dr error */
5686 	if (ref) {
5687 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->entry);
5688 		LIST_REMOVE(src_grp, next);
5689 	}
5690 
5691 	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5692 				  "Failed to set group miss actions");
5693 }
5694 
5695 /**
5696  * Unset group's miss group.
5697  *
5698  * @param[in] dev
5699  *   Pointer to the rte_eth_dev structure.
5700  * @param[in] grp
5701  *   Pointer to group structure.
5702  * @param[out] error
5703  *   Pointer to error structure.
5704  *
5705  * @return
5706  *   0 on success, a negative errno value otherwise and rte_errno is set.
5707  */
5708 
5709 static int
5710 flow_hw_group_unset_miss_group(struct rte_eth_dev *dev,
5711 			       struct mlx5_flow_group *grp,
5712 			       struct rte_flow_error *error)
5713 {
5714 	struct mlx5_priv *priv = dev->data->dev_private;
5715 	int ret;
5716 
5717 	/* If group doesn't exist - no need to change anything. */
5718 	if (!grp)
5719 		return 0;
5720 
5721 	/* If the group exists but its miss action is already the default
5722 	 * behavior, there is no need to change anything.
5723 	 */
5724 	if (!grp->miss_group)
5725 		return 0;
5726 
5727 	ret = mlx5dr_table_set_default_miss(grp->tbl, NULL);
5728 	if (ret)
5729 		return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5730 					  "Failed to unset group miss actions");
5731 
5732 	mlx5_hlist_unregister(priv->sh->groups, &grp->miss_group->entry);
5733 	grp->miss_group = NULL;
5734 
5735 	LIST_REMOVE(grp, next);
5736 	mlx5_hlist_unregister(priv->sh->groups, &grp->entry);
5737 
5738 	return 0;
5739 }
5740 
5741 /**
5742  * Set group miss actions.
5743  *
5744  * @param[in] dev
5745  *   Pointer to the rte_eth_dev structure.
5746  * @param[in] group_id
5747  *   Group id.
5748  * @param[in] attr
5749  *   Pointer to group attributes structure.
5750  * @param[in] actions
5751  *   Array of actions to perform on group miss. Supported types:
5752  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5753  * @param[out] error
5754  *   Pointer to error structure.
5755  *
5756  * @return
5757  *   0 on success, a negative errno value otherwise and rte_errno is set.
5758  */
5759 
5760 static int
5761 flow_hw_group_set_miss_actions(struct rte_eth_dev *dev,
5762 			       uint32_t group_id,
5763 			       const struct rte_flow_group_attr *attr,
5764 			       const struct rte_flow_action actions[],
5765 			       struct rte_flow_error *error)
5766 {
5767 	struct rte_flow_error sub_error = {
5768 		.type = RTE_FLOW_ERROR_TYPE_NONE,
5769 		.cause = NULL,
5770 		.message = NULL,
5771 	};
5772 	struct mlx5_flow_template_table_cfg cfg = {
5773 		.external = true,
5774 		.attr = {
5775 			.flow_attr = {
5776 				.group = group_id,
5777 				.ingress = attr->ingress,
5778 				.egress = attr->egress,
5779 				.transfer = attr->transfer,
5780 			},
5781 		},
5782 	};
5783 	struct mlx5_flow_cb_ctx ctx = {
5784 		.dev = dev,
5785 		.error = &sub_error,
5786 		.data = &cfg.attr.flow_attr,
5787 	};
5788 	struct mlx5_priv *priv = dev->data->dev_private;
5789 	struct mlx5_flow_group *src_grp = NULL;
5790 	struct mlx5_flow_group *dst_grp = NULL;
5791 	struct mlx5_list_entry *ge;
5792 	uint32_t dst_group_id = 0;
5793 	int ret;
5794 
5795 	if (flow_hw_translate_group(dev, &cfg, group_id, &group_id, error))
5796 		return -rte_errno;
5797 
5798 	if (!group_id)
5799 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5800 					  NULL, "Failed to set group miss actions - invalid group id");
5801 
5802 	ret = flow_hw_group_parse_miss_actions(dev, &cfg, actions, &dst_group_id, error);
5803 	if (ret)
5804 		return -rte_errno;
5805 
5806 	if (dst_group_id == group_id) {
5807 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5808 					  NULL, "Failed to set group miss actions - target group id must differ from group_id");
5809 	}
5810 
5811 	cfg.attr.flow_attr.group = group_id;
5812 	ge = mlx5_hlist_lookup(priv->sh->groups, group_id, &ctx);
5813 	if (ge)
5814 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
5815 
5816 	if (dst_group_id) {
5817 		/* Increase ref_cnt for new miss group. */
5818 		cfg.attr.flow_attr.group = dst_group_id;
5819 		ge = mlx5_hlist_register(priv->sh->groups, dst_group_id, &ctx);
5820 		if (!ge)
5821 			return -rte_errno;
5822 
5823 		dst_grp = container_of(ge, struct mlx5_flow_group, entry);
5824 
5825 		cfg.attr.flow_attr.group = group_id;
5826 		ret = flow_hw_group_set_miss_group(dev, &cfg, src_grp, dst_grp, error);
5827 		if (ret)
5828 			goto error;
5829 	} else {
5830 		return flow_hw_group_unset_miss_group(dev, src_grp, error);
5831 	}
5832 
5833 	return 0;
5834 
5835 error:
5836 	if (dst_grp)
5837 		mlx5_hlist_unregister(priv->sh->groups, &dst_grp->entry);
5838 	return -rte_errno;
5839 }
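
/*
 * A minimal application-side sketch of the group miss action API implemented
 * above: packets missing all rules in group 1 are diverted to group 2. The
 * port_id variable and the group numbers are assumptions for illustration.
 *
 * @code{.c}
 * const struct rte_flow_group_attr grp_attr = { .ingress = 1 };
 * const struct rte_flow_action_jump jump = { .group = 2 };
 * const struct rte_flow_action miss[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 *
 * if (rte_flow_group_set_miss_actions(port_id, 1, &grp_attr, miss, &err))
 *         rte_panic("set miss actions failed: %s\n", err.message);
 * @endcode
 */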
5840 
5841 static bool
5842 flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
5843 			     enum rte_flow_field_id field)
5844 {
5845 	return action->src.field == field || action->dst.field == field;
5846 }
5847 
5848 static bool
5849 flow_hw_modify_field_is_geneve_opt(enum rte_flow_field_id field)
5850 {
5851 	return field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE ||
5852 	       field == RTE_FLOW_FIELD_GENEVE_OPT_CLASS ||
5853 	       field == RTE_FLOW_FIELD_GENEVE_OPT_DATA;
5854 }
5855 
5856 static bool
5857 flow_hw_modify_field_is_add_dst_valid(const struct rte_flow_action_modify_field *conf)
5858 {
5859 	if (conf->operation != RTE_FLOW_MODIFY_ADD)
5860 		return true;
5861 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
5862 	    conf->src.field == RTE_FLOW_FIELD_VALUE)
5863 		return true;
5864 	switch (conf->dst.field) {
5865 	case RTE_FLOW_FIELD_IPV4_TTL:
5866 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
5867 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
5868 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
5869 	case RTE_FLOW_FIELD_TAG:
5870 	case RTE_FLOW_FIELD_META:
5871 	case RTE_FLOW_FIELD_FLEX_ITEM:
5872 	case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
5873 	case RTE_FLOW_FIELD_IPV4_IHL:
5874 	case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
5875 	case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
5876 		return true;
5877 	default:
5878 		break;
5879 	}
5880 	return false;
5881 }
5882 
5883 /**
5884  * Validate the level value for modify field action.
5885  *
5886  * @param[in] data
5887  *   Pointer to the rte_flow_field_data structure either src or dst.
5888  * @param[in] inner_supported
5889  *   Indicator whether inner should be supported.
5890  * @param[out] error
5891  *   Pointer to error structure.
5892  *
5893  * @return
5894  *   0 on success, a negative errno value otherwise and rte_errno is set.
5895  */
5896 static int
5897 flow_hw_validate_modify_field_level(const struct rte_flow_field_data *data,
5898 				    bool inner_supported,
5899 				    struct rte_flow_error *error)
5900 {
5901 	switch ((int)data->field) {
5902 	case RTE_FLOW_FIELD_START:
5903 	case RTE_FLOW_FIELD_VLAN_TYPE:
5904 	case RTE_FLOW_FIELD_RANDOM:
5905 	case RTE_FLOW_FIELD_FLEX_ITEM:
5906 		/*
5907 		 * Level validation does not apply here since the field either
5908 		 * isn't supported or doesn't use 'level'.
5909 		 */
5910 		break;
5911 	case RTE_FLOW_FIELD_MARK:
5912 	case RTE_FLOW_FIELD_META:
5913 	case RTE_FLOW_FIELD_METER_COLOR:
5914 	case RTE_FLOW_FIELD_HASH_RESULT:
5915 		/* For metadata fields, the encapsulation level is a don't-care. */
5916 		break;
5917 	case RTE_FLOW_FIELD_TAG:
5918 	case MLX5_RTE_FLOW_FIELD_META_REG:
5919 		/*
5920 		 * The tag array for the RTE_FLOW_FIELD_TAG type is provided using
5921 		 * the 'tag_index' field. In the old API it was provided using the
5922 		 * 'level' field, which is still supported for backwards compatibility.
5923 		 * Therefore, for the tag field only, the level matters: it is taken
5924 		 * as the tag index when the 'tag_index' field isn't set, and an
5925 		 * error is returned if both are set.
5926 		 */
5927 		if (data->level > 0) {
5928 			if (data->tag_index > 0)
5929 				return rte_flow_error_set(error, EINVAL,
5930 							  RTE_FLOW_ERROR_TYPE_ACTION,
5931 							  data,
5932 							  "tag array can be provided using 'level' or 'tag_index' fields, not both");
5933 			DRV_LOG(WARNING,
5934 				"tag array provided in 'level' field instead of 'tag_index' field.");
5935 		}
5936 		break;
5937 	case RTE_FLOW_FIELD_MAC_DST:
5938 	case RTE_FLOW_FIELD_MAC_SRC:
5939 	case RTE_FLOW_FIELD_MAC_TYPE:
5940 	case RTE_FLOW_FIELD_IPV4_IHL:
5941 	case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
5942 	case RTE_FLOW_FIELD_IPV4_DSCP:
5943 	case RTE_FLOW_FIELD_IPV4_ECN:
5944 	case RTE_FLOW_FIELD_IPV4_TTL:
5945 	case RTE_FLOW_FIELD_IPV4_SRC:
5946 	case RTE_FLOW_FIELD_IPV4_DST:
5947 	case RTE_FLOW_FIELD_IPV6_TRAFFIC_CLASS:
5948 	case RTE_FLOW_FIELD_IPV6_FLOW_LABEL:
5949 	case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
5950 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
5951 	case RTE_FLOW_FIELD_IPV6_SRC:
5952 	case RTE_FLOW_FIELD_IPV6_DST:
5953 	case RTE_FLOW_FIELD_TCP_PORT_SRC:
5954 	case RTE_FLOW_FIELD_TCP_PORT_DST:
5955 	case RTE_FLOW_FIELD_TCP_FLAGS:
5956 	case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
5957 	case RTE_FLOW_FIELD_UDP_PORT_SRC:
5958 	case RTE_FLOW_FIELD_UDP_PORT_DST:
5959 		if (data->level > 2)
5960 			return rte_flow_error_set(error, ENOTSUP,
5961 						  RTE_FLOW_ERROR_TYPE_ACTION,
5962 						  data,
5963 						  "second inner header fields modification is not supported");
5964 		if (inner_supported)
5965 			break;
5966 		/* Fallthrough */
5967 	case RTE_FLOW_FIELD_VLAN_ID:
5968 	case RTE_FLOW_FIELD_IPV4_PROTO:
5969 	case RTE_FLOW_FIELD_IPV6_PROTO:
5970 	case RTE_FLOW_FIELD_IPV6_DSCP:
5971 	case RTE_FLOW_FIELD_IPV6_ECN:
5972 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
5973 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
5974 	case RTE_FLOW_FIELD_ESP_PROTO:
5975 	case RTE_FLOW_FIELD_ESP_SPI:
5976 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
5977 	case RTE_FLOW_FIELD_VXLAN_VNI:
5978 	case RTE_FLOW_FIELD_VXLAN_LAST_RSVD:
5979 	case RTE_FLOW_FIELD_GENEVE_VNI:
5980 	case RTE_FLOW_FIELD_GENEVE_OPT_TYPE:
5981 	case RTE_FLOW_FIELD_GENEVE_OPT_CLASS:
5982 	case RTE_FLOW_FIELD_GENEVE_OPT_DATA:
5983 	case RTE_FLOW_FIELD_GTP_TEID:
5984 	case RTE_FLOW_FIELD_GTP_PSC_QFI:
5985 		if (data->level > 1)
5986 			return rte_flow_error_set(error, ENOTSUP,
5987 						  RTE_FLOW_ERROR_TYPE_ACTION,
5988 						  data,
5989 						  "inner header fields modification is not supported");
5990 		break;
5991 	case RTE_FLOW_FIELD_MPLS:
5992 		if (data->level == 1)
5993 			return rte_flow_error_set(error, ENOTSUP,
5994 						  RTE_FLOW_ERROR_TYPE_ACTION,
5995 						  data,
5996 						  "outer MPLS header modification is not supported");
5997 		if (data->level > 2)
5998 			return rte_flow_error_set(error, ENOTSUP,
5999 						  RTE_FLOW_ERROR_TYPE_ACTION,
6000 						  data,
6001 						  "inner MPLS header modification is not supported");
6002 		break;
6003 	case RTE_FLOW_FIELD_POINTER:
6004 	case RTE_FLOW_FIELD_VALUE:
6005 	default:
6006 		MLX5_ASSERT(false);
6007 	}
6008 	return 0;
6009 }
6010 
6011 static int
6012 flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
6013 				     const struct rte_flow_action *action,
6014 				     const struct rte_flow_action *mask,
6015 				     struct rte_flow_error *error)
6016 {
6017 	const struct rte_flow_action_modify_field *action_conf = action->conf;
6018 	const struct rte_flow_action_modify_field *mask_conf = mask->conf;
6019 	struct mlx5_priv *priv = dev->data->dev_private;
6020 	struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
6021 	int ret;
6022 
6023 	if (!mask_conf)
6024 		return rte_flow_error_set(error, EINVAL,
6025 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6026 					  "modify_field mask conf is missing");
6027 	if (action_conf->operation != mask_conf->operation)
6028 		return rte_flow_error_set(error, EINVAL,
6029 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6030 				"modify_field operation mask and template are not equal");
6031 	if (action_conf->dst.field != mask_conf->dst.field)
6032 		return rte_flow_error_set(error, EINVAL,
6033 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6034 				"destination field mask and template are not equal");
6035 	if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
6036 	    action_conf->dst.field == RTE_FLOW_FIELD_VALUE ||
6037 	    action_conf->dst.field == RTE_FLOW_FIELD_HASH_RESULT)
6038 		return rte_flow_error_set(error, EINVAL,
6039 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6040 				"immediate value, pointer and hash result cannot be used as destination");
6041 	ret = flow_hw_validate_modify_field_level(&action_conf->dst, false, error);
6042 	if (ret)
6043 		return ret;
6044 	if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM &&
6045 	    !flow_hw_modify_field_is_geneve_opt(action_conf->dst.field)) {
6046 		if (action_conf->dst.tag_index &&
6047 		    !flow_modify_field_support_tag_array(action_conf->dst.field))
6048 			return rte_flow_error_set(error, EINVAL,
6049 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6050 					"destination tag index is not supported");
6051 		if (action_conf->dst.class_id)
6052 			return rte_flow_error_set(error, EINVAL,
6053 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6054 					"destination class id is not supported");
6055 	}
6056 	if (mask_conf->dst.level != UINT8_MAX)
6057 		return rte_flow_error_set(error, EINVAL,
6058 			RTE_FLOW_ERROR_TYPE_ACTION, action,
6059 			"destination encapsulation level must be fully masked");
6060 	if (mask_conf->dst.offset != UINT32_MAX)
6061 		return rte_flow_error_set(error, EINVAL,
6062 			RTE_FLOW_ERROR_TYPE_ACTION, action,
6063 			"destination offset level must be fully masked");
6064 	if (action_conf->src.field != mask_conf->src.field)
6065 		return rte_flow_error_set(error, EINVAL,
6066 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6067 				"source field mask and template are not equal");
6068 	if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
6069 	    action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
6070 		if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM &&
6071 		    !flow_hw_modify_field_is_geneve_opt(action_conf->src.field)) {
6072 			if (action_conf->src.tag_index &&
6073 			    !flow_modify_field_support_tag_array(action_conf->src.field))
6074 				return rte_flow_error_set(error, EINVAL,
6075 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6076 					"source tag index is not supported");
6077 			if (action_conf->src.class_id)
6078 				return rte_flow_error_set(error, EINVAL,
6079 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6080 					"source class id is not supported");
6081 		}
6082 		if (mask_conf->src.level != UINT8_MAX)
6083 			return rte_flow_error_set(error, EINVAL,
6084 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6085 				"source encapsulation level must be fully masked");
6086 		if (mask_conf->src.offset != UINT32_MAX)
6087 			return rte_flow_error_set(error, EINVAL,
6088 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6089 				"source offset level must be fully masked");
6090 		ret = flow_hw_validate_modify_field_level(&action_conf->src, true, error);
6091 		if (ret)
6092 			return ret;
6093 	}
6094 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
6095 	     action_conf->dst.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
6096 	     action_conf->dst.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) ||
6097 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
6098 	     action_conf->src.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
6099 	     action_conf->src.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX))
6100 		return rte_flow_error_set(error, EINVAL,
6101 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6102 					  "tag index is out of range");
6103 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
6104 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->dst.tag_index) == REG_NON) ||
6105 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
6106 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->src.tag_index) == REG_NON))
6107 		return rte_flow_error_set(error, EINVAL,
6108 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6109 					  "tag index is out of range");
6110 	if (mask_conf->width != UINT32_MAX)
6111 		return rte_flow_error_set(error, EINVAL,
6112 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6113 				"modify_field width field must be fully masked");
6114 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
6115 		return rte_flow_error_set(error, EINVAL,
6116 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6117 				"modifying arbitrary place in a packet is not supported");
6118 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
6119 		return rte_flow_error_set(error, EINVAL,
6120 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6121 				"modifying vlan_type is not supported");
6122 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_RANDOM))
6123 		return rte_flow_error_set(error, EINVAL,
6124 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6125 				"modifying random value is not supported");
6126 	/*
6127 	 * GENEVE VNI modification is supported only when the GENEVE header is
6128 	 * parsed natively. When GENEVE options are supported, both the GENEVE
6129 	 * and options headers are parsed by a flex parser.
6130 	 */
6131 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI) &&
6132 	    attr->geneve_tlv_opt)
6133 		return rte_flow_error_set(error, EINVAL,
6134 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6135 				"modifying Geneve VNI is not supported when GENEVE opt is supported");
6136 	if (priv->tlv_options == NULL &&
6137 	    (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_TYPE) ||
6138 	     flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_CLASS) ||
6139 	     flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_DATA)))
6140 		return rte_flow_error_set(error, EINVAL,
6141 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6142 				"modifying Geneve TLV option is supported only after parser configuration");
6143 	/* Due to HW bug, tunnel MPLS header is read only. */
6144 	if (action_conf->dst.field == RTE_FLOW_FIELD_MPLS)
6145 		return rte_flow_error_set(error, EINVAL,
6146 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6147 				"MPLS cannot be used as destination");
6148 	/* ADD_FIELD is not supported for all the fields. */
6149 	if (!flow_hw_modify_field_is_add_dst_valid(action_conf))
6150 		return rte_flow_error_set(error, EINVAL,
6151 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6152 				"invalid add_field destination");
6153 	return 0;
6154 }
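
/*
 * A minimal sketch of a modify_field action spec and mask pair that satisfies
 * the masking rules enforced above (same operation and fields in the mask,
 * level, offset and width fully masked). Copying 32 bits of META into TAG[0]
 * is an arbitrary example; such a pair would be passed as action and mask to
 * rte_flow_actions_template_create().
 *
 * @code{.c}
 * const struct rte_flow_action_modify_field mf = {
 *         .operation = RTE_FLOW_MODIFY_SET,
 *         .dst = { .field = RTE_FLOW_FIELD_TAG, .tag_index = 0 },
 *         .src = { .field = RTE_FLOW_FIELD_META },
 *         .width = 32,
 * };
 * const struct rte_flow_action_modify_field mf_mask = {
 *         .operation = RTE_FLOW_MODIFY_SET,
 *         .dst = { .field = RTE_FLOW_FIELD_TAG,
 *                  .level = UINT8_MAX, .offset = UINT32_MAX },
 *         .src = { .field = RTE_FLOW_FIELD_META,
 *                  .level = UINT8_MAX, .offset = UINT32_MAX },
 *         .width = UINT32_MAX,
 * };
 * @endcode
 */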
6155 
6156 static int
6157 flow_hw_validate_action_port_representor(struct rte_eth_dev *dev __rte_unused,
6158 					 const struct rte_flow_actions_template_attr *attr,
6159 					 const struct rte_flow_action *action,
6160 					 const struct rte_flow_action *mask,
6161 					 struct rte_flow_error *error)
6162 {
6163 	const struct rte_flow_action_ethdev *action_conf = NULL;
6164 	const struct rte_flow_action_ethdev *mask_conf = NULL;
6165 
6166 	/* If transfer is set, port has been validated as proxy port. */
6167 	if (!attr->transfer)
6168 		return rte_flow_error_set(error, EINVAL,
6169 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6170 					  "cannot use port_representor actions"
6171 					  " without an E-Switch");
6172 	if (!action || !mask)
6173 		return rte_flow_error_set(error, EINVAL,
6174 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6175 					  "action and mask configuration must be set");
6176 	action_conf = action->conf;
6177 	mask_conf = mask->conf;
6178 	if (!mask_conf || mask_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR ||
6179 	    !action_conf || action_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
6180 		return rte_flow_error_set(error, EINVAL,
6181 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6182 					  "only eswitch manager port 0xffff is"
6183 					  " supported");
6184 	return 0;
6185 }
6186 
6187 static int
6188 flow_hw_validate_target_port_id(struct rte_eth_dev *dev,
6189 				uint16_t target_port_id)
6190 {
6191 	struct mlx5_priv *port_priv;
6192 	struct mlx5_priv *dev_priv;
6193 
6194 	if (target_port_id == MLX5_REPRESENTED_PORT_ESW_MGR)
6195 		return 0;
6196 
6197 	port_priv = mlx5_port_to_eswitch_info(target_port_id, false);
6198 	if (!port_priv) {
6199 		rte_errno = EINVAL;
6200 		DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for port %u",
6201 			dev->data->port_id, target_port_id);
6202 		return -rte_errno;
6203 	}
6204 
6205 	dev_priv = mlx5_dev_to_eswitch_info(dev);
6206 	if (!dev_priv) {
6207 		rte_errno = EINVAL;
6208 		DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for transfer proxy",
6209 			dev->data->port_id);
6210 		return -rte_errno;
6211 	}
6212 
6213 	if (port_priv->domain_id != dev_priv->domain_id) {
6214 		rte_errno = EINVAL;
6215 		DRV_LOG(ERR, "Port %u target port %u belongs to a different E-Switch domain",
6216 			dev->data->port_id, target_port_id);
6217 		return -rte_errno;
6218 	}
6219 
6220 	return 0;
6221 }
6222 
6223 static int
6224 flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
6225 					 const struct rte_flow_action *action,
6226 					 const struct rte_flow_action *mask,
6227 					 struct rte_flow_error *error)
6228 {
6229 	const struct rte_flow_action_ethdev *action_conf = action->conf;
6230 	const struct rte_flow_action_ethdev *mask_conf = mask->conf;
6231 	struct mlx5_priv *priv = dev->data->dev_private;
6232 
6233 	if (!priv->sh->config.dv_esw_en)
6234 		return rte_flow_error_set(error, EINVAL,
6235 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6236 					  "cannot use represented_port actions"
6237 					  " without an E-Switch");
6238 	if (mask_conf && mask_conf->port_id) {
6239 		if (!action_conf)
6240 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
6241 						  action, "port index was not provided");
6242 
6243 		if (flow_hw_validate_target_port_id(dev, action_conf->port_id))
6244 			return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
6245 						  action, "port index is invalid");
6246 	}
6247 	return 0;
6248 }
6249 
6250 /**
6251  * Validate AGE action.
6252  *
6253  * @param[in] dev
6254  *   Pointer to rte_eth_dev structure.
6255  * @param[in] action
6256  *   Pointer to the indirect action.
6257  * @param[in] action_flags
6258  *   Holds the actions detected until now.
6259  * @param[in] fixed_cnt
6260  *   Indicator if this list has a fixed COUNT action.
6261  * @param[out] error
6262  *   Pointer to error structure.
6263  *
6264  * @return
6265  *   0 on success, a negative errno value otherwise and rte_errno is set.
6266  */
6267 static int
6268 flow_hw_validate_action_age(struct rte_eth_dev *dev,
6269 			    const struct rte_flow_action *action,
6270 			    uint64_t action_flags, bool fixed_cnt,
6271 			    struct rte_flow_error *error)
6272 {
6273 	struct mlx5_priv *priv = dev->data->dev_private;
6274 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
6275 
6276 	if (!priv->sh->cdev->config.devx)
6277 		return rte_flow_error_set(error, ENOTSUP,
6278 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6279 					  NULL, "AGE action not supported");
6280 	if (age_info->ages_ipool == NULL)
6281 		return rte_flow_error_set(error, EINVAL,
6282 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6283 					  "aging pool not initialized");
6284 	if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
6285 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
6286 		return rte_flow_error_set(error, EINVAL,
6287 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6288 					  "duplicate AGE actions set");
6289 	if (fixed_cnt)
6290 		return rte_flow_error_set(error, EINVAL,
6291 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6292 					  "AGE and fixed COUNT combination is not supported");
6293 	return 0;
6294 }
6295 
6296 /**
6297  * Validate count action.
6298  *
6299  * @param[in] dev
6300  *   Pointer to rte_eth_dev structure.
6301  * @param[in] action
6302  *   Pointer to the indirect action.
6303  * @param[in] mask
6304  *   Pointer to the indirect action mask.
6305  * @param[in] action_flags
6306  *   Holds the actions detected until now.
6307  * @param[out] error
6308  *   Pointer to error structure.
6309  *
6310  * @return
6311  *   0 on success, a negative errno value otherwise and rte_errno is set.
6312  */
6313 static int
6314 flow_hw_validate_action_count(struct rte_eth_dev *dev,
6315 			      const struct rte_flow_action *action,
6316 			      const struct rte_flow_action *mask,
6317 			      uint64_t action_flags,
6318 			      struct rte_flow_error *error)
6319 {
6320 	struct mlx5_priv *priv = dev->data->dev_private;
6321 	const struct rte_flow_action_count *count = mask->conf;
6322 
6323 	if (!priv->sh->cdev->config.devx)
6324 		return rte_flow_error_set(error, ENOTSUP,
6325 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6326 					  "count action not supported");
6327 	if (!priv->hws_cpool)
6328 		return rte_flow_error_set(error, EINVAL,
6329 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6330 					  "counters pool not initialized");
6331 	if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
6332 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
6333 		return rte_flow_error_set(error, EINVAL,
6334 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6335 					  "duplicate count actions set");
6336 	if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
6337 		return rte_flow_error_set(error, EINVAL,
6338 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
6339 					  "AGE and COUNT action shared by mask combination is not supported");
6340 	return 0;
6341 }
6342 
6343 /**
6344  * Validate meter_mark action.
6345  *
6346  * @param[in] dev
6347  *   Pointer to rte_eth_dev structure.
6348  * @param[in] action
6349  *   Pointer to the indirect action.
6350  * @param[in] indirect
6351  *   If true, then provided action was passed using an indirect action.
6352  * @param[out] error
6353  *   Pointer to error structure.
6354  *
6355  * @return
6356  *   0 on success, a negative errno value otherwise and rte_errno is set.
6357  */
6358 static int
6359 flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
6360 			      const struct rte_flow_action *action,
6361 			      bool indirect,
6362 			      struct rte_flow_error *error)
6363 {
6364 	struct mlx5_priv *priv = dev->data->dev_private;
6365 
6366 	RTE_SET_USED(action);
6367 
6368 	if (!priv->sh->cdev->config.devx)
6369 		return rte_flow_error_set(error, ENOTSUP,
6370 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6371 					  "meter_mark action not supported");
6372 	if (!indirect && priv->shared_host)
6373 		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action,
6374 					  "meter_mark action can only be used on host port");
6375 	if (!priv->hws_mpool)
6376 		return rte_flow_error_set(error, EINVAL,
6377 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6378 					  "meter_mark pool not initialized");
6379 	return 0;
6380 }
6381 
6382 /**
6383  * Validate indirect action.
6384  *
6385  * @param[in] dev
6386  *   Pointer to rte_eth_dev structure.
6387  * @param[in] action
6388  *   Pointer to the indirect action.
6389  * @param[in] mask
6390  *   Pointer to the indirect action mask.
6391  * @param[in, out] action_flags
6392  *   Holds the actions detected until now.
6393  * @param[in, out] fixed_cnt
6394  *   Pointer to indicator if this list has a fixed COUNT action.
6395  * @param[out] error
6396  *   Pointer to error structure.
6397  *
6398  * @return
6399  *   0 on success, a negative errno value otherwise and rte_errno is set.
6400  */
6401 static int
6402 flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
6403 				 const struct rte_flow_action *action,
6404 				 const struct rte_flow_action *mask,
6405 				 uint64_t *action_flags, bool *fixed_cnt,
6406 				 struct rte_flow_error *error)
6407 {
6408 	uint32_t type;
6409 	int ret;
6410 
6411 	if (!mask)
6412 		return rte_flow_error_set(error, EINVAL,
6413 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6414 					  "Unable to determine indirect action type without a mask specified");
6415 	type = mask->type;
6416 	switch (type) {
6417 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
6418 		ret = flow_hw_validate_action_meter_mark(dev, mask, true, error);
6419 		if (ret < 0)
6420 			return ret;
6421 		*action_flags |= MLX5_FLOW_ACTION_METER;
6422 		break;
6423 	case RTE_FLOW_ACTION_TYPE_RSS:
6424 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
6425 		*action_flags |= MLX5_FLOW_ACTION_RSS;
6426 		break;
6427 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6428 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
6429 		*action_flags |= MLX5_FLOW_ACTION_CT;
6430 		break;
6431 	case RTE_FLOW_ACTION_TYPE_COUNT:
6432 		if (action->conf && mask->conf) {
6433 			if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
6434 			    (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
6435 				/*
6436 				 * AGE cannot use an indirect counter that is
6437 				 * shared with other flow rules.
6438 				 */
6439 				return rte_flow_error_set(error, EINVAL,
6440 						  RTE_FLOW_ERROR_TYPE_ACTION,
6441 						  NULL,
6442 						  "AGE and fixed COUNT combination is not supported");
6443 			*fixed_cnt = true;
6444 		}
6445 		ret = flow_hw_validate_action_count(dev, action, mask,
6446 						    *action_flags, error);
6447 		if (ret < 0)
6448 			return ret;
6449 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
6450 		break;
6451 	case RTE_FLOW_ACTION_TYPE_AGE:
6452 		ret = flow_hw_validate_action_age(dev, action, *action_flags,
6453 						  *fixed_cnt, error);
6454 		if (ret < 0)
6455 			return ret;
6456 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
6457 		break;
6458 	case RTE_FLOW_ACTION_TYPE_QUOTA:
6459 		/* TODO: add proper quota verification */
6460 		*action_flags |= MLX5_FLOW_ACTION_QUOTA;
6461 		break;
6462 	default:
6463 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
6464 		return rte_flow_error_set(error, ENOTSUP,
6465 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
6466 					  "Unsupported indirect action type");
6467 	}
6468 	return 0;
6469 }
6470 
6471 /**
6472  * Validate ipv6_ext_push action.
6473  *
6474  * @param[in] dev
6475  *   Pointer to rte_eth_dev structure.
6476  * @param[in] action
6477  *   Pointer to the ipv6_ext_push action.
6478  * @param[out] error
6479  *   Pointer to error structure.
6480  *
6481  * @return
6482  *   0 on success, a negative errno value otherwise and rte_errno is set.
6483  */
6484 static int
6485 flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
6486 				      const struct rte_flow_action *action,
6487 				      struct rte_flow_error *error)
6488 {
6489 	const struct rte_flow_action_ipv6_ext_push *raw_push_data = action->conf;
6490 
6491 	if (!raw_push_data || !raw_push_data->size || !raw_push_data->data)
6492 		return rte_flow_error_set(error, EINVAL,
6493 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6494 					  "invalid ipv6_ext_push data");
6495 	if (raw_push_data->type != IPPROTO_ROUTING ||
6496 	    raw_push_data->size > MLX5_PUSH_MAX_LEN)
6497 		return rte_flow_error_set(error, EINVAL,
6498 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6499 					  "Unsupported ipv6_ext_push type or length");
6500 	return 0;
6501 }
6502 
6503 /**
6504  * Process `... / raw_decap / raw_encap / ...` actions sequence.
6505  * The PMD handles the sequence as a single encap or decap reformat action,
6506  * depending on the raw_encap configuration.
6507  *
6508  * The function assumes that the raw_decap / raw_encap location
6509  * in actions template list complies with relative HWS actions order:
6510  * in the actions template list complies with the relative HWS actions order
6511  * required for the reformat configuration:
6512  * ENCAP configuration must appear before [JUMP|DROP|PORT];
6513  */
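 *
 * For illustration only (hypothetical sizes and variable names, not taken
 * from this file), a template fragment like:
 *
 * @code{.c}
 * struct rte_flow_action_raw_decap decap_conf = { .size = 50 };
 * struct rte_flow_action_raw_encap encap_conf = { .data = eth_hdr, .size = 14 };
 * const struct rte_flow_action tmpl[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap_conf },
 *     { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap_conf },
 *     { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump_conf },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 *
 * is classified as a decap reformat (MLX5_FLOW_ACTION_DECAP), because the
 * raw_encap size is below MLX5_ENCAPSULATION_DECISION_SIZE; with a full
 * tunnel header rebuilt by raw_encap the same pair would be classified as
 * an encap reformat (MLX5_FLOW_ACTION_ENCAP).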
6514 static uint64_t
6515 mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
6516 			       uint32_t encap_ind, uint64_t flags)
6517 {
6518 	const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
6519 
6520 	if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
6521 		return MLX5_FLOW_ACTION_ENCAP;
6522 	if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6523 		return MLX5_FLOW_ACTION_ENCAP;
6524 	return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
6525 	       MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
6526 }
6527 
6528 enum mlx5_hw_indirect_list_relative_position {
6529 	MLX5_INDIRECT_LIST_POSITION_UNKNOWN = -1,
6530 	MLX5_INDIRECT_LIST_POSITION_BEFORE_MH = 0,
6531 	MLX5_INDIRECT_LIST_POSITION_AFTER_MH,
6532 };
6533 
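/*
 * Classify an INDIRECT_LIST action with respect to the modify header action,
 * i.e. whether HWS action ordering requires it to be placed before or after
 * a MODIFY_FIELD action expanded into the same template.
 */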
6534 static enum mlx5_hw_indirect_list_relative_position
6535 mlx5_hw_indirect_list_mh_position(const struct rte_flow_action *action)
6536 {
6537 	const struct rte_flow_action_indirect_list *conf = action->conf;
6538 	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(conf->handle);
6539 	enum mlx5_hw_indirect_list_relative_position pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6540 	const union {
6541 		struct mlx5_indlst_legacy *legacy;
6542 		struct mlx5_hw_encap_decap_action *reformat;
6543 		struct rte_flow_action_list_handle *handle;
6544 	} h = { .handle = conf->handle};
6545 
6546 	switch (list_type) {
6547 	case  MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
6548 		switch (h.legacy->legacy_type) {
6549 		case RTE_FLOW_ACTION_TYPE_AGE:
6550 		case RTE_FLOW_ACTION_TYPE_COUNT:
6551 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6552 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
6553 		case RTE_FLOW_ACTION_TYPE_QUOTA:
6554 			pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6555 			break;
6556 		case RTE_FLOW_ACTION_TYPE_RSS:
6557 			pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6558 			break;
6559 		default:
6560 			pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6561 			break;
6562 		}
6563 		break;
6564 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
6565 		pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6566 		break;
6567 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
6568 		switch (h.reformat->action_type) {
6569 		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
6570 		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
6571 			pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6572 			break;
6573 		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
6574 		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
6575 			pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6576 			break;
6577 		default:
6578 			pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6579 			break;
6580 		}
6581 		break;
6582 	default:
6583 		pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6584 		break;
6585 	}
6586 	return pos;
6587 }
6588 
6589 #define MLX5_HW_EXPAND_MH_FAILED 0xffff
6590 
6591 static inline uint16_t
6592 flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
6593 				     struct rte_flow_action masks[],
6594 				     const struct rte_flow_action *mf_actions,
6595 				     const struct rte_flow_action *mf_masks,
6596 				     uint64_t flags, uint32_t act_num,
6597 				     uint32_t mf_num)
6598 {
6599 	uint32_t i, tail;
6600 
6601 	MLX5_ASSERT(actions && masks);
6602 	MLX5_ASSERT(mf_num > 0);
6603 	if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
6604 		/*
6605 		 * Application action template already has Modify Field.
6606 		 * Its location will be used in DR.
6607 		 * Expanded MF action can be added before the END.
6608 		 */
6609 		i = act_num - 1;
6610 		goto insert;
6611 	}
6612 	/**
6613 	 * Locate the first action positioned BEFORE the new MF.
6614 	 *
6615 	 * Search for a place to insert modify header
6616 	 * from the END action backwards:
6617 	 * 1. END is always present in actions array
6618 	 * 2. END location is always at action[act_num - 1]
6619 	 * 3. END always positioned AFTER modify field location
6620 	 *
6621 	 * Relative actions order is the same for RX, TX and FDB.
6622 	 *
6623 	 * Current actions order (draft-3)
6624 	 * @see action_order_arr[]
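	 *
	 * Example (illustrative): for an application template
	 * { MARK, QUEUE, END } the backward scan skips QUEUE (allowed after
	 * modify header) and stops at MARK, so the expanded modify_field
	 * actions are inserted between MARK and QUEUE.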
6625 	 */
6626 	for (i = act_num - 2; (int)i >= 0; i--) {
6627 		enum mlx5_hw_indirect_list_relative_position pos;
6628 		enum rte_flow_action_type type = actions[i].type;
6629 		uint64_t reformat_type;
6630 
6631 		if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
6632 			type = masks[i].type;
6633 		switch (type) {
6634 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6635 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6636 		case RTE_FLOW_ACTION_TYPE_DROP:
6637 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
6638 		case RTE_FLOW_ACTION_TYPE_JUMP:
6639 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
6640 		case RTE_FLOW_ACTION_TYPE_QUEUE:
6641 		case RTE_FLOW_ACTION_TYPE_RSS:
6642 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
6643 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
6644 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6645 		case RTE_FLOW_ACTION_TYPE_VOID:
6646 		case RTE_FLOW_ACTION_TYPE_END:
6647 			break;
6648 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6649 			reformat_type =
6650 				mlx5_decap_encap_reformat_type(actions, i,
6651 							       flags);
6652 			if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
6653 				i++;
6654 				goto insert;
6655 			}
6656 			if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6657 				i--;
6658 			break;
6659 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
6660 			pos = mlx5_hw_indirect_list_mh_position(&actions[i]);
6661 			if (pos == MLX5_INDIRECT_LIST_POSITION_UNKNOWN)
6662 				return MLX5_HW_EXPAND_MH_FAILED;
6663 			if (pos == MLX5_INDIRECT_LIST_POSITION_BEFORE_MH)
6664 				goto insert;
6665 			break;
6666 		default:
6667 			i++; /* new MF inserted AFTER actions[i] */
6668 			goto insert;
6669 		}
6670 	}
6671 	i = 0;
6672 insert:
6673 	tail = act_num - i; /* num action to move */
6674 	memmove(actions + i + mf_num, actions + i, sizeof(actions[0]) * tail);
6675 	memcpy(actions + i, mf_actions, sizeof(actions[0]) * mf_num);
6676 	memmove(masks + i + mf_num, masks + i, sizeof(masks[0]) * tail);
6677 	memcpy(masks + i, mf_masks, sizeof(masks[0]) * mf_num);
6678 	return i;
6679 }
6680 
6681 static int
6682 flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
6683 				  const
6684 				  struct rte_flow_actions_template_attr *attr,
6685 				  const struct rte_flow_action *action,
6686 				  const struct rte_flow_action *mask,
6687 				  struct rte_flow_error *error)
6688 {
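/* True when (ptr)->conf is set and field f of the struct t it points to is non-zero. */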
6689 #define X_FIELD(ptr, t, f) (((ptr)->conf) && ((t *)((ptr)->conf))->f)
6690 
6691 	const bool masked_push =
6692 		X_FIELD(mask + MLX5_HW_VLAN_PUSH_TYPE_IDX,
6693 			const struct rte_flow_action_of_push_vlan, ethertype);
6694 	bool masked_param;
6695 
6696 	/*
6697 	 * Mandatory actions order:
6698 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
6699 	 */
6700 	RTE_SET_USED(dev);
6701 	RTE_SET_USED(attr);
6702 	/* Check that the mask type matches OF_PUSH_VLAN */
6703 	if (mask[MLX5_HW_VLAN_PUSH_TYPE_IDX].type !=
6704 	    RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
6705 		return rte_flow_error_set(error, EINVAL,
6706 					  RTE_FLOW_ERROR_TYPE_ACTION,
6707 					  action, "OF_PUSH_VLAN: mask does not match");
6708 	/* Check that the second template and mask items are SET_VLAN_VID */
6709 	if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
6710 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID ||
6711 	    mask[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
6712 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
6713 		return rte_flow_error_set(error, EINVAL,
6714 					  RTE_FLOW_ERROR_TYPE_ACTION,
6715 					  action, "OF_PUSH_VLAN: invalid actions order");
6716 	masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_VID_IDX,
6717 			       const struct rte_flow_action_of_set_vlan_vid,
6718 			       vlan_vid);
6719 	/*
6720 	 * PMD requires the OF_SET_VLAN_VID mask to match the OF_PUSH_VLAN mask
6721 	 */
6722 	if (masked_push ^ masked_param)
6723 		return rte_flow_error_set(error, EINVAL,
6724 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6725 					  "OF_SET_VLAN_VID: mask does not match OF_PUSH_VLAN");
6726 	if (is_of_vlan_pcp_present(action)) {
6727 		if (mask[MLX5_HW_VLAN_PUSH_PCP_IDX].type !=
6728 		     RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
6729 			return rte_flow_error_set(error, EINVAL,
6730 						  RTE_FLOW_ERROR_TYPE_ACTION,
6731 						  action, "OF_SET_VLAN_PCP: missing mask configuration");
6732 		masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_PCP_IDX,
6733 				       const struct
6734 				       rte_flow_action_of_set_vlan_pcp,
6735 				       vlan_pcp);
6736 		/*
6737 		 * PMD requires the OF_SET_VLAN_PCP mask to match the OF_PUSH_VLAN mask
6738 		 */
6739 		if (masked_push ^ masked_param)
6740 			return rte_flow_error_set(error, EINVAL,
6741 						  RTE_FLOW_ERROR_TYPE_ACTION, action,
6742 						  "OF_SET_VLAN_PCP: mask does not match OF_PUSH_VLAN");
6743 	}
6744 	return 0;
6745 #undef X_FIELD
6746 }
6747 
6748 static int
6749 flow_hw_validate_action_default_miss(struct rte_eth_dev *dev,
6750 				     const struct rte_flow_actions_template_attr *attr,
6751 				     uint64_t action_flags,
6752 				     struct rte_flow_error *error)
6753 {
6754 	/*
6755 	 * The private DEFAULT_MISS action is used internally only for LACP control
6756 	 * flows, so this validation could be skipped for them. It is kept for now
6757 	 * since it is executed only once, at template creation.
6758 	 */
6759 	struct mlx5_priv *priv = dev->data->dev_private;
6760 
6761 	if (!attr->ingress || attr->egress || attr->transfer)
6762 		return rte_flow_error_set(error, EINVAL,
6763 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6764 					  "DEFAULT MISS is only supported in ingress.");
6765 	if (!priv->hw_def_miss)
6766 		return rte_flow_error_set(error, EINVAL,
6767 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6768 					  "DEFAULT MISS action does not exist.");
6769 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
6770 		return rte_flow_error_set(error, EINVAL,
6771 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6772 					  "DEFAULT MISS should be the only termination.");
6773 	return 0;
6774 }
6775 
6776 static int
6777 flow_hw_validate_action_nat64(struct rte_eth_dev *dev,
6778 			      const struct rte_flow_actions_template_attr *attr,
6779 			      const struct rte_flow_action *action,
6780 			      const struct rte_flow_action *mask,
6781 			      uint64_t action_flags,
6782 			      struct rte_flow_error *error)
6783 {
6784 	struct mlx5_priv *priv = dev->data->dev_private;
6785 	const struct rte_flow_action_nat64 *nat64_c;
6786 	enum rte_flow_nat64_type cov_type;
6787 
6788 	RTE_SET_USED(action_flags);
6789 	if (mask->conf && ((const struct rte_flow_action_nat64 *)mask->conf)->type) {
6790 		nat64_c = (const struct rte_flow_action_nat64 *)action->conf;
6791 		cov_type = nat64_c->type;
6792 		if ((attr->ingress && !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][cov_type]) ||
6793 		    (attr->egress && !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][cov_type]) ||
6794 		    (attr->transfer && !priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][cov_type]))
6795 			goto err_out;
6796 	} else {
6797 		/*
6798 		 * Usually, the action will be used in both directions. For non-masked
6799 		 * actions, both directions are checked.
6800 		 */
6801 		if (attr->ingress)
6802 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_6TO4] ||
6803 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_4TO6])
6804 				goto err_out;
6805 		if (attr->egress)
6806 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][RTE_FLOW_NAT64_6TO4] ||
6807 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][RTE_FLOW_NAT64_4TO6])
6808 				goto err_out;
6809 		if (attr->transfer)
6810 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][RTE_FLOW_NAT64_6TO4] ||
6811 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][RTE_FLOW_NAT64_4TO6])
6812 				goto err_out;
6813 	}
6814 	return 0;
6815 err_out:
6816 	return rte_flow_error_set(error, EOPNOTSUPP, RTE_FLOW_ERROR_TYPE_ACTION,
6817 				  NULL, "NAT64 action is not supported.");
6818 }
6819 
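/*
 * Validate JUMP action at template creation time.
 * Only a masked group is validated here: the group is translated with the
 * template attributes and a jump to the root table is rejected.
 */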
6820 static int
6821 flow_hw_validate_action_jump(struct rte_eth_dev *dev,
6822 			     const struct rte_flow_actions_template_attr *attr,
6823 			     const struct rte_flow_action *action,
6824 			     const struct rte_flow_action *mask,
6825 			     struct rte_flow_error *error)
6826 {
6827 	const struct rte_flow_action_jump *m = mask->conf;
6828 	const struct rte_flow_action_jump *v = action->conf;
6829 	struct mlx5_flow_template_table_cfg cfg = {
6830 		.external = true,
6831 		.attr = {
6832 			.flow_attr = {
6833 				.ingress = attr->ingress,
6834 				.egress = attr->egress,
6835 				.transfer = attr->transfer,
6836 			},
6837 		},
6838 	};
6839 	uint32_t t_group = 0;
6840 
6841 	if (!m || !m->group)
6842 		return 0;
6843 	if (!v)
6844 		return rte_flow_error_set(error, EINVAL,
6845 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6846 					  "Invalid jump action configuration");
6847 	if (flow_hw_translate_group(dev, &cfg, v->group, &t_group, error))
6848 		return -rte_errno;
6849 	if (t_group == 0)
6850 		return rte_flow_error_set(error, EINVAL,
6851 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6852 					  "Unsupported action - jump to root table");
6853 	return 0;
6854 }
6855 
6856 static int
6857 mlx5_flow_validate_action_jump_to_table_index(const struct rte_flow_action *action,
6858 			     const struct rte_flow_action *mask,
6859 			     struct rte_flow_error *error)
6860 {
6861 	const struct rte_flow_action_jump_to_table_index *m = mask->conf;
6862 	const struct rte_flow_action_jump_to_table_index *v = action->conf;
6863 	struct mlx5dr_action *jump_action;
6864 	uint32_t t_group = 0;
6865 
6866 	if (!m || !m->table)
6867 		return 0;
6868 	if (!v)
6869 		return rte_flow_error_set(error, EINVAL,
6870 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6871 					  "Invalid jump to matcher action configuration");
6872 	t_group = v->table->grp->group_id;
6873 	if (t_group == 0)
6874 		return rte_flow_error_set(error, EINVAL,
6875 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6876 					  "Unsupported action - jump to root table");
6877 	if (likely(!rte_flow_template_table_resizable(0, &v->table->cfg.attr))) {
6878 		jump_action = v->table->matcher_info[0].jump;
6879 	} else {
6880 		uint32_t selector;
6881 		rte_rwlock_read_lock(&v->table->matcher_replace_rwlk);
6882 		selector = v->table->matcher_selector;
6883 		jump_action = v->table->matcher_info[selector].jump;
6884 		rte_rwlock_read_unlock(&v->table->matcher_replace_rwlk);
6885 	}
6886 	if (jump_action == NULL)
6887 		return rte_flow_error_set(error, EINVAL,
6888 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6889 					  "Unsupported action - table is not a rule array");
6890 	return 0;
6891 }
6892 
6893 static int
6894 mlx5_hw_validate_action_mark(struct rte_eth_dev *dev,
6895 			     const struct rte_flow_action *template_action,
6896 			     const struct rte_flow_action *template_mask,
6897 			     uint64_t action_flags,
6898 			     const struct rte_flow_actions_template_attr *template_attr,
6899 			     struct rte_flow_error *error)
6900 {
6901 	const struct rte_flow_action_mark *mark_mask = template_mask->conf;
6902 	const struct rte_flow_action *action =
6903 		mark_mask && mark_mask->id ? template_action :
6904 		&(const struct rte_flow_action) {
6905 		.type = RTE_FLOW_ACTION_TYPE_MARK,
6906 		.conf = &(const struct rte_flow_action_mark) {
6907 			.id = MLX5_FLOW_MARK_MAX - 1
6908 		}
6909 	};
6910 	const struct rte_flow_attr attr = {
6911 		.ingress = template_attr->ingress,
6912 		.egress = template_attr->egress,
6913 		.transfer = template_attr->transfer
6914 	};
6915 
6916 	return mlx5_flow_validate_action_mark(dev, action, action_flags,
6917 					      &attr, error);
6918 }
6919 
6920 static int
6921 mlx5_hw_validate_action_queue(struct rte_eth_dev *dev,
6922 			      const struct rte_flow_action *template_action,
6923 			      const struct rte_flow_action *template_mask,
6924 			      const struct rte_flow_actions_template_attr *template_attr,
6925 			      uint64_t action_flags,
6926 			      struct rte_flow_error *error)
6927 {
6928 	const struct rte_flow_action_queue *queue_mask = template_mask->conf;
6929 	const struct rte_flow_attr attr = {
6930 		.ingress = template_attr->ingress,
6931 		.egress = template_attr->egress,
6932 		.transfer = template_attr->transfer
6933 	};
6934 	bool masked = queue_mask != NULL && queue_mask->index;
6935 
6936 	if (template_attr->egress || template_attr->transfer)
6937 		return rte_flow_error_set(error, EINVAL,
6938 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6939 					  "QUEUE action supported for ingress only");
6940 	if (masked)
6941 		return mlx5_flow_validate_action_queue(template_action, action_flags, dev,
6942 						       &attr, error);
6943 	else
6944 		return 0;
6945 }
6946 
6947 static int
6948 mlx5_hw_validate_action_rss(struct rte_eth_dev *dev,
6949 			      const struct rte_flow_action *template_action,
6950 			      const struct rte_flow_action *template_mask,
6951 			      const struct rte_flow_actions_template_attr *template_attr,
6952 			      __rte_unused uint64_t action_flags,
6953 			      struct rte_flow_error *error)
6954 {
6955 	const struct rte_flow_action_rss *mask = template_mask->conf;
6956 
6957 	if (template_attr->egress || template_attr->transfer)
6958 		return rte_flow_error_set(error, EINVAL,
6959 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6960 					  "RSS action supported for ingress only");
6961 	if (mask != NULL)
6962 		return mlx5_validate_action_rss(dev, template_action, error);
6963 	else
6964 		return 0;
6965 }
6966 
6967 static int
6968 mlx5_hw_validate_action_l2_encap(struct rte_eth_dev *dev,
6969 				 const struct rte_flow_action *template_action,
6970 				 const struct rte_flow_action *template_mask,
6971 				 const struct rte_flow_actions_template_attr *template_attr,
6972 				 uint64_t action_flags,
6973 				 struct rte_flow_error *error)
6974 {
6975 	const struct rte_flow_action_vxlan_encap default_action_conf = {
6976 		.definition = (struct rte_flow_item *)
6977 			(struct rte_flow_item [1]) {
6978 			[0] = { .type = RTE_FLOW_ITEM_TYPE_END }
6979 		}
6980 	};
6981 	const struct rte_flow_action *action = template_mask->conf ?
6982 		template_action : &(const struct rte_flow_action) {
6983 			.type = template_mask->type,
6984 			.conf = &default_action_conf
6985 	};
6986 	const struct rte_flow_attr attr = {
6987 		.ingress = template_attr->ingress,
6988 		.egress = template_attr->egress,
6989 		.transfer = template_attr->transfer
6990 	};
6991 
6992 	return mlx5_flow_dv_validate_action_l2_encap(dev, action_flags, action,
6993 						     &attr, error);
6994 }
6995 
6996 static int
6997 mlx5_hw_validate_action_l2_decap(struct rte_eth_dev *dev,
6998 				 const struct rte_flow_action *template_action,
6999 				 const struct rte_flow_action *template_mask,
7000 				 const struct rte_flow_actions_template_attr *template_attr,
7001 				 uint64_t action_flags,
7002 				 struct rte_flow_error *error)
7003 {
7004 	const struct rte_flow_action_vxlan_encap default_action_conf = {
7005 		.definition = (struct rte_flow_item *)
7006 			(struct rte_flow_item [1]) {
7007 				[0] = { .type = RTE_FLOW_ITEM_TYPE_END }
7008 			}
7009 	};
7010 	const struct rte_flow_action *action = template_mask->conf ?
7011 					       template_action : &(const struct rte_flow_action) {
7012 			.type = template_mask->type,
7013 			.conf = &default_action_conf
7014 		};
7015 	const struct rte_flow_attr attr = {
7016 		.ingress = template_attr->ingress,
7017 		.egress = template_attr->egress,
7018 		.transfer = template_attr->transfer
7019 	};
7020 	uint64_t item_flags =
7021 		action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
7022 		MLX5_FLOW_LAYER_VXLAN : 0;
7023 
7024 	return mlx5_flow_dv_validate_action_decap(dev, action_flags, action,
7025 						  item_flags, &attr, error);
7026 }
7027 
7028 static int
7029 mlx5_hw_validate_action_conntrack(struct rte_eth_dev *dev,
7030 				  const struct rte_flow_action *template_action,
7031 				  const struct rte_flow_action *template_mask,
7032 				  const struct rte_flow_actions_template_attr *template_attr,
7033 				  uint64_t action_flags,
7034 				  struct rte_flow_error *error)
7035 {
7036 	RTE_SET_USED(template_action);
7037 	RTE_SET_USED(template_mask);
7038 	RTE_SET_USED(template_attr);
7039 	return mlx5_flow_dv_validate_action_aso_ct(dev, action_flags,
7040 						   MLX5_FLOW_LAYER_OUTER_L4_TCP,
7041 						   false, error);
7042 }
7043 
7044 static int
7045 flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
7046 				  const struct rte_flow_action *mask,
7047 				  struct rte_flow_error *error)
7048 {
7049 	const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
7050 	const struct rte_flow_action_raw_encap *action_conf = action->conf;
7051 
7052 	if (!mask_conf || !mask_conf->size)
7053 		return rte_flow_error_set(error, EINVAL,
7054 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
7055 					  "raw_encap: size must be masked");
7056 	if (!action_conf || !action_conf->size)
7057 		return rte_flow_error_set(error, EINVAL,
7058 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
7059 					  "raw_encap: invalid action configuration");
7060 	if (mask_conf->data && !action_conf->data)
7061 		return rte_flow_error_set(error, EINVAL,
7062 					  RTE_FLOW_ERROR_TYPE_ACTION,
7063 					  action, "raw_encap: masked data is missing");
7064 	return 0;
7065 }
7066 
7067 
7068 static int
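/*
 * Validate a raw reformat sequence in an actions template: either a single
 * raw_encap, or a raw_decap optionally followed by raw_encap. The sequence is
 * then passed to the generic raw encap/decap validation.
 */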
7069 flow_hw_validate_action_raw_reformat(struct rte_eth_dev *dev,
7070 				     const struct rte_flow_action *template_action,
7071 				     const struct rte_flow_action *template_mask,
7072 				     const struct
7073 				     rte_flow_actions_template_attr *template_attr,
7074 				     uint64_t *action_flags,
7075 				     struct rte_flow_error *error)
7076 {
7077 	const struct rte_flow_action *encap_action = NULL;
7078 	const struct rte_flow_action *encap_mask = NULL;
7079 	const struct rte_flow_action_raw_decap *raw_decap = NULL;
7080 	const struct rte_flow_action_raw_encap *raw_encap = NULL;
7081 	const struct rte_flow_attr attr = {
7082 		.ingress = template_attr->ingress,
7083 		.egress = template_attr->egress,
7084 		.transfer = template_attr->transfer
7085 	};
7086 	uint64_t item_flags = 0;
7087 	int ret, actions_n = 0;
7088 
7089 	if (template_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
7090 		raw_decap = template_mask->conf ?
7091 			    template_action->conf : &empty_decap;
7092 		if ((template_action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7093 			if ((template_mask + 1)->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
7094 				return rte_flow_error_set(error, EINVAL,
7095 							  RTE_FLOW_ERROR_TYPE_ACTION,
7096 							  template_mask + 1, "invalid mask type");
7097 			encap_action = template_action + 1;
7098 			encap_mask = template_mask + 1;
7099 		}
7100 	} else {
7101 		encap_action = template_action;
7102 		encap_mask = template_mask;
7103 	}
7104 	if (encap_action) {
7105 		raw_encap = encap_action->conf;
7106 		ret = flow_hw_validate_action_raw_encap(encap_action,
7107 							encap_mask, error);
7108 		if (ret)
7109 			return ret;
7110 	}
7111 	return mlx5_flow_dv_validate_action_raw_encap_decap(dev, raw_decap,
7112 							    raw_encap, &attr,
7113 							    action_flags,
7114 							    &actions_n,
7115 							    template_action,
7116 							    item_flags, error);
7117 }
7118 
7119 
7120 
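/**
 * Validate the actions and masks of an actions template.
 *
 * Checks that every mask entry type matches its action, dispatches per-action
 * validation and accumulates the detected action flags.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the actions template attributes.
 * @param[in] actions
 *   List of actions.
 * @param[in] masks
 *   List of action masks.
 * @param[out] act_flags
 *   Detected action flags, ignored if NULL.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */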
7121 static int
7122 mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
7123 			      const struct rte_flow_actions_template_attr *attr,
7124 			      const struct rte_flow_action actions[],
7125 			      const struct rte_flow_action masks[],
7126 			      uint64_t *act_flags,
7127 			      struct rte_flow_error *error)
7128 {
7129 	struct mlx5_priv *priv = dev->data->dev_private;
7130 	const struct rte_flow_action_count *count_mask = NULL;
7131 	bool fixed_cnt = false;
7132 	uint64_t action_flags = 0;
7133 	bool actions_end = false;
7134 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7135 	int table_type;
7136 #endif
7137 	uint16_t i;
7138 	int ret;
7139 	const struct rte_flow_action_ipv6_ext_remove *remove_data;
7140 
7141 	if (!mlx5_hw_ctx_validate(dev, error))
7142 		return -rte_errno;
7143 	/* FDB actions are only valid to proxy port. */
7144 	if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
7145 		return rte_flow_error_set(error, EINVAL,
7146 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7147 					  NULL,
7148 					  "transfer actions are only valid to proxy port");
7149 	for (i = 0; !actions_end; ++i) {
7150 		const struct rte_flow_action *action = &actions[i];
7151 		const struct rte_flow_action *mask = &masks[i];
7152 
7153 		MLX5_ASSERT(i < MLX5_HW_MAX_ACTS);
7154 		if (action->type != RTE_FLOW_ACTION_TYPE_INDIRECT &&
7155 		    action->type != mask->type)
7156 			return rte_flow_error_set(error, ENOTSUP,
7157 						  RTE_FLOW_ERROR_TYPE_ACTION,
7158 						  action,
7159 						  "mask type does not match action type");
7160 		switch ((int)action->type) {
7161 		case RTE_FLOW_ACTION_TYPE_VOID:
7162 			break;
7163 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
7164 			break;
7165 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
7166 			ret = flow_hw_validate_action_indirect(dev, action,
7167 							       mask,
7168 							       &action_flags,
7169 							       &fixed_cnt,
7170 							       error);
7171 			if (ret < 0)
7172 				return ret;
7173 			break;
7174 		case RTE_FLOW_ACTION_TYPE_FLAG:
7175 			/* TODO: Validation logic */
7176 			action_flags |= MLX5_FLOW_ACTION_FLAG;
7177 			break;
7178 		case RTE_FLOW_ACTION_TYPE_MARK:
7179 			ret = mlx5_hw_validate_action_mark(dev, action, mask,
7180 							   action_flags,
7181 							   attr, error);
7182 			if (ret)
7183 				return ret;
7184 			action_flags |= MLX5_FLOW_ACTION_MARK;
7185 			break;
7186 		case RTE_FLOW_ACTION_TYPE_DROP:
7187 			ret = mlx5_flow_validate_action_drop
7188 				(dev, action_flags,
7189 				 &(struct rte_flow_attr){.egress = attr->egress},
7190 				 error);
7191 			if (ret)
7192 				return ret;
7193 			action_flags |= MLX5_FLOW_ACTION_DROP;
7194 			break;
7195 		case RTE_FLOW_ACTION_TYPE_JUMP:
7196 			/* Only validate the jump to root table in template stage. */
7197 			ret = flow_hw_validate_action_jump(dev, attr, action, mask, error);
7198 			if (ret)
7199 				return ret;
7200 			action_flags |= MLX5_FLOW_ACTION_JUMP;
7201 			break;
7202 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7203 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
7204 			if (priv->shared_host)
7205 				return rte_flow_error_set(error, ENOTSUP,
7206 							  RTE_FLOW_ERROR_TYPE_ACTION,
7207 							  action,
7208 							  "action not supported in guest port");
7209 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
7210 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
7211 				     MLX5DR_TABLE_TYPE_FDB);
7212 			if (!priv->hw_send_to_kernel[table_type])
7213 				return rte_flow_error_set(error, ENOTSUP,
7214 							  RTE_FLOW_ERROR_TYPE_ACTION,
7215 							  action,
7216 							  "action is not available");
7217 			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
7218 			break;
7219 #endif
7220 		case RTE_FLOW_ACTION_TYPE_QUEUE:
7221 			ret = mlx5_hw_validate_action_queue(dev, action, mask,
7222 							    attr, action_flags,
7223 							    error);
7224 			if (ret)
7225 				return ret;
7226 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
7227 			break;
7228 		case RTE_FLOW_ACTION_TYPE_RSS:
7229 			ret = mlx5_hw_validate_action_rss(dev, action, mask,
7230 							  attr, action_flags,
7231 							  error);
7232 			if (ret)
7233 				return ret;
7234 			action_flags |= MLX5_FLOW_ACTION_RSS;
7235 			break;
7236 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7237 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7238 			ret = mlx5_hw_validate_action_l2_encap(dev, action, mask,
7239 							       attr, action_flags,
7240 							       error);
7241 			if (ret)
7242 				return ret;
7243 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
7244 			break;
7245 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7246 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7247 			ret = mlx5_hw_validate_action_l2_decap(dev, action, mask,
7248 							       attr, action_flags,
7249 							       error);
7250 			if (ret)
7251 				return ret;
7252 			action_flags |= MLX5_FLOW_ACTION_DECAP;
7253 			break;
7254 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7255 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7256 			ret = flow_hw_validate_action_raw_reformat(dev, action,
7257 								   mask, attr,
7258 								   &action_flags,
7259 								   error);
7260 			if (ret)
7261 				return ret;
7262 			if (action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
7263 			   (action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7264 				action_flags |= MLX5_FLOW_XCAP_ACTIONS;
7265 				i++;
7266 			}
7267 			break;
7268 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
7269 			ret = flow_hw_validate_action_ipv6_ext_push(dev, action, error);
7270 			if (ret < 0)
7271 				return ret;
7272 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
7273 			break;
7274 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
7275 			remove_data = action->conf;
7276 			/* Remove action must be shared. */
7277 			if (remove_data->type != IPPROTO_ROUTING || !mask) {
7278 				DRV_LOG(ERR, "Only supports shared IPv6 routing remove");
7279 				return -EINVAL;
7280 			}
7281 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE;
7282 			break;
7283 		case RTE_FLOW_ACTION_TYPE_METER:
7284 			/* TODO: Validation logic */
7285 			action_flags |= MLX5_FLOW_ACTION_METER;
7286 			break;
7287 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
7288 			ret = flow_hw_validate_action_meter_mark(dev, action, false, error);
7289 			if (ret < 0)
7290 				return ret;
7291 			action_flags |= MLX5_FLOW_ACTION_METER;
7292 			break;
7293 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7294 			ret = flow_hw_validate_action_modify_field(dev, action, mask,
7295 								   error);
7296 			if (ret < 0)
7297 				return ret;
7298 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7299 			break;
7300 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7301 			ret = flow_hw_validate_action_represented_port
7302 					(dev, action, mask, error);
7303 			if (ret < 0)
7304 				return ret;
7305 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7306 			break;
7307 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
7308 			ret = flow_hw_validate_action_port_representor
7309 					(dev, attr, action, mask, error);
7310 			if (ret < 0)
7311 				return ret;
7312 			action_flags |= MLX5_FLOW_ACTION_PORT_REPRESENTOR;
7313 			break;
7314 		case RTE_FLOW_ACTION_TYPE_AGE:
7315 			if (count_mask && count_mask->id)
7316 				fixed_cnt = true;
7317 			ret = flow_hw_validate_action_age(dev, action,
7318 							  action_flags,
7319 							  fixed_cnt, error);
7320 			if (ret < 0)
7321 				return ret;
7322 			action_flags |= MLX5_FLOW_ACTION_AGE;
7323 			break;
7324 		case RTE_FLOW_ACTION_TYPE_COUNT:
7325 			ret = flow_hw_validate_action_count(dev, action, mask,
7326 							    action_flags,
7327 							    error);
7328 			if (ret < 0)
7329 				return ret;
7330 			count_mask = mask->conf;
7331 			action_flags |= MLX5_FLOW_ACTION_COUNT;
7332 			break;
7333 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7334 			ret = mlx5_hw_validate_action_conntrack(dev, action, mask,
7335 								attr, action_flags,
7336 								error);
7337 			if (ret)
7338 				return ret;
7339 			action_flags |= MLX5_FLOW_ACTION_CT;
7340 			break;
7341 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7342 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7343 			break;
7344 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7345 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7346 			break;
7347 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7348 			ret = flow_hw_validate_action_push_vlan
7349 					(dev, attr, action, mask, error);
7350 			if (ret != 0)
7351 				return ret;
7352 			i += is_of_vlan_pcp_present(action) ?
7353 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7354 				MLX5_HW_VLAN_PUSH_VID_IDX;
7355 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7356 			break;
7357 		case RTE_FLOW_ACTION_TYPE_NAT64:
7358 			ret = flow_hw_validate_action_nat64(dev, attr, action, mask,
7359 							    action_flags, error);
7360 			if (ret != 0)
7361 				return ret;
7362 			action_flags |= MLX5_FLOW_ACTION_NAT64;
7363 			break;
7364 		case RTE_FLOW_ACTION_TYPE_END:
7365 			actions_end = true;
7366 			break;
7367 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7368 			ret = flow_hw_validate_action_default_miss(dev, attr,
7369 								   action_flags, error);
7370 			if (ret < 0)
7371 				return ret;
7372 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7373 			break;
7374 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
7375 			ret = mlx5_flow_validate_action_jump_to_table_index(action, mask, error);
7376 			if (ret < 0)
7377 				return ret;
7378 			action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX;
7379 			break;
7380 		default:
7381 			return rte_flow_error_set(error, ENOTSUP,
7382 						  RTE_FLOW_ERROR_TYPE_ACTION,
7383 						  action,
7384 						  "action not supported in template API");
7385 		}
7386 	}
7387 	if (act_flags != NULL)
7388 		*act_flags = action_flags;
7389 	return 0;
7390 }
7391 
7392 static int
7393 flow_hw_actions_validate(struct rte_eth_dev *dev,
7394 			 const struct rte_flow_actions_template_attr *attr,
7395 			 const struct rte_flow_action actions[],
7396 			 const struct rte_flow_action masks[],
7397 			 struct rte_flow_error *error)
7398 {
7399 	return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, error);
7400 }
7401 
7402 
7403 static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
7404 	[RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
7405 	[RTE_FLOW_ACTION_TYPE_FLAG] = MLX5DR_ACTION_TYP_TAG,
7406 	[RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
7407 	[RTE_FLOW_ACTION_TYPE_JUMP] = MLX5DR_ACTION_TYP_TBL,
7408 	[RTE_FLOW_ACTION_TYPE_QUEUE] = MLX5DR_ACTION_TYP_TIR,
7409 	[RTE_FLOW_ACTION_TYPE_RSS] = MLX5DR_ACTION_TYP_TIR,
7410 	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
7411 	[RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
7412 	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
7413 	[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
7414 	[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
7415 	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
7416 	[RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = MLX5DR_ACTION_TYP_MISS,
7417 	[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
7418 	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
7419 	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
7420 	[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
7421 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
7422 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
7423 	[RTE_FLOW_ACTION_TYPE_NAT64] = MLX5DR_ACTION_TYP_NAT64,
7424 	[RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX] = MLX5DR_ACTION_TYP_JUMP_TO_MATCHER,
7425 };
7426 
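/*
 * Store the DR action type at the current template offset and record that
 * offset as the translation target of the source flow action.
 */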
7427 static inline void
7428 action_template_set_type(struct rte_flow_actions_template *at,
7429 			 enum mlx5dr_action_type *action_types,
7430 			 unsigned int action_src, uint16_t *curr_off,
7431 			 enum mlx5dr_action_type type)
7432 {
7433 	at->dr_off[action_src] = *curr_off;
7434 	action_types[*curr_off] = type;
7435 	*curr_off = *curr_off + 1;
7436 }
7437 
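/*
 * Translate a shared (indirect) action into its DR action type in the
 * template. AGE and COUNT share a single counter slot: the first occurrence
 * allocates it, subsequent ones reuse the recorded offset.
 */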
7438 static int
7439 flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
7440 					  enum mlx5dr_action_type *action_types,
7441 					  uint16_t *curr_off, uint16_t *cnt_off,
7442 					  struct rte_flow_actions_template *at)
7443 {
7444 	switch (type) {
7445 	case RTE_FLOW_ACTION_TYPE_RSS:
7446 		action_template_set_type(at, action_types, action_src, curr_off,
7447 					 MLX5DR_ACTION_TYP_TIR);
7448 		break;
7449 	case RTE_FLOW_ACTION_TYPE_AGE:
7450 	case RTE_FLOW_ACTION_TYPE_COUNT:
7451 		/*
7452 		 * Both AGE and COUNT actions need a counter; the first one fills
7453 		 * the action_types array, and the second only saves the offset.
7454 		 */
7455 		if (*cnt_off == UINT16_MAX) {
7456 			*cnt_off = *curr_off;
7457 			action_template_set_type(at, action_types,
7458 						 action_src, curr_off,
7459 						 MLX5DR_ACTION_TYP_CTR);
7460 		}
7461 		at->dr_off[action_src] = *cnt_off;
7462 		break;
7463 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7464 		action_template_set_type(at, action_types, action_src, curr_off,
7465 					 MLX5DR_ACTION_TYP_ASO_CT);
7466 		break;
7467 	case RTE_FLOW_ACTION_TYPE_QUOTA:
7468 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
7469 		action_template_set_type(at, action_types, action_src, curr_off,
7470 					 MLX5DR_ACTION_TYP_ASO_METER);
7471 		break;
7472 	default:
7473 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
7474 		return -EINVAL;
7475 	}
7476 	return 0;
7477 }
7478 
7479 
7480 static int
7481 flow_hw_template_actions_list(struct rte_flow_actions_template *at,
7482 			      unsigned int action_src,
7483 			      enum mlx5dr_action_type *action_types,
7484 			      uint16_t *curr_off, uint16_t *cnt_off)
7485 {
7486 	int ret;
7487 	const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
7488 	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
7489 	const union {
7490 		struct mlx5_indlst_legacy *legacy;
7491 		struct rte_flow_action_list_handle *handle;
7492 	} indlst_obj = { .handle = indlst_conf->handle };
7493 	enum mlx5dr_action_type type;
7494 
7495 	switch (list_type) {
7496 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
7497 		ret = flow_hw_dr_actions_template_handle_shared
7498 			(indlst_obj.legacy->legacy_type, action_src,
7499 			 action_types, curr_off, cnt_off, at);
7500 		if (ret)
7501 			return ret;
7502 		break;
7503 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
7504 		action_template_set_type(at, action_types, action_src, curr_off,
7505 					 MLX5DR_ACTION_TYP_DEST_ARRAY);
7506 		break;
7507 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
7508 		type = ((struct mlx5_hw_encap_decap_action *)
7509 			(indlst_conf->handle))->action_type;
7510 		action_template_set_type(at, action_types, action_src, curr_off, type);
7511 		break;
7512 	default:
7513 		DRV_LOG(ERR, "Unsupported indirect list type");
7514 		return -EINVAL;
7515 	}
7516 	return 0;
7517 }
7518 
7519 /**
7520  * Create DR action template based on a provided sequence of flow actions.
7521  *
7522  * @param[in] dev
7523  *   Pointer to the rte_eth_dev structure.
7524  * @param[in] at
7525  *   Pointer to flow actions template to be updated.
7526  * @param[out] action_types
7527  *   Action types array to be filled.
7528  * @param[out] tmpl_flags
7529  *   Template DR flags to be filled.
7530  *
7531  * @return
7532  *   0 on success, a negative errno value otherwise and rte_errno is set.
7533  */
7534 static int
7535 flow_hw_parse_flow_actions_to_dr_actions(struct rte_eth_dev *dev,
7536 					struct rte_flow_actions_template *at,
7537 					enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS],
7538 					uint32_t *tmpl_flags __rte_unused)
7539 {
7540 	unsigned int i;
7541 	uint16_t curr_off;
7542 	enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
7543 	uint16_t reformat_off = UINT16_MAX;
7544 	uint16_t mhdr_off = UINT16_MAX;
7545 	uint16_t recom_off = UINT16_MAX;
7546 	uint16_t cnt_off = UINT16_MAX;
7547 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
7548 	int ret;
7549 
7550 	for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
7551 		const struct rte_flow_action_raw_encap *raw_encap_data;
7552 		size_t data_size;
7553 		enum mlx5dr_action_type type;
7554 
7555 		if (curr_off >= MLX5_HW_MAX_ACTS)
7556 			goto err_actions_num;
7557 		switch ((int)at->actions[i].type) {
7558 		case RTE_FLOW_ACTION_TYPE_VOID:
7559 			break;
7560 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
7561 			ret = flow_hw_template_actions_list(at, i, action_types,
7562 							    &curr_off, &cnt_off);
7563 			if (ret)
7564 				return ret;
7565 			break;
7566 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
7567 			ret = flow_hw_dr_actions_template_handle_shared
7568 				(at->masks[i].type, i, action_types,
7569 				 &curr_off, &cnt_off, at);
7570 			if (ret)
7571 				return ret;
7572 			break;
7573 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7574 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7575 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7576 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7577 			MLX5_ASSERT(reformat_off == UINT16_MAX);
7578 			reformat_off = curr_off++;
7579 			reformat_act_type = mlx5_hw_dr_action_types[at->actions[i].type];
7580 			break;
7581 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
7582 			MLX5_ASSERT(recom_off == UINT16_MAX);
7583 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
7584 			recom_off = curr_off++;
7585 			break;
7586 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
7587 			MLX5_ASSERT(recom_off == UINT16_MAX);
7588 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
7589 			recom_off = curr_off++;
7590 			break;
7591 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7592 			raw_encap_data = at->actions[i].conf;
7593 			data_size = raw_encap_data->size;
7594 			if (reformat_off != UINT16_MAX) {
7595 				reformat_act_type = data_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
7596 					MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
7597 					MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
7598 			} else {
7599 				reformat_off = curr_off++;
7600 				reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
7601 			}
7602 			break;
7603 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7604 			reformat_off = curr_off++;
7605 			reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
7606 			break;
7607 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7608 			if (mhdr_off == UINT16_MAX) {
7609 				mhdr_off = curr_off++;
7610 				type = mlx5_hw_dr_action_types[at->actions[i].type];
7611 				action_types[mhdr_off] = type;
7612 			}
7613 			break;
7614 		case RTE_FLOW_ACTION_TYPE_METER:
7615 			at->dr_off[i] = curr_off;
7616 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
7617 			if (curr_off >= MLX5_HW_MAX_ACTS)
7618 				goto err_actions_num;
7619 			action_types[curr_off++] = MLX5DR_ACTION_TYP_TBL;
7620 			break;
7621 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7622 			type = mlx5_hw_dr_action_types[at->actions[i].type];
7623 			at->dr_off[i] = curr_off;
7624 			action_types[curr_off++] = type;
7625 			i += is_of_vlan_pcp_present(at->actions + i) ?
7626 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7627 				MLX5_HW_VLAN_PUSH_VID_IDX;
7628 			break;
7629 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
7630 			at->dr_off[i] = curr_off;
7631 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
7632 			if (curr_off >= MLX5_HW_MAX_ACTS)
7633 				goto err_actions_num;
7634 			break;
7635 		case RTE_FLOW_ACTION_TYPE_AGE:
7636 		case RTE_FLOW_ACTION_TYPE_COUNT:
7637 			/*
7638 			 * Both AGE and COUNT actions need a counter; the first
7639 			 * one fills the action_types array, and the second only
7640 			 * saves the offset.
7641 			 */
7642 			if (cnt_off == UINT16_MAX) {
7643 				cnt_off = curr_off++;
7644 				action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
7645 			}
7646 			at->dr_off[i] = cnt_off;
7647 			break;
7648 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7649 			at->dr_off[i] = curr_off;
7650 			action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS;
7651 			break;
7652 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
7653 			*tmpl_flags |= MLX5DR_ACTION_TEMPLATE_FLAG_RELAXED_ORDER;
7654 			at->dr_off[i] = curr_off;
7655 			action_types[curr_off++] = MLX5DR_ACTION_TYP_JUMP_TO_MATCHER;
7656 			break;
7657 		default:
7658 			type = mlx5_hw_dr_action_types[at->actions[i].type];
7659 			at->dr_off[i] = curr_off;
7660 			action_types[curr_off++] = type;
7661 			break;
7662 		}
7663 	}
7664 	if (curr_off >= MLX5_HW_MAX_ACTS)
7665 		goto err_actions_num;
7666 	if (mhdr_off != UINT16_MAX)
7667 		at->mhdr_off = mhdr_off;
7668 	if (reformat_off != UINT16_MAX) {
7669 		at->reformat_off = reformat_off;
7670 		action_types[reformat_off] = reformat_act_type;
7671 	}
7672 	if (recom_off != UINT16_MAX) {
7673 		at->recom_off = recom_off;
7674 		action_types[recom_off] = recom_type;
7675 	}
7676 	at->dr_actions_num = curr_off;
7677 
7678 	/* Create srh flex parser for remove anchor. */
7679 	/* Create the SRH flex parser needed for IPv6 routing extension push/remove. */
7680 	     recom_type == MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) &&
7681 	    (ret = mlx5_alloc_srh_flex_parser(dev))) {
7682 		DRV_LOG(ERR, "Failed to create srv6 flex parser");
7683 		return ret;
7684 	}
7685 	return 0;
7686 err_actions_num:
7687 	DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed in template",
7688 		curr_off, MLX5_HW_MAX_ACTS);
7689 	return -EINVAL;
7690 }
7691 
7692 static void
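/*
 * Replace the OF_SET_VLAN_VID action and mask at index set_vlan_vid_ix with
 * an equivalent MODIFY_FIELD on the VLAN ID. If the VID is masked in the
 * template, the value is embedded into the modify-field specification.
 */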
7693 flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
7694 		     struct rte_flow_action *ra,
7695 		     struct rte_flow_action *rm,
7696 		     struct rte_flow_action_modify_field *spec,
7697 		     struct rte_flow_action_modify_field *mask,
7698 		     int set_vlan_vid_ix)
7699 {
7700 	struct rte_flow_error error;
7701 	const bool masked = rm[set_vlan_vid_ix].conf &&
7702 		(((const struct rte_flow_action_of_set_vlan_vid *)
7703 			rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
7704 	const struct rte_flow_action_of_set_vlan_vid *conf =
7705 		ra[set_vlan_vid_ix].conf;
7706 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
7707 					       NULL, &error);
7708 	*spec = (typeof(*spec)) {
7709 		.operation = RTE_FLOW_MODIFY_SET,
7710 		.dst = {
7711 			.field = RTE_FLOW_FIELD_VLAN_ID,
7712 			.level = 0, .offset = 0,
7713 		},
7714 		.src = {
7715 			.field = RTE_FLOW_FIELD_VALUE,
7716 		},
7717 		.width = width,
7718 	};
7719 	*mask = (typeof(*mask)) {
7720 		.operation = RTE_FLOW_MODIFY_SET,
7721 		.dst = {
7722 			.field = RTE_FLOW_FIELD_VLAN_ID,
7723 			.level = 0xff, .offset = 0xffffffff,
7724 		},
7725 		.src = {
7726 			.field = RTE_FLOW_FIELD_VALUE,
7727 		},
7728 		.width = 0xffffffff,
7729 	};
7730 	if (masked) {
7731 		uint32_t mask_val = 0xffffffff;
7732 
7733 		rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid));
7734 		rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val));
7735 	}
7736 	ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
7737 	ra[set_vlan_vid_ix].conf = spec;
7738 	rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
7739 	rm[set_vlan_vid_ix].conf = mask;
7740 }
7741 
7742 static __rte_always_inline int
7743 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
7744 			       struct mlx5_modification_cmd *mhdr_cmd,
7745 			       struct mlx5_action_construct_data *act_data,
7746 			       const struct mlx5_hw_actions *hw_acts,
7747 			       const struct rte_flow_action *action)
7748 {
7749 	struct rte_flow_error error;
7750 	rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
7751 			   action->conf)->vlan_vid;
7752 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
7753 					       NULL, &error);
7754 	struct rte_flow_action_modify_field conf = {
7755 		.operation = RTE_FLOW_MODIFY_SET,
7756 		.dst = {
7757 			.field = RTE_FLOW_FIELD_VLAN_ID,
7758 			.level = 0, .offset = 0,
7759 		},
7760 		.src = {
7761 			.field = RTE_FLOW_FIELD_VALUE,
7762 		},
7763 		.width = width,
7764 	};
7765 	struct rte_flow_action modify_action = {
7766 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7767 		.conf = &conf
7768 	};
7769 
7770 	rte_memcpy(conf.src.value, &vid, sizeof(vid));
7771 	return flow_hw_modify_field_construct(mhdr_cmd, act_data, hw_acts, &modify_action);
7772 }
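
/*
 * For illustration only: the OF_SET_VLAN_VID action handled above is
 * internally rewritten into a MODIFY_FIELD action on the VLAN ID field.
 * A roughly equivalent action, as an application could express it directly,
 * might look like the sketch below (the 12-bit width and the example VID
 * value are assumptions made for the illustration):
 *
 * @code{.c}
 * rte_be16_t vid = RTE_BE16(100);
 * struct rte_flow_action_modify_field set_vid = {
 *	.operation = RTE_FLOW_MODIFY_SET,
 *	.dst = { .field = RTE_FLOW_FIELD_VLAN_ID },
 *	.src = { .field = RTE_FLOW_FIELD_VALUE },
 *	.width = 12,
 * };
 *
 * memcpy(set_vid.src.value, &vid, sizeof(vid));
 * @endcode
 */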
7773 
7774 static int
7775 flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
7776 			  struct rte_flow_item_flex_handle *handle,
7777 			  uint8_t *flex_item)
7778 {
7779 	int index = mlx5_flex_acquire_index(dev, handle, false);
7780 
7781 	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
7782 	if (index < 0)
7783 		return -1;
7784 	if (!(*flex_item & RTE_BIT32(index))) {
7785 		/* Don't count same flex item again. */
7786 		if (mlx5_flex_acquire_index(dev, handle, true) != index)
7787 			MLX5_ASSERT(false);
7788 		*flex_item |= (uint8_t)RTE_BIT32(index);
7789 	}
7790 	return 0;
7791 }
7792 
7793 static void
7794 flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
7795 {
7796 	while (*flex_item) {
7797 		int index = rte_bsf32(*flex_item);
7798 
7799 		mlx5_flex_release_index(dev, index);
7800 		*flex_item &= ~(uint8_t)RTE_BIT32(index);
7801 	}
7802 }
7803 static __rte_always_inline void
7804 flow_hw_actions_template_replace_container(const
7805 					   struct rte_flow_action *actions,
7806 					   const
7807 					   struct rte_flow_action *masks,
7808 					   struct rte_flow_action *new_actions,
7809 					   struct rte_flow_action *new_masks,
7810 					   struct rte_flow_action **ra,
7811 					   struct rte_flow_action **rm,
7812 					   uint32_t act_num)
7813 {
7814 	memcpy(new_actions, actions, sizeof(actions[0]) * act_num);
7815 	memcpy(new_masks, masks, sizeof(masks[0]) * act_num);
7816 	*ra = (void *)(uintptr_t)new_actions;
7817 	*rm = (void *)(uintptr_t)new_masks;
7818 }
7819 
7820 /* These actions are copied into the action template by rte_flow_conv(). */
7821 
7822 static const struct rte_flow_action rx_meta_copy_action =  {
7823 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7824 	.conf = &(struct rte_flow_action_modify_field){
7825 		.operation = RTE_FLOW_MODIFY_SET,
7826 		.dst = {
7827 			.field = (enum rte_flow_field_id)
7828 				MLX5_RTE_FLOW_FIELD_META_REG,
7829 			.tag_index = REG_B,
7830 		},
7831 		.src = {
7832 			.field = (enum rte_flow_field_id)
7833 				MLX5_RTE_FLOW_FIELD_META_REG,
7834 			.tag_index = REG_C_1,
7835 		},
7836 		.width = 32,
7837 	}
7838 };
7839 
7840 static const struct rte_flow_action rx_meta_copy_mask = {
7841 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7842 	.conf = &(struct rte_flow_action_modify_field){
7843 		.operation = RTE_FLOW_MODIFY_SET,
7844 		.dst = {
7845 			.field = (enum rte_flow_field_id)
7846 				MLX5_RTE_FLOW_FIELD_META_REG,
7847 			.level = UINT8_MAX,
7848 			.tag_index = UINT8_MAX,
7849 			.offset = UINT32_MAX,
7850 		},
7851 		.src = {
7852 			.field = (enum rte_flow_field_id)
7853 				MLX5_RTE_FLOW_FIELD_META_REG,
7854 			.level = UINT8_MAX,
7855 			.tag_index = UINT8_MAX,
7856 			.offset = UINT32_MAX,
7857 		},
7858 		.width = UINT32_MAX,
7859 	}
7860 };
7861 
7862 static const struct rte_flow_action quota_color_inc_action = {
7863 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7864 	.conf = &(struct rte_flow_action_modify_field) {
7865 		.operation = RTE_FLOW_MODIFY_ADD,
7866 		.dst = {
7867 			.field = RTE_FLOW_FIELD_METER_COLOR,
7868 			.level = 0, .offset = 0
7869 		},
7870 		.src = {
7871 			.field = RTE_FLOW_FIELD_VALUE,
7872 			.level = 1,
7873 			.offset = 0,
7874 		},
7875 		.width = 2
7876 	}
7877 };
7878 
7879 static const struct rte_flow_action quota_color_inc_mask = {
7880 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7881 	.conf = &(struct rte_flow_action_modify_field) {
7882 		.operation = RTE_FLOW_MODIFY_ADD,
7883 		.dst = {
7884 			.field = RTE_FLOW_FIELD_METER_COLOR,
7885 			.level = UINT8_MAX,
7886 			.tag_index = UINT8_MAX,
7887 			.offset = UINT32_MAX,
7888 		},
7889 		.src = {
7890 			.field = RTE_FLOW_FIELD_VALUE,
7891 			.level = 3,
7892 			.offset = 0
7893 		},
7894 		.width = UINT32_MAX
7895 	}
7896 };
7897 
7898 /**
7899  * Create flow action template.
7900  *
7901  * @param[in] dev
7902  *   Pointer to the rte_eth_dev structure.
7903  * @param[in] attr
7904  *   Pointer to the action template attributes.
7905  * @param[in] actions
7906  *   Associated actions (list terminated by the END action).
7907  * @param[in] masks
7908  *   List of actions that mark which of the action's members are constant.
7909  * @param[in] nt_mode
7910  *   Non-template mode flag. When set, validation is skipped and no DR action template is created.
7911  * @param[out] error
7912  *   Pointer to error structure.
7913  *
7914  * @return
7915  *   Action template pointer on success, NULL otherwise and rte_errno is set.
7916  */
7917 static struct rte_flow_actions_template *
7918 __flow_hw_actions_template_create(struct rte_eth_dev *dev,
7919 			const struct rte_flow_actions_template_attr *attr,
7920 			const struct rte_flow_action actions[],
7921 			const struct rte_flow_action masks[],
7922 			bool nt_mode,
7923 			struct rte_flow_error *error)
7924 {
7925 	struct mlx5_priv *priv = dev->data->dev_private;
7926 	int len, act_len, mask_len;
7927 	int orig_act_len;
7928 	unsigned int act_num;
7929 	unsigned int i;
7930 	struct rte_flow_actions_template *at = NULL;
7931 	uint16_t pos;
7932 	uint64_t action_flags = 0;
7933 	struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
7934 	struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
7935 	struct rte_flow_action *ra = (void *)(uintptr_t)actions;
7936 	struct rte_flow_action *rm = (void *)(uintptr_t)masks;
7937 	int set_vlan_vid_ix = -1;
7938 	struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
7939 	struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
7940 	struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
7941 	struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
7942 	uint32_t expand_mf_num = 0;
7943 	uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
7944 	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { MLX5DR_ACTION_TYP_LAST };
7945 	uint32_t tmpl_flags = 0;
7946 	int ret;
7947 
7948 	if (!nt_mode && mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
7949 						      &action_flags, error))
7950 		return NULL;
7951 	for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
7952 		switch (ra[i].type) {
7953 		/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
7954 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7955 			i += is_of_vlan_pcp_present(ra + i) ?
7956 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7957 				MLX5_HW_VLAN_PUSH_VID_IDX;
7958 			break;
7959 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7960 			set_vlan_vid_ix = i;
7961 			break;
7962 		default:
7963 			break;
7964 		}
7965 	}
7966 	/*
7967 	 * Count flow actions to allocate the required space for storing DR offsets and to check
7968 	 * that the temporary buffers are not overrun.
7969 	 */
7970 	act_num = i + 1;
7971 	if (act_num >= MLX5_HW_MAX_ACTS) {
7972 		rte_flow_error_set(error, EINVAL,
7973 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
7974 		return NULL;
7975 	}
7976 	if (set_vlan_vid_ix != -1) {
7977 		/* If temporary action buffer was not used, copy template actions to it */
7978 		if (ra == actions)
7979 			flow_hw_actions_template_replace_container(actions,
7980 								   masks,
7981 								   tmp_action,
7982 								   tmp_mask,
7983 								   &ra, &rm,
7984 								   act_num);
7985 		flow_hw_set_vlan_vid(dev, ra, rm,
7986 				     &set_vlan_vid_spec, &set_vlan_vid_mask,
7987 				     set_vlan_vid_ix);
7988 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7989 	}
7990 	if (action_flags & MLX5_FLOW_ACTION_QUOTA) {
7991 		mf_actions[expand_mf_num] = quota_color_inc_action;
7992 		mf_masks[expand_mf_num] = quota_color_inc_mask;
7993 		expand_mf_num++;
7994 	}
7995 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
7996 	    priv->sh->config.dv_esw_en &&
7997 	    (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
7998 		/* Insert META copy */
7999 		mf_actions[expand_mf_num] = rx_meta_copy_action;
8000 		mf_masks[expand_mf_num] = rx_meta_copy_mask;
8001 		expand_mf_num++;
8002 	}
8003 	if (expand_mf_num) {
8004 		if (act_num + expand_mf_num > MLX5_HW_MAX_ACTS) {
8005 			rte_flow_error_set(error, E2BIG,
8006 					   RTE_FLOW_ERROR_TYPE_ACTION,
8007 					   NULL, "cannot expand: too many actions");
8008 			return NULL;
8009 		}
8010 		if (ra == actions)
8011 			flow_hw_actions_template_replace_container(actions,
8012 								   masks,
8013 								   tmp_action,
8014 								   tmp_mask,
8015 								   &ra, &rm,
8016 								   act_num);
8017 		/* Application should make sure only one Q/RSS action exists in one rule. */
8018 		pos = flow_hw_template_expand_modify_field(ra, rm,
8019 							   mf_actions,
8020 							   mf_masks,
8021 							   action_flags,
8022 							   act_num,
8023 							   expand_mf_num);
8024 		if (pos == MLX5_HW_EXPAND_MH_FAILED) {
8025 			rte_flow_error_set(error, ENOMEM,
8026 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8027 					   NULL, "modify header expansion failed");
8028 			return NULL;
8029 		}
8030 		act_num += expand_mf_num;
8031 		for (i = pos + expand_mf_num; i < act_num; i++)
8032 			src_off[i] += expand_mf_num;
8033 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
8034 	}
8035 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
8036 	if (act_len <= 0)
8037 		return NULL;
8038 	len = RTE_ALIGN(act_len, 16);
8039 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);
8040 	if (mask_len <= 0)
8041 		return NULL;
8042 	len += RTE_ALIGN(mask_len, 16);
8043 	len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
8044 	len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
8045 	orig_act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, error);
8046 	if (orig_act_len <= 0)
8047 		return NULL;
8048 	len += RTE_ALIGN(orig_act_len, 16);
8049 	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
8050 			 RTE_CACHE_LINE_SIZE, rte_socket_id());
8051 	if (!at) {
8052 		rte_flow_error_set(error, ENOMEM,
8053 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8054 				   NULL,
8055 				   "cannot allocate action template");
8056 		return NULL;
8057 	}
8058 	/* Actions are stored in the first part. */
8059 	at->attr = *attr;
8060 	at->actions = (struct rte_flow_action *)(at + 1);
8061 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,
8062 				len, ra, error);
8063 	if (act_len <= 0)
8064 		goto error;
8065 	/* Masks are stored in the second part. */
8066 	at->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);
8067 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
8068 				 len - act_len, rm, error);
8069 	if (mask_len <= 0)
8070 		goto error;
8071 	/* DR action offsets are stored in the third part. */
8072 	at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
8073 	at->src_off = RTE_PTR_ADD(at->dr_off,
8074 				  RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
8075 	memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
8076 	at->orig_actions = RTE_PTR_ADD(at->src_off,
8077 				       RTE_ALIGN(act_num * sizeof(*at->src_off), 16));
8078 	orig_act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->orig_actions, orig_act_len,
8079 				     actions, error);
8080 	if (orig_act_len <= 0)
8081 		goto error;
8082 	at->actions_num = act_num;
8083 	for (i = 0; i < at->actions_num; ++i)
8084 		at->dr_off[i] = UINT16_MAX;
8085 	at->reformat_off = UINT16_MAX;
8086 	at->mhdr_off = UINT16_MAX;
8087 	at->recom_off = UINT16_MAX;
8088 	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
8089 	     actions++, masks++, i++) {
8090 		const struct rte_flow_action_modify_field *info;
8091 
8092 		switch (actions->type) {
8093 		/*
8094 		 * The mlx5 PMD stores the indirect action index directly in the action conf pointer.
8095 		 * The rte_flow_conv() function copies the content that the conf pointer refers to,
8096 		 * so the indirect action index must be restored from the original action conf here.
8097 		 */
8098 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
8099 			at->actions[i].conf = ra[i].conf;
8100 			at->masks[i].conf = rm[i].conf;
8101 			break;
8102 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
8103 			info = actions->conf;
8104 			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
8105 			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
8106 						       &at->flex_item)) ||
8107 			    (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
8108 			     flow_hw_flex_item_acquire(dev, info->src.flex_handle,
8109 						       &at->flex_item)))
8110 				goto error;
8111 			break;
8112 		default:
8113 			break;
8114 		}
8115 	}
8116 	ret = flow_hw_parse_flow_actions_to_dr_actions(dev, at, action_types, &tmpl_flags);
8117 	if (ret)
8118 		goto error;
8119 	at->action_flags = action_flags;
8120 	/* In non-template mode there is no need to create the DR action template. */
8121 	if (nt_mode)
8122 		return at;
8123 	at->tmpl = mlx5dr_action_template_create(action_types, tmpl_flags);
8124 	if (!at->tmpl) {
8125 		DRV_LOG(ERR, "Failed to create DR action template: %d", rte_errno);
8126 		goto error;
8127 	}
8128 	rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
8129 	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
8130 	return at;
8131 error:
8132 	if (at) {
8133 		mlx5_free(at);
8134 	}
8135 	rte_flow_error_set(error, rte_errno,
8136 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8137 			   "Failed to create action template");
8138 	return NULL;
8139 }
8140 
8141 /**
8142  * Create flow action template.
8143  *
8144  * @param[in] dev
8145  *   Pointer to the rte_eth_dev structure.
8146  * @param[in] attr
8147  *   Pointer to the action template attributes.
8148  * @param[in] actions
8149  *   Associated actions (list terminated by the END action).
8150  * @param[in] masks
8151  *   List of actions that mark which of the action's members are constant.
8152  * @param[out] error
8153  *   Pointer to error structure.
8154  *
8155  * @return
8156  *   Action template pointer on success, NULL otherwise and rte_errno is set.
8157  */
8158 static struct rte_flow_actions_template *
8159 flow_hw_actions_template_create(struct rte_eth_dev *dev,
8160 			const struct rte_flow_actions_template_attr *attr,
8161 			const struct rte_flow_action actions[],
8162 			const struct rte_flow_action masks[],
8163 			struct rte_flow_error *error)
8164 {
8165 	return __flow_hw_actions_template_create(dev, attr, actions, masks, false, error);
8166 }
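
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * creates an actions template through the generic rte_flow API, which ends
 * up in the callback above. Port id 0 and the fully masked QUEUE action are
 * assumptions made for the example.
 *
 * @code{.c}
 * struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_action masks[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow_actions_template *at =
 *	rte_flow_actions_template_create(0, &at_attr, actions, masks, &err);
 * @endcode
 */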
8167 
8168 /**
8169  * Destroy flow action template.
8170  *
8171  * @param[in] dev
8172  *   Pointer to the rte_eth_dev structure.
8173  * @param[in] template
8174  *   Pointer to the action template to be destroyed.
8175  * @param[out] error
8176  *   Pointer to error structure.
8177  *
8178  * @return
8179  *   0 on success, a negative errno value otherwise and rte_errno is set.
8180  */
8181 static int
8182 flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
8183 				 struct rte_flow_actions_template *template,
8184 				 struct rte_flow_error *error __rte_unused)
8185 {
8186 	uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
8187 			MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
8188 
8189 	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
8190 		DRV_LOG(WARNING, "Action template %p is still in use.",
8191 			(void *)template);
8192 		return rte_flow_error_set(error, EBUSY,
8193 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8194 				   NULL,
8195 				   "action template is in use");
8196 	}
8197 	if (template->action_flags & flag)
8198 		mlx5_free_srh_flex_parser(dev);
8199 	LIST_REMOVE(template, next);
8200 	flow_hw_flex_item_release(dev, &template->flex_item);
8201 	if (template->tmpl)
8202 		mlx5dr_action_template_destroy(template->tmpl);
8203 	mlx5_free(template);
8204 	return 0;
8205 }
8206 
8207 static struct rte_flow_item *
8208 flow_hw_prepend_item(const struct rte_flow_item *items,
8209 		     const uint32_t nb_items,
8210 		     const struct rte_flow_item *new_item,
8211 		     struct rte_flow_error *error)
8212 {
8213 	struct rte_flow_item *copied_items;
8214 	size_t size;
8215 
8216 	/* Allocate new array of items. */
8217 	size = sizeof(*copied_items) * (nb_items + 1);
8218 	copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());
8219 	if (!copied_items) {
8220 		rte_flow_error_set(error, ENOMEM,
8221 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8222 				   NULL,
8223 				   "cannot allocate item template");
8224 		return NULL;
8225 	}
8226 	/* Put new item at the beginning and copy the rest. */
8227 	copied_items[0] = *new_item;
8228 	rte_memcpy(&copied_items[1], items, sizeof(*items) * nb_items);
8229 	return copied_items;
8230 }
8231 
8232 static int
8233 flow_hw_item_compare_field_validate(enum rte_flow_field_id arg_field,
8234 				    enum rte_flow_field_id base_field,
8235 				    struct rte_flow_error *error)
8236 {
8237 	switch (arg_field) {
8238 	case RTE_FLOW_FIELD_TAG:
8239 	case RTE_FLOW_FIELD_META:
8240 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8241 		break;
8242 	case RTE_FLOW_FIELD_RANDOM:
8243 		if (base_field == RTE_FLOW_FIELD_VALUE)
8244 			return 0;
8245 		return rte_flow_error_set(error, EINVAL,
8246 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8247 					  NULL,
8248 					  "compare random is supported only with immediate value");
8249 	default:
8250 		return rte_flow_error_set(error, ENOTSUP,
8251 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8252 					  NULL,
8253 					  "compare item argument field is not supported");
8254 	}
8255 	switch (base_field) {
8256 	case RTE_FLOW_FIELD_TAG:
8257 	case RTE_FLOW_FIELD_META:
8258 	case RTE_FLOW_FIELD_VALUE:
8259 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8260 		break;
8261 	default:
8262 		return rte_flow_error_set(error, ENOTSUP,
8263 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8264 					  NULL,
8265 					  "compare item base field is not supported");
8266 	}
8267 	return 0;
8268 }
8269 
8270 static inline uint32_t
8271 flow_hw_item_compare_width_supported(enum rte_flow_field_id field)
8272 {
8273 	switch (field) {
8274 	case RTE_FLOW_FIELD_TAG:
8275 	case RTE_FLOW_FIELD_META:
8276 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8277 		return 32;
8278 	case RTE_FLOW_FIELD_RANDOM:
8279 		return 16;
8280 	default:
8281 		break;
8282 	}
8283 	return 0;
8284 }
8285 
8286 static int
8287 flow_hw_validate_item_compare(const struct rte_flow_item *item,
8288 			      struct rte_flow_error *error)
8289 {
8290 	const struct rte_flow_item_compare *comp_m = item->mask;
8291 	const struct rte_flow_item_compare *comp_v = item->spec;
8292 	int ret;
8293 
8294 	if (unlikely(!comp_m))
8295 		return rte_flow_error_set(error, EINVAL,
8296 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8297 				   NULL,
8298 				   "compare item mask is missing");
8299 	if (comp_m->width != UINT32_MAX)
8300 		return rte_flow_error_set(error, EINVAL,
8301 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8302 				   NULL,
8303 				   "compare item only supports full mask");
8304 	ret = flow_hw_item_compare_field_validate(comp_m->a.field,
8305 						  comp_m->b.field, error);
8306 	if (ret < 0)
8307 		return ret;
8308 	if (comp_v) {
8309 		uint32_t width;
8310 
8311 		if (comp_v->operation != comp_m->operation ||
8312 		    comp_v->a.field != comp_m->a.field ||
8313 		    comp_v->b.field != comp_m->b.field)
8314 			return rte_flow_error_set(error, EINVAL,
8315 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8316 					   NULL,
8317 					   "compare item spec/mask not matching");
8318 		width = flow_hw_item_compare_width_supported(comp_v->a.field);
8319 		MLX5_ASSERT(width > 0);
8320 		if ((comp_v->width & comp_m->width) != width)
8321 			return rte_flow_error_set(error, EINVAL,
8322 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8323 					   NULL,
8324 					   "compare item only supports full mask");
8325 	}
8326 	return 0;
8327 }
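
/*
 * Illustrative sketch of a COMPARE item accepted by the validation above:
 * the mask width must be fully set (UINT32_MAX), the spec and mask must use
 * the same operation and fields, and the spec width must equal the supported
 * width of the argument field (32 bits for META). The EQ operation and the
 * omitted immediate value handling are assumptions made for the example.
 *
 * @code{.c}
 * struct rte_flow_item_compare cmp_spec = {
 *	.operation = RTE_FLOW_ITEM_COMPARE_EQ,
 *	.a = { .field = RTE_FLOW_FIELD_META },
 *	.b = { .field = RTE_FLOW_FIELD_VALUE },
 *	.width = 32,
 * };
 * struct rte_flow_item_compare cmp_mask = {
 *	.operation = RTE_FLOW_ITEM_COMPARE_EQ,
 *	.a = { .field = RTE_FLOW_FIELD_META },
 *	.b = { .field = RTE_FLOW_FIELD_VALUE },
 *	.width = UINT32_MAX,
 * };
 * struct rte_flow_item cmp_item = {
 *	.type = RTE_FLOW_ITEM_TYPE_COMPARE,
 *	.spec = &cmp_spec,
 *	.mask = &cmp_mask,
 * };
 * @endcode
 */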
8328 
8329 static inline int
8330 mlx5_hw_validate_item_nsh(struct rte_eth_dev *dev,
8331 			  const struct rte_flow_item *item,
8332 			  struct rte_flow_error *error)
8333 {
8334 	return mlx5_flow_validate_item_nsh(dev, item, error);
8335 }
8336 
8337 static bool
8338 mlx5_hw_flow_tunnel_ip_check(uint64_t last_item, uint64_t *item_flags)
8339 {
8340 	bool tunnel;
8341 
8342 	if (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
8343 		tunnel = true;
8344 		*item_flags |= MLX5_FLOW_LAYER_IPIP;
8345 	} else if (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV6 ||
8346 		   last_item == MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) {
8347 		tunnel = true;
8348 		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
8349 	} else {
8350 		tunnel = false;
8351 	}
8352 	return tunnel;
8353 }
8354 
8355 const struct rte_flow_item_ipv4 hws_nic_ipv4_mask = {
8356 	.hdr = {
8357 		.version = 0xf,
8358 		.ihl = 0xf,
8359 		.type_of_service = 0xff,
8360 		.total_length = RTE_BE16(0xffff),
8361 		.packet_id = RTE_BE16(0xffff),
8362 		.fragment_offset = RTE_BE16(0xffff),
8363 		.time_to_live = 0xff,
8364 		.next_proto_id = 0xff,
8365 		.src_addr = RTE_BE32(0xffffffff),
8366 		.dst_addr = RTE_BE32(0xffffffff),
8367 	},
8368 };
8369 
8370 const struct rte_flow_item_ipv6 hws_nic_ipv6_mask = {
8371 	.hdr = {
8372 		.vtc_flow = RTE_BE32(0xffffffff),
8373 		.payload_len = RTE_BE16(0xffff),
8374 		.proto = 0xff,
8375 		.hop_limits = 0xff,
8376 		.src_addr = RTE_IPV6_MASK_FULL,
8377 		.dst_addr = RTE_IPV6_MASK_FULL,
8378 	},
8379 	.has_frag_ext = 1,
8380 };
8381 
8382 static int
8383 flow_hw_validate_item_ptype(const struct rte_flow_item *item,
8384 			    struct rte_flow_error *error)
8385 {
8386 	const struct rte_flow_item_ptype *ptype = item->mask;
8387 
8388 	/* HWS does not allow empty PTYPE mask */
8389 	if (!ptype)
8390 		return rte_flow_error_set(error, EINVAL,
8391 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8392 					  NULL, "empty ptype mask");
8393 	if (!(ptype->packet_type &
8394 	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
8395 	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK |
8396 	       RTE_PTYPE_INNER_L4_MASK)))
8397 		return rte_flow_error_set(error, ENOTSUP,
8398 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8399 					  NULL, "ptype mask not supported");
8400 	return 0;
8401 }
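
/*
 * Illustrative sketch of a PTYPE item accepted by the check above: the mask
 * must be present and cover at least one of the outer or inner L2/L3/L4
 * sub-masks. Matching on outer UDP is an assumption picked for the example.
 *
 * @code{.c}
 * struct rte_flow_item_ptype ptype_spec = { .packet_type = RTE_PTYPE_L4_UDP };
 * struct rte_flow_item_ptype ptype_mask = { .packet_type = RTE_PTYPE_L4_MASK };
 * struct rte_flow_item ptype_item = {
 *	.type = RTE_FLOW_ITEM_TYPE_PTYPE,
 *	.spec = &ptype_spec,
 *	.mask = &ptype_mask,
 * };
 * @endcode
 */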
8402 
8403 struct mlx5_hw_pattern_validation_ctx {
8404 	const struct rte_flow_item *geneve_item;
8405 	const struct rte_flow_item *flex_item;
8406 };
8407 
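/**
 * Validate flow pattern template items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the pattern template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[in,out] item_flags
 *   Bitmap of item layer flags, updated with the accepted items.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Number of pattern items, including the END item, on success.
 *   A negative value otherwise and rte_errno is set.
 */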
8408 static int
8409 flow_hw_pattern_validate(struct rte_eth_dev *dev,
8410 			 const struct rte_flow_pattern_template_attr *attr,
8411 			 const struct rte_flow_item items[],
8412 			 uint64_t *item_flags,
8413 			 struct rte_flow_error *error)
8414 {
8415 	struct mlx5_priv *priv = dev->data->dev_private;
8416 	const struct rte_flow_item *item;
8417 	const struct rte_flow_item *gtp_item = NULL;
8418 	const struct rte_flow_item *gre_item = NULL;
8419 	const struct rte_flow_attr flow_attr = {
8420 		.ingress = attr->ingress,
8421 		.egress = attr->egress,
8422 		.transfer = attr->transfer
8423 	};
8424 	int ret, tag_idx;
8425 	uint32_t tag_bitmap = 0;
8426 	uint64_t last_item = 0;
8427 
8428 	if (!mlx5_hw_ctx_validate(dev, error))
8429 		return -rte_errno;
8430 	if (!attr->ingress && !attr->egress && !attr->transfer)
8431 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8432 					  "at least one of the direction attributes"
8433 					  " must be specified");
8434 	if (priv->sh->config.dv_esw_en) {
8435 		MLX5_ASSERT(priv->master || priv->representor);
8436 		if (priv->master) {
8437 			if ((attr->ingress && attr->egress) ||
8438 			    (attr->ingress && attr->transfer) ||
8439 			    (attr->egress && attr->transfer))
8440 				return rte_flow_error_set(error, EINVAL,
8441 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8442 							  "only one direction attribute at once"
8443 							  " can be used on transfer proxy port");
8444 		} else {
8445 			if (attr->transfer)
8446 				return rte_flow_error_set(error, EINVAL,
8447 							  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
8448 							  "transfer attribute cannot be used with"
8449 							  " port representors");
8450 			if (attr->ingress && attr->egress)
8451 				return rte_flow_error_set(error, EINVAL,
8452 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8453 							  "ingress and egress direction attributes"
8454 							  " cannot be used at the same time on"
8455 							  " port representors");
8456 		}
8457 	} else {
8458 		if (attr->transfer)
8459 			return rte_flow_error_set(error, EINVAL,
8460 						  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
8461 						  "transfer attribute cannot be used when"
8462 						  " E-Switch is disabled");
8463 	}
8464 	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
8465 		bool tunnel = *item_flags & MLX5_FLOW_LAYER_TUNNEL;
8466 
8467 		switch ((int)item->type) {
8468 		case RTE_FLOW_ITEM_TYPE_PTYPE:
8469 			ret = flow_hw_validate_item_ptype(item, error);
8470 			if (ret)
8471 				return ret;
8472 			last_item = MLX5_FLOW_ITEM_PTYPE;
8473 			break;
8474 		case RTE_FLOW_ITEM_TYPE_TAG:
8475 		{
8476 			const struct rte_flow_item_tag *tag =
8477 				(const struct rte_flow_item_tag *)item->spec;
8478 
8479 			if (tag == NULL)
8480 				return rte_flow_error_set(error, EINVAL,
8481 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8482 							  NULL,
8483 							  "Tag spec is NULL");
8484 			if (tag->index >= MLX5_FLOW_HW_TAGS_MAX &&
8485 			    tag->index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
8486 				return rte_flow_error_set(error, EINVAL,
8487 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8488 							  NULL,
8489 							  "Invalid tag index");
8490 			tag_idx = flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, tag->index);
8491 			if (tag_idx == REG_NON)
8492 				return rte_flow_error_set(error, EINVAL,
8493 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8494 							  NULL,
8495 							  "Unsupported tag index");
8496 			if (tag_bitmap & (1 << tag_idx))
8497 				return rte_flow_error_set(error, EINVAL,
8498 							  RTE_FLOW_ERROR_TYPE_ITEM,
8499 							  NULL,
8500 							  "Duplicated tag index");
8501 			tag_bitmap |= 1 << tag_idx;
8502 			last_item = MLX5_FLOW_ITEM_TAG;
8503 			break;
8504 		}
8505 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8506 		{
8507 			const struct rte_flow_item_tag *tag =
8508 				(const struct rte_flow_item_tag *)item->spec;
8509 			uint16_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
8510 
8511 			if (!((1 << (tag->index - REG_C_0)) & regcs))
8512 				return rte_flow_error_set(error, EINVAL,
8513 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8514 							  NULL,
8515 							  "Unsupported internal tag index");
8516 			if (tag_bitmap & (1 << tag->index))
8517 				return rte_flow_error_set(error, EINVAL,
8518 							  RTE_FLOW_ERROR_TYPE_ITEM,
8519 							  NULL,
8520 							  "Duplicated tag index");
8521 			tag_bitmap |= 1 << tag->index;
8522 			break;
8523 		}
8524 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
8525 			if (attr->ingress && priv->sh->config.repr_matching)
8526 				return rte_flow_error_set(error, EINVAL,
8527 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8528 						  "represented port item cannot be used"
8529 						  " when ingress attribute is set");
8530 			if (attr->egress)
8531 				return rte_flow_error_set(error, EINVAL,
8532 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8533 						  "represented port item cannot be used"
8534 						  " when egress attribute is set");
8535 			last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
8536 			break;
8537 		case RTE_FLOW_ITEM_TYPE_META:
8538 			/* ingress + group 0 is not supported */
8539 			*item_flags |= MLX5_FLOW_ITEM_METADATA;
8540 			break;
8541 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
8542 		{
8543 			int reg = flow_hw_get_reg_id(dev,
8544 						     RTE_FLOW_ITEM_TYPE_METER_COLOR,
8545 						     0);
8546 			if (reg == REG_NON)
8547 				return rte_flow_error_set(error, EINVAL,
8548 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8549 							  NULL,
8550 							  "Unsupported meter color register");
8551 			if (*item_flags &
8552 			    (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
8553 				return rte_flow_error_set
8554 					(error, EINVAL,
8555 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8556 			last_item = MLX5_FLOW_ITEM_METER_COLOR;
8557 			break;
8558 		}
8559 		case RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY:
8560 		{
8561 			if (!priv->sh->lag_rx_port_affinity_en)
8562 				return rte_flow_error_set(error, EINVAL,
8563 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8564 							  "Unsupported aggregated affinity with older FW");
8565 			if ((attr->transfer && priv->fdb_def_rule) || attr->egress)
8566 				return rte_flow_error_set(error, EINVAL,
8567 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8568 							  "Aggregated affinity item not supported"
8569 							  " with egress or transfer"
8570 							  " attribute");
8571 			last_item = MLX5_FLOW_ITEM_AGGR_AFFINITY;
8572 			break;
8573 		}
8574 		case RTE_FLOW_ITEM_TYPE_GENEVE:
8575 			last_item = MLX5_FLOW_LAYER_GENEVE;
8576 			break;
8577 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
8578 		{
8579 			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
8580 			ret = mlx5_flow_geneve_tlv_option_validate(priv, item,
8581 								   error);
8582 			if (ret < 0)
8583 				return ret;
8584 			break;
8585 		}
8586 		case RTE_FLOW_ITEM_TYPE_COMPARE:
8587 		{
8588 			last_item = MLX5_FLOW_ITEM_COMPARE;
8589 			ret = flow_hw_validate_item_compare(item, error);
8590 			if (ret)
8591 				return ret;
8592 			break;
8593 		}
8594 		case RTE_FLOW_ITEM_TYPE_ETH:
8595 			ret = mlx5_flow_validate_item_eth(dev, item,
8596 							  *item_flags,
8597 							  true, error);
8598 			if (ret < 0)
8599 				return ret;
8600 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8601 				    MLX5_FLOW_LAYER_OUTER_L2;
8602 			break;
8603 		case RTE_FLOW_ITEM_TYPE_VLAN:
8604 			ret = mlx5_flow_dv_validate_item_vlan(item, *item_flags,
8605 							      dev, error);
8606 			if (ret < 0)
8607 				return ret;
8608 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
8609 				    MLX5_FLOW_LAYER_OUTER_VLAN;
8610 			break;
8611 		case RTE_FLOW_ITEM_TYPE_IPV4:
8612 			tunnel |= mlx5_hw_flow_tunnel_ip_check(last_item,
8613 							       item_flags);
8614 			ret = mlx5_flow_dv_validate_item_ipv4(dev, item,
8615 							      *item_flags,
8616 							      last_item, 0,
8617 							      &hws_nic_ipv4_mask,
8618 							      error);
8619 			if (ret)
8620 				return ret;
8621 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8622 				    MLX5_FLOW_LAYER_OUTER_L3_IPV4;
8623 			break;
8624 		case RTE_FLOW_ITEM_TYPE_IPV6:
8625 			tunnel |= mlx5_hw_flow_tunnel_ip_check(last_item,
8626 							       item_flags);
8627 			ret = mlx5_flow_validate_item_ipv6(dev, item,
8628 							   *item_flags,
8629 							   last_item, 0,
8630 							   &hws_nic_ipv6_mask,
8631 							   error);
8632 			if (ret < 0)
8633 				return ret;
8634 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8635 				    MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8636 			break;
8637 		case RTE_FLOW_ITEM_TYPE_UDP:
8638 			ret = mlx5_flow_validate_item_udp(dev, item,
8639 							  *item_flags,
8640 							  0xff, error);
8641 			if (ret)
8642 				return ret;
8643 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8644 				    MLX5_FLOW_LAYER_OUTER_L4_UDP;
8645 			break;
8646 		case RTE_FLOW_ITEM_TYPE_TCP:
8647 			ret = mlx5_flow_validate_item_tcp
8648 				(dev, item, *item_flags,
8649 				 0xff, &nic_tcp_mask, error);
8650 			if (ret < 0)
8651 				return ret;
8652 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8653 				    MLX5_FLOW_LAYER_OUTER_L4_TCP;
8654 			break;
8655 		case RTE_FLOW_ITEM_TYPE_GTP:
8656 			gtp_item = item;
8657 			ret = mlx5_flow_dv_validate_item_gtp(dev, gtp_item,
8658 							     *item_flags, error);
8659 			if (ret < 0)
8660 				return ret;
8661 			last_item = MLX5_FLOW_LAYER_GTP;
8662 			break;
8663 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
8664 			ret = mlx5_flow_dv_validate_item_gtp_psc(dev, item,
8665 								 last_item,
8666 								 gtp_item,
8667 								 false, error);
8668 			if (ret < 0)
8669 				return ret;
8670 			last_item = MLX5_FLOW_LAYER_GTP_PSC;
8671 			break;
8672 		case RTE_FLOW_ITEM_TYPE_VXLAN:
8673 			ret = mlx5_flow_validate_item_vxlan(dev, 0, item,
8674 							    *item_flags,
8675 							    false, error);
8676 			if (ret < 0)
8677 				return ret;
8678 			last_item = MLX5_FLOW_LAYER_VXLAN;
8679 			break;
8680 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8681 			ret = mlx5_flow_validate_item_vxlan_gpe(item,
8682 								*item_flags,
8683 								dev, error);
8684 			if (ret < 0)
8685 				return ret;
8686 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
8687 			break;
8688 		case RTE_FLOW_ITEM_TYPE_MPLS:
8689 			ret = mlx5_flow_validate_item_mpls(dev, item,
8690 							   *item_flags,
8691 							   last_item, error);
8692 			if (ret < 0)
8693 				return ret;
8694 			last_item = MLX5_FLOW_LAYER_MPLS;
8695 			break;
8696 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
8697 		case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
8698 			last_item = MLX5_FLOW_ITEM_SQ;
8699 			break;
8700 		case RTE_FLOW_ITEM_TYPE_GRE:
8701 			ret = mlx5_flow_validate_item_gre(dev, item,
8702 							  *item_flags,
8703 							  0xff, error);
8704 			if (ret < 0)
8705 				return ret;
8706 			gre_item = item;
8707 			last_item = MLX5_FLOW_LAYER_GRE;
8708 			break;
8709 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
8710 			if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
8711 				return rte_flow_error_set
8712 					(error, EINVAL,
8713 					 RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
8714 			ret = mlx5_flow_validate_item_gre_key
8715 				(dev, item, *item_flags, gre_item, error);
8716 			if (ret < 0)
8717 				return ret;
8718 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
8719 			break;
8720 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
8721 			if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
8722 				return rte_flow_error_set
8723 					(error, EINVAL,
8724 					 RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
8725 			ret = mlx5_flow_validate_item_gre_option(dev, item,
8726 								 *item_flags,
8727 								 &flow_attr,
8728 								 gre_item,
8729 								 error);
8730 			if (ret < 0)
8731 				return ret;
8732 			last_item = MLX5_FLOW_LAYER_GRE;
8733 			break;
8734 		case RTE_FLOW_ITEM_TYPE_NVGRE:
8735 			ret = mlx5_flow_validate_item_nvgre(dev, item,
8736 							    *item_flags, 0xff,
8737 							    error);
8738 			if (ret)
8739 				return ret;
8740 			last_item = MLX5_FLOW_LAYER_NVGRE;
8741 			break;
8742 		case RTE_FLOW_ITEM_TYPE_ICMP:
8743 			ret = mlx5_flow_validate_item_icmp(dev, item,
8744 							   *item_flags, 0xff,
8745 							   error);
8746 			if (ret < 0)
8747 				return ret;
8748 			last_item = MLX5_FLOW_LAYER_ICMP;
8749 			break;
8750 		case RTE_FLOW_ITEM_TYPE_ICMP6:
8751 			ret = mlx5_flow_validate_item_icmp6(dev, item,
8752 							    *item_flags, 0xff,
8753 							    error);
8754 			if (ret < 0)
8755 				return ret;
8756 			last_item = MLX5_FLOW_LAYER_ICMP6;
8757 			break;
8758 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
8759 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
8760 			ret = mlx5_flow_validate_item_icmp6_echo(dev, item,
8761 								 *item_flags,
8762 								 0xff, error);
8763 			if (ret < 0)
8764 				return ret;
8765 			last_item = MLX5_FLOW_LAYER_ICMP6;
8766 			break;
8767 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
8768 			if (*item_flags &
8769 			    (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
8770 				return rte_flow_error_set
8771 					(error, EINVAL,
8772 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8773 			ret = mlx5_flow_dv_validate_item_aso_ct(dev, item,
8774 								item_flags,
8775 								error);
8776 			if (ret < 0)
8777 				return ret;
8778 			break;
8779 		case RTE_FLOW_ITEM_TYPE_QUOTA:
8780 			if (*item_flags &
8781 			    (MLX5_FLOW_ITEM_METER_COLOR |
8782 			     MLX5_FLOW_LAYER_ASO_CT))
8783 				return rte_flow_error_set
8784 					(error, EINVAL,
8785 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8786 			last_item = MLX5_FLOW_ITEM_QUOTA;
8787 			break;
8788 		case RTE_FLOW_ITEM_TYPE_ESP:
8789 			ret = mlx5_flow_os_validate_item_esp(dev, item,
8790 							     *item_flags, 0xff,
8791 							     error);
8792 			if (ret < 0)
8793 				return ret;
8794 			last_item = MLX5_FLOW_ITEM_ESP;
8795 			break;
8796 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
8797 			last_item = tunnel ?
8798 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
8799 				    MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
8800 			break;
8801 		case RTE_FLOW_ITEM_TYPE_FLEX:
8802 			/* match mlx5dr_definer_conv_items_to_hl() */
8803 			last_item = tunnel ?
8804 				    MLX5_FLOW_ITEM_INNER_FLEX :
8805 				    MLX5_FLOW_ITEM_OUTER_FLEX;
8806 			break;
8807 		case RTE_FLOW_ITEM_TYPE_RANDOM:
8808 			last_item = MLX5_FLOW_ITEM_RANDOM;
8809 			break;
8810 		case RTE_FLOW_ITEM_TYPE_NSH:
8811 			last_item = MLX5_FLOW_ITEM_NSH;
8812 			ret = mlx5_hw_validate_item_nsh(dev, item, error);
8813 			if (ret < 0)
8814 				return ret;
8815 			break;
8816 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
8817 			/*
8818 			 * Integrity flow item validation requires access to
8819 			 * both item mask and spec.
8820 			 * Current HWS model allows item mask in pattern
8821 			 * template and item spec in flow rule.
8822 			 */
8823 			break;
8824 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
8825 		case RTE_FLOW_ITEM_TYPE_VOID:
8826 		case RTE_FLOW_ITEM_TYPE_END:
8827 			break;
8828 		default:
8829 			return rte_flow_error_set(error, EINVAL,
8830 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8831 						  NULL,
8832 						  "Unsupported item type");
8833 		}
8834 		*item_flags |= last_item;
8835 	}
8836 	return 1 + RTE_PTR_DIFF(item, items) / sizeof(item[0]);
8837 }
8838 
8839 /*
8840  * Verify that the tested flow patterns fit the STE size limit in an HWS group.
8841  *
8843  * Return values:
8844  * 0       : Tested patterns fit STE size limit
8845  * -EINVAL : Invalid parameters detected
8846  * -E2BIG  : Tested patterns exceed STE size limit
8847  */
8848 static int
8849 pattern_template_validate(struct rte_eth_dev *dev,
8850 			  struct rte_flow_pattern_template *pt[],
8851 			  uint32_t pt_num,
8852 			  struct rte_flow_error *error)
8853 {
8854 	struct mlx5_flow_template_table_cfg tbl_cfg = {
8855 		.attr = {
8856 			.nb_flows = 64,
8857 			.insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN,
8858 			.hash_func = RTE_FLOW_TABLE_HASH_FUNC_DEFAULT,
8859 			.flow_attr = {
8860 				.group = 1,
8861 				.ingress = pt[0]->attr.ingress,
8862 				.egress = pt[0]->attr.egress,
8863 				.transfer = pt[0]->attr.transfer
8864 			}
8865 		}
8866 	};
8867 	struct mlx5_priv *priv = dev->data->dev_private;
8868 	struct rte_flow_actions_template *action_template;
8869 	struct rte_flow_template_table *tmpl_tbl;
8870 	int ret;
8871 
8872 	if (pt[0]->attr.ingress) {
8873 		action_template =
8874 			priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
8875 	} else if (pt[0]->attr.egress) {
8876 		action_template =
8877 			priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
8878 	} else if (pt[0]->attr.transfer) {
8879 		action_template =
8880 			priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
8881 	} else {
8882 		ret = EINVAL;
8883 		goto end;
8884 	}
8885 
8886 	if (pt[0]->item_flags & MLX5_FLOW_ITEM_COMPARE)
8887 		tbl_cfg.attr.nb_flows = 1;
8888 	tmpl_tbl = flow_hw_table_create(dev, &tbl_cfg, pt, pt_num,
8889 					&action_template, 1, error);
8890 	if (tmpl_tbl) {
8891 		ret = 0;
8892 		flow_hw_table_destroy(dev, tmpl_tbl, error);
8893 	} else {
8894 		switch (rte_errno) {
8895 		case E2BIG:
8896 			ret = E2BIG;
8897 			break;
8898 		case ENOTSUP:
8899 			ret = EINVAL;
8900 			break;
8901 		default:
8902 			ret = 0;
8903 			break;
8904 		}
8905 	}
8906 end:
8907 	if (ret)
8908 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8909 				   NULL, "failed to validate pattern template");
8910 	return -ret;
8911 }
8912 
8913 /**
8914  * Create flow item template.
8915  *
8916  * @param[in] dev
8917  *   Pointer to the rte_eth_dev structure.
8918  * @param[in] attr
8919  *   Pointer to the item template attributes.
8920  * @param[in] items
8921  *   The template item pattern.
8922  * @param[out] error
8923  *   Pointer to error structure.
8924  *
8925  * @return
8926  *  Item template pointer on success, NULL otherwise and rte_errno is set.
8927  */
8928 static struct rte_flow_pattern_template *
8929 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
8930 			     const struct rte_flow_pattern_template_attr *attr,
8931 			     const struct rte_flow_item items[],
8932 			     struct rte_flow_error *error)
8933 {
8934 	struct mlx5_priv *priv = dev->data->dev_private;
8935 	struct rte_flow_pattern_template *it;
8936 	struct rte_flow_item *copied_items = NULL;
8937 	const struct rte_flow_item *tmpl_items;
8938 	uint64_t orig_item_nb, item_flags = 0;
8939 	struct rte_flow_item port = {
8940 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
8941 		.mask = &rte_flow_item_ethdev_mask,
8942 	};
8943 	struct rte_flow_item_tag tag_v = {
8944 		.data = 0,
8945 		.index = REG_C_0,
8946 	};
8947 	struct rte_flow_item_tag tag_m = {
8948 		.data = flow_hw_tx_tag_regc_mask(dev),
8949 		.index = 0xff,
8950 	};
8951 	struct rte_flow_item tag = {
8952 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
8953 		.spec = &tag_v,
8954 		.mask = &tag_m,
8955 		.last = NULL
8956 	};
8957 	int it_items_size;
8958 	unsigned int i = 0;
8959 	int rc;
8960 
8961 	/* Validate application items only */
8962 	rc = flow_hw_pattern_validate(dev, attr, items, &item_flags, error);
8963 	if (rc < 0)
8964 		return NULL;
8965 	orig_item_nb = rc;
8966 	if (priv->sh->config.dv_esw_en &&
8967 	    priv->sh->config.repr_matching &&
8968 	    attr->ingress && !attr->egress && !attr->transfer) {
8969 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
8970 		if (!copied_items)
8971 			return NULL;
8972 		tmpl_items = copied_items;
8973 	} else if (priv->sh->config.dv_esw_en &&
8974 		   priv->sh->config.repr_matching &&
8975 		   !attr->ingress && attr->egress && !attr->transfer) {
8976 		if (item_flags & MLX5_FLOW_ITEM_SQ) {
8977 			DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
8978 				       "pattern template", dev->data->port_id);
8979 			tmpl_items = items;
8980 			goto setup_pattern_template;
8981 		}
8982 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
8983 		if (!copied_items)
8984 			return NULL;
8985 		tmpl_items = copied_items;
8986 	} else {
8987 		tmpl_items = items;
8988 	}
8989 setup_pattern_template:
8990 	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
8991 	if (!it) {
8992 		rte_flow_error_set(error, ENOMEM,
8993 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8994 				   NULL,
8995 				   "cannot allocate item template");
8996 		goto error;
8997 	}
8998 	it->attr = *attr;
8999 	it->item_flags = item_flags;
9000 	it->orig_item_nb = orig_item_nb;
9001 	it_items_size = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, tmpl_items, error);
9002 	if (it_items_size <= 0) {
9003 		rte_flow_error_set(error, ENOMEM,
9004 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9005 				   NULL,
9006 				   "Failed to determine buffer size for pattern");
9007 		goto error;
9008 	}
9009 	it_items_size = RTE_ALIGN(it_items_size, 16);
9010 	it->items = mlx5_malloc(MLX5_MEM_ZERO, it_items_size, 0, rte_dev_numa_node(dev->device));
9011 	if (it->items == NULL) {
9012 		rte_flow_error_set(error, ENOMEM,
9013 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9014 				   NULL,
9015 				   "Cannot allocate memory for pattern");
9016 		goto error;
9017 	}
9018 	rc = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, it->items, it_items_size, tmpl_items, error);
9019 	if (rc <= 0) {
9020 		rte_flow_error_set(error, ENOMEM,
9021 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9022 				   NULL,
9023 				   "Failed to store pattern");
9024 		goto error;
9025 	}
9026 	it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
9027 	if (!it->mt) {
9028 		rte_flow_error_set(error, rte_errno,
9029 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9030 				   NULL,
9031 				   "cannot create match template");
9032 		goto error;
9033 	}
9034 	if (copied_items) {
9035 		if (attr->ingress)
9036 			it->implicit_port = true;
9037 		else if (attr->egress)
9038 			it->implicit_tag = true;
9039 		mlx5_free(copied_items);
9040 		copied_items = NULL;
9041 	}
9042 	/* Either inner or outer IPv6 routing extension is allowed, not both. */
9043 	if (it->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
9044 			      MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
9045 		if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
9046 		     (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) ||
9047 		    (mlx5_alloc_srh_flex_parser(dev))) {
9048 			rte_flow_error_set(error, rte_errno,
9049 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9050 					   "cannot create IPv6 routing extension support");
9051 			goto error;
9052 		}
9053 	}
9054 	if (it->item_flags & MLX5_FLOW_ITEM_FLEX) {
9055 		for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
9056 			const struct rte_flow_item_flex *spec = items[i].spec;
9057 			struct rte_flow_item_flex_handle *handle;
9058 
9059 			if (items[i].type != RTE_FLOW_ITEM_TYPE_FLEX)
9060 				continue;
9061 			handle = spec->handle;
9062 			if (flow_hw_flex_item_acquire(dev, handle,
9063 						      &it->flex_item)) {
9064 				rte_flow_error_set(error, EINVAL,
9065 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9066 						   NULL, "cannot create hw FLEX item");
9067 				goto error;
9068 			}
9069 		}
9070 	}
9071 	if (it->item_flags & MLX5_FLOW_LAYER_GENEVE_OPT) {
9072 		for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
9073 			const struct rte_flow_item_geneve_opt *spec =
9074 				items[i].spec;
9075 
9076 			if (items[i].type != RTE_FLOW_ITEM_TYPE_GENEVE_OPT)
9077 				continue;
9078 			if (mlx5_geneve_tlv_option_register(priv, spec,
9079 							    &it->geneve_opt_mng)) {
9080 				rte_flow_error_set(error, EINVAL,
9081 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9082 						   NULL, "cannot register GENEVE TLV option");
9083 				goto error;
9084 			}
9085 		}
9086 	}
9087 	rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
9088 	rc = pattern_template_validate(dev, &it, 1, error);
9089 	if (rc)
9090 		goto error;
9091 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
9092 	return it;
9093 error:
9094 	if (it) {
9095 		if (it->flex_item)
9096 			flow_hw_flex_item_release(dev, &it->flex_item);
9097 		if (it->geneve_opt_mng.nb_options)
9098 			mlx5_geneve_tlv_options_unregister(priv, &it->geneve_opt_mng);
9099 		if (it->mt)
9100 			claim_zero(mlx5dr_match_template_destroy(it->mt));
9101 		mlx5_free(it->items);
9102 		mlx5_free(it);
9103 	}
9104 	if (copied_items)
9105 		mlx5_free(copied_items);
9106 	return NULL;
9107 }
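
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * creates a pattern template through the generic rte_flow API, which ends up
 * in the callback above. Port id 0 and the fully masked outer IPv4 source
 * address are assumptions made for the example.
 *
 * @code{.c}
 * struct rte_flow_pattern_template_attr pt_attr = {
 *	.relaxed_matching = 0,
 *	.ingress = 1,
 * };
 * struct rte_flow_item_ipv4 ipv4_mask = {
 *	.hdr.src_addr = RTE_BE32(0xffffffff),
 * };
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow_pattern_template *pt =
 *	rte_flow_pattern_template_create(0, &pt_attr, pattern, &err);
 * @endcode
 */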
9108 
9109 /**
9110  * Destroy flow item template.
9111  *
9112  * @param[in] dev
9113  *   Pointer to the rte_eth_dev structure.
9114  * @param[in] template
9115  *   Pointer to the item template to be destroyed.
9116  * @param[out] error
9117  *   Pointer to error structure.
9118  *
9119  * @return
9120  *   0 on success, a negative errno value otherwise and rte_errno is set.
9121  */
9122 static int
9123 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
9124 			      struct rte_flow_pattern_template *template,
9125 			      struct rte_flow_error *error __rte_unused)
9126 {
9127 	struct mlx5_priv *priv = dev->data->dev_private;
9128 
9129 	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
9130 		DRV_LOG(WARNING, "Item template %p is still in use.",
9131 			(void *)template);
9132 		return rte_flow_error_set(error, EBUSY,
9133 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9134 				   NULL,
9135 				   "item template is in use");
9136 	}
9137 	if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
9138 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
9139 		mlx5_free_srh_flex_parser(dev);
9140 	LIST_REMOVE(template, next);
9141 	flow_hw_flex_item_release(dev, &template->flex_item);
9142 	mlx5_geneve_tlv_options_unregister(priv, &template->geneve_opt_mng);
9143 	claim_zero(mlx5dr_match_template_destroy(template->mt));
9144 	mlx5_free(template->items);
9145 	mlx5_free(template);
9146 	return 0;
9147 }
9148 
9149 /**
9150  * Get information about HWS pre-configurable resources.
9151  *
9152  * @param[in] dev
9153  *   Pointer to the rte_eth_dev structure.
9154  * @param[out] port_info
9155  *   Pointer to port information.
9156  * @param[out] queue_info
9157  *   Pointer to queue information.
9158  * @param[out] error
9159  *   Pointer to error structure.
9160  *
9161  * @return
9162  *   0 on success, a negative errno value otherwise and rte_errno is set.
9163  */
9164 static int
9165 flow_hw_info_get(struct rte_eth_dev *dev,
9166 		 struct rte_flow_port_info *port_info,
9167 		 struct rte_flow_queue_info *queue_info,
9168 		 struct rte_flow_error *error __rte_unused)
9169 {
9170 	struct mlx5_priv *priv = dev->data->dev_private;
9171 	uint16_t port_id = dev->data->port_id;
9172 	struct rte_mtr_capabilities mtr_cap;
9173 	int ret;
9174 
9175 	memset(port_info, 0, sizeof(*port_info));
9176 	/* Queue size is unlimited from low-level. */
9177 	port_info->max_nb_queues = UINT32_MAX;
9178 	queue_info->max_size = UINT32_MAX;
9179 
9180 	memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
9181 	ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
9182 	if (!ret)
9183 		port_info->max_nb_meters = mtr_cap.n_max;
9184 	port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
9185 	port_info->max_nb_aging_objects = port_info->max_nb_counters;
9186 	return 0;
9187 }
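
/*
 * Usage sketch (illustrative only, not part of the driver): the values
 * reported by the callback above are typically fed back into
 * rte_flow_configure() before template creation. Port id 0 and the
 * queue/counter sizes are assumptions made for the example.
 *
 * @code{.c}
 * struct rte_flow_port_info port_info;
 * struct rte_flow_queue_info queue_info;
 * struct rte_flow_error err;
 * struct rte_flow_port_attr port_attr = { .nb_counters = 1 << 16 };
 * struct rte_flow_queue_attr queue_attr = { .size = 256 };
 * const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };
 *
 * if (rte_flow_info_get(0, &port_info, &queue_info, &err) == 0)
 *	rte_flow_configure(0, &port_attr, 1, queue_attrs, &err);
 * @endcode
 */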
9188 
9189 /**
9190  * Create group callback.
9191  *
9192  * @param[in] tool_ctx
9193  *   Pointer to the hash list related context.
9194  * @param[in] cb_ctx
9195  *   Pointer to the group creation context.
9196  *
9197  * @return
9198  *   Group entry on success, NULL otherwise and rte_errno is set.
9199  */
9200 struct mlx5_list_entry *
9201 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
9202 {
9203 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9204 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9205 	struct rte_eth_dev *dev = ctx->dev;
9206 	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
9207 	struct mlx5_priv *priv = dev->data->dev_private;
9208 	struct mlx5dr_table_attr dr_tbl_attr = {0};
9209 	struct rte_flow_error *error = ctx->error;
9210 	struct mlx5_flow_group *grp_data;
9211 	struct mlx5dr_table *tbl = NULL;
9212 	struct mlx5dr_action *jump;
9213 	uint32_t idx = 0;
9214 	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
9215 	      attr->transfer ? "FDB" : "NIC", attr->egress ? "egress" : "ingress",
9216 	      attr->group, idx);
9217 
9218 	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
9219 	if (!grp_data) {
9220 		rte_flow_error_set(error, ENOMEM,
9221 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9222 				   NULL,
9223 				   "cannot allocate flow table data entry");
9224 		return NULL;
9225 	}
9226 	dr_tbl_attr.level = attr->group;
9227 	if (attr->transfer)
9228 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
9229 	else if (attr->egress)
9230 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
9231 	else
9232 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
9233 	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
9234 	if (!tbl)
9235 		goto error;
9236 	grp_data->tbl = tbl;
9237 	if (attr->group) {
9238 		/* Jump action to be used by non-root tables. */
9239 		jump = mlx5dr_action_create_dest_table
9240 			(priv->dr_ctx, tbl,
9241 			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
9242 		if (!jump)
9243 			goto error;
9244 		grp_data->jump.hws_action = jump;
9245 		/* Jump action to be used by the root table. */
9246 		jump = mlx5dr_action_create_dest_table
9247 			(priv->dr_ctx, tbl,
9248 			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
9249 					 [dr_tbl_attr.type]);
9250 		if (!jump)
9251 			goto error;
9252 		grp_data->jump.root_action = jump;
9253 	}
9254 
9255 	grp_data->matchers = mlx5_list_create(matcher_name, sh, true,
9256 					      flow_matcher_create_cb,
9257 					      flow_matcher_match_cb,
9258 					      flow_matcher_remove_cb,
9259 					      flow_matcher_clone_cb,
9260 					      flow_matcher_clone_free_cb);
9261 	grp_data->dev = dev;
9262 	grp_data->idx = idx;
9263 	grp_data->group_id = attr->group;
9264 	grp_data->type = dr_tbl_attr.type;
9265 	return &grp_data->entry;
9266 error:
9267 	if (grp_data->jump.root_action)
9268 		mlx5dr_action_destroy(grp_data->jump.root_action);
9269 	if (grp_data->jump.hws_action)
9270 		mlx5dr_action_destroy(grp_data->jump.hws_action);
9271 	if (tbl)
9272 		mlx5dr_table_destroy(tbl);
9273 	if (idx)
9274 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
9275 	rte_flow_error_set(error, ENOMEM,
9276 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9277 			   NULL,
9278 			   "cannot allocate flow dr table");
9279 	return NULL;
9280 }
9281 
9282 /**
9283  * Remove group callback.
9284  *
9285  * @param[in] tool_ctx
9286  *   Pointer to the hash list related context.
9287  * @param[in] entry
9288  *   Pointer to the entry to be removed.
9289  */
9290 void
9291 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
9292 {
9293 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9294 	struct mlx5_flow_group *grp_data =
9295 		    container_of(entry, struct mlx5_flow_group, entry);
9296 
9297 	MLX5_ASSERT(entry && sh);
9298 	/* To use the wrapper glue functions instead. */
9299 	if (grp_data->jump.hws_action)
9300 		mlx5dr_action_destroy(grp_data->jump.hws_action);
9301 	if (grp_data->jump.root_action)
9302 		mlx5dr_action_destroy(grp_data->jump.root_action);
9303 	mlx5_list_destroy(grp_data->matchers);
9304 	mlx5dr_table_destroy(grp_data->tbl);
9305 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
9306 }
9307 
9308 /**
9309  * Match group callback.
9310  *
9311  * @param[in] tool_ctx
9312  *   Pointer to the hash list related context.
9313  * @param[in] entry
9314  *   Pointer to the group to be matched.
9315  * @param[in] cb_ctx
9316  *   Pointer to the group matching context.
9317  *
9318  * @return
9319  *   0 on match, 1 on mismatch.
9320  */
9321 int
9322 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
9323 		     void *cb_ctx)
9324 {
9325 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9326 	struct mlx5_flow_group *grp_data =
9327 		container_of(entry, struct mlx5_flow_group, entry);
9328 	struct rte_flow_attr *attr =
9329 			(struct rte_flow_attr *)ctx->data;
9330 
9331 	return (grp_data->dev != ctx->dev) ||
9332 		(grp_data->group_id != attr->group) ||
9333 		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
9334 		attr->transfer) ||
9335 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
9336 		attr->egress) ||
9337 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
9338 		attr->ingress);
9339 }
9340 
9341 /**
9342  * Clone group entry callback.
9343  *
9344  * @param[in] tool_ctx
9345  *   Pointer to the hash list related context.
9346  *   Pointer to the group entry to be cloned.
9347  * @param[in] cb_ctx
9348  *   Pointer to the clone context.
9349  *
9350  * @return
9351  *   Cloned group entry on success, NULL otherwise and rte_errno is set.
9352  *   0 on matched, 1 on miss matched.
9353  */
9354 struct mlx5_list_entry *
9355 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
9356 		     void *cb_ctx)
9357 {
9358 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9359 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9360 	struct mlx5_flow_group *grp_data;
9361 	struct rte_flow_error *error = ctx->error;
9362 	uint32_t idx = 0;
9363 
9364 	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
9365 	if (!grp_data) {
9366 		rte_flow_error_set(error, ENOMEM,
9367 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9368 				   NULL,
9369 				   "cannot allocate flow table data entry");
9370 		return NULL;
9371 	}
9372 	memcpy(grp_data, oentry, sizeof(*grp_data));
9373 	grp_data->idx = idx;
9374 	return &grp_data->entry;
9375 }
9376 
9377 /**
9378  * Free cloned group entry callback.
9379  *
9380  * @param[in] tool_ctx
9381  *   Pointer to the hash list related context.
9382  * @param[in] entry
9383  *   Pointer to the group to be freed.
9384  */
9385 void
9386 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
9387 {
9388 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9389 	struct mlx5_flow_group *grp_data =
9390 		    container_of(entry, struct mlx5_flow_group, entry);
9391 
9392 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
9393 }
9394 
9395 /**
9396  * Create and cache a vport action for given @p dev port. vport actions
9397  * cache is used in HWS with FDB flows.
9398  *
9399  * This function does not create any action if the proxy port for @p dev port
9400  * was not configured for HW Steering.
9401  *
9402  * This function assumes that E-Switch is enabled and PMD is running with
9403  * HW Steering configured.
9404  *
9405  * @param dev
9406  *   Pointer to Ethernet device which will be the action destination.
9407  *
9408  * @return
9409  *   0 on success, a negative errno value otherwise.
9410  */
9411 int
9412 flow_hw_create_vport_action(struct rte_eth_dev *dev)
9413 {
9414 	struct mlx5_priv *priv = dev->data->dev_private;
9415 	struct rte_eth_dev *proxy_dev;
9416 	struct mlx5_priv *proxy_priv;
9417 	uint16_t port_id = dev->data->port_id;
9418 	uint16_t proxy_port_id = port_id;
9419 	int ret;
9420 
9421 	ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
9422 	if (ret)
9423 		return ret;
9424 	proxy_dev = &rte_eth_devices[proxy_port_id];
9425 	proxy_priv = proxy_dev->data->dev_private;
9426 	if (!proxy_priv->hw_vport)
9427 		return 0;
9428 	if (proxy_priv->hw_vport[port_id]) {
9429 		DRV_LOG(ERR, "port %u HWS vport action already created",
9430 			port_id);
9431 		return -EINVAL;
9432 	}
9433 	proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
9434 			(proxy_priv->dr_ctx, priv->dev_port,
9435 			 MLX5DR_ACTION_FLAG_HWS_FDB);
9436 	if (!proxy_priv->hw_vport[port_id]) {
9437 		DRV_LOG(ERR, "port %u unable to create HWS vport action",
9438 			port_id);
9439 		return -EINVAL;
9440 	}
9441 	return 0;
9442 }
9443 
9444 /**
9445  * Destroys the vport action associated with @p dev device
9446  * from actions' cache.
9447  *
9448  * This function does not destroy any action if there is no action cached
9449  * for @p dev or proxy port was not configured for HW Steering.
9450  *
9451  * This function assumes that E-Switch is enabled and PMD is running with
9452  * HW Steering configured.
9453  *
9454  * @param dev
9455  *   Pointer to Ethernet device which will be the action destination.
9456  */
9457 void
9458 flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
9459 {
9460 	struct rte_eth_dev *proxy_dev;
9461 	struct mlx5_priv *proxy_priv;
9462 	uint16_t port_id = dev->data->port_id;
9463 	uint16_t proxy_port_id = port_id;
9464 
9465 	if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
9466 		return;
9467 	proxy_dev = &rte_eth_devices[proxy_port_id];
9468 	proxy_priv = proxy_dev->data->dev_private;
9469 	if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
9470 		return;
9471 	mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
9472 	proxy_priv->hw_vport[port_id] = NULL;
9473 }
9474 
9475 static int
9476 flow_hw_create_vport_actions(struct mlx5_priv *priv)
9477 {
9478 	uint16_t port_id;
9479 
9480 	MLX5_ASSERT(!priv->hw_vport);
9481 	priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
9482 				     sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
9483 				     0, SOCKET_ID_ANY);
9484 	if (!priv->hw_vport)
9485 		return -ENOMEM;
9486 	DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
9487 	DRV_LOG(DEBUG, "port %u ::    domain_id=%u", priv->dev_data->port_id, priv->domain_id);
9488 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
9489 		struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;
9490 
9491 		if (!port_priv ||
9492 		    port_priv->domain_id != priv->domain_id)
9493 			continue;
9494 		DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
9495 			priv->dev_data->port_id, port_id, port_priv->dev_port);
9496 		priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
9497 				(priv->dr_ctx, port_priv->dev_port,
9498 				 MLX5DR_ACTION_FLAG_HWS_FDB);
9499 		DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
9500 			priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
9501 		if (!priv->hw_vport[port_id])
9502 			return -EINVAL;
9503 	}
9504 	return 0;
9505 }
9506 
9507 static void
9508 flow_hw_free_vport_actions(struct mlx5_priv *priv)
9509 {
9510 	uint16_t port_id;
9511 
9512 	if (!priv->hw_vport)
9513 		return;
9514 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
9515 		if (priv->hw_vport[port_id])
9516 			mlx5dr_action_destroy(priv->hw_vport[port_id]);
9517 	mlx5_free(priv->hw_vport);
9518 	priv->hw_vport = NULL;
9519 }
9520 
9521 static void
9522 flow_hw_create_send_to_kernel_actions(struct mlx5_priv *priv __rte_unused)
9523 {
9524 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
9525 	int action_flag;
9526 	int i;
9527 	bool is_vf_sf_dev = priv->sh->dev_cap.vf || priv->sh->dev_cap.sf;
9528 
9529 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9530 		if ((!priv->sh->config.dv_esw_en || is_vf_sf_dev) &&
9531 		     i == MLX5DR_TABLE_TYPE_FDB)
9532 			continue;
9533 		action_flag = mlx5_hw_act_flag[1][i];
9534 		priv->hw_send_to_kernel[i] =
9535 				mlx5dr_action_create_dest_root(priv->dr_ctx,
9536 							MLX5_HW_LOWEST_PRIO_ROOT,
9537 							action_flag);
9538 		if (!priv->hw_send_to_kernel[i]) {
9539 			DRV_LOG(WARNING, "Unable to create HWS send to kernel action");
9540 			return;
9541 		}
9542 	}
9543 #endif
9544 }
9545 
9546 static void
9547 flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv)
9548 {
9549 	int i;
9550 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9551 		if (priv->hw_send_to_kernel[i]) {
9552 			mlx5dr_action_destroy(priv->hw_send_to_kernel[i]);
9553 			priv->hw_send_to_kernel[i] = NULL;
9554 		}
9555 	}
9556 }
9557 
9558 static void
9559 flow_hw_destroy_nat64_actions(struct mlx5_priv *priv)
9560 {
9561 	uint32_t i;
9562 
9563 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9564 		if (priv->action_nat64[i][RTE_FLOW_NAT64_6TO4]) {
9565 			(void)mlx5dr_action_destroy(priv->action_nat64[i][RTE_FLOW_NAT64_6TO4]);
9566 			priv->action_nat64[i][RTE_FLOW_NAT64_6TO4] = NULL;
9567 		}
9568 		if (priv->action_nat64[i][RTE_FLOW_NAT64_4TO6]) {
9569 			(void)mlx5dr_action_destroy(priv->action_nat64[i][RTE_FLOW_NAT64_4TO6]);
9570 			priv->action_nat64[i][RTE_FLOW_NAT64_4TO6] = NULL;
9571 		}
9572 	}
9573 }
9574 
9575 static int
9576 flow_hw_create_nat64_actions(struct mlx5_priv *priv, struct rte_flow_error *error)
9577 {
9578 	struct mlx5dr_action_nat64_attr attr;
9579 	uint8_t regs[MLX5_FLOW_NAT64_REGS_MAX];
9580 	uint32_t i;
9581 	const uint32_t flags[MLX5DR_TABLE_TYPE_MAX] = {
9582 		MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED,
9583 		MLX5DR_ACTION_FLAG_HWS_TX | MLX5DR_ACTION_FLAG_SHARED,
9584 		MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED,
9585 	};
9586 	struct mlx5dr_action *act;
9587 
9588 	attr.registers = regs;
9589 	/* Try to use 3 registers by default. */
9590 	attr.num_of_registers = MLX5_FLOW_NAT64_REGS_MAX;
9591 	for (i = 0; i < MLX5_FLOW_NAT64_REGS_MAX; i++) {
9592 		MLX5_ASSERT(priv->sh->registers.nat64_regs[i] != REG_NON);
9593 		regs[i] = mlx5_convert_reg_to_field(priv->sh->registers.nat64_regs[i]);
9594 	}
9595 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9596 		if (i == MLX5DR_TABLE_TYPE_FDB && !priv->sh->config.dv_esw_en)
9597 			continue;
9598 		attr.flags = (enum mlx5dr_action_nat64_flags)
9599 			     (MLX5DR_ACTION_NAT64_V6_TO_V4 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
9600 		act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
9601 		if (!act)
9602 			return rte_flow_error_set(error, rte_errno,
9603 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9604 						  "Failed to create v6 to v4 action.");
9605 		priv->action_nat64[i][RTE_FLOW_NAT64_6TO4] = act;
9606 		attr.flags = (enum mlx5dr_action_nat64_flags)
9607 			     (MLX5DR_ACTION_NAT64_V4_TO_V6 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
9608 		act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
9609 		if (!act)
9610 			return rte_flow_error_set(error, rte_errno,
9611 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9612 						  "Failed to create v4 to v6 action.");
9613 		priv->action_nat64[i][RTE_FLOW_NAT64_4TO6] = act;
9614 	}
9615 	return 0;
9616 }
9617 
9618 /**
9619  * Create an egress pattern template matching on source SQ.
9620  *
9621  * @param dev
9622  *   Pointer to Ethernet device.
9623  * @param[out] error
9624  *   Pointer to error structure.
9625  *
9626  * @return
9627  *   Pointer to pattern template on success. NULL otherwise, and rte_errno is set.
9628  */
9629 static struct rte_flow_pattern_template *
9630 flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev, struct rte_flow_error *error)
9631 {
9632 	struct rte_flow_pattern_template_attr attr = {
9633 		.relaxed_matching = 0,
9634 		.egress = 1,
9635 	};
9636 	struct mlx5_rte_flow_item_sq sq_mask = {
9637 		.queue = UINT32_MAX,
9638 	};
9639 	struct rte_flow_item items[] = {
9640 		{
9641 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9642 			.mask = &sq_mask,
9643 		},
9644 		{
9645 			.type = RTE_FLOW_ITEM_TYPE_END,
9646 		},
9647 	};
9648 
9649 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9650 }
9651 
9652 static __rte_always_inline uint32_t
9653 flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
9654 {
9655 	struct mlx5_priv *priv = dev->data->dev_private;
9656 	uint32_t mask = priv->sh->dv_regc0_mask;
9657 
9658 	/* Mask is verified during device initialization. Sanity checking here. */
9659 	MLX5_ASSERT(mask != 0);
9660 	/*
9661 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
9662 	 * Sanity checking here.
9663 	 */
9664 	MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask));
9665 	return mask;
9666 }
9667 
9668 static __rte_always_inline uint32_t
9669 flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
9670 {
9671 	struct mlx5_priv *priv = dev->data->dev_private;
9672 	uint32_t tag;
9673 
9674 	/* Mask is verified during device initialization. Sanity checking here. */
9675 	MLX5_ASSERT(priv->vport_meta_mask != 0);
9676 	tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
9677 	/*
9678 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
9679 	 * Sanity checking here.
9680 	 */
9681 	MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
9682 	return tag;
9683 }
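
/*
 * Worked example for the two helpers above (a sketch with assumed values, not
 * taken from real hardware): if the REG_C_0 bits usable by the PMD are
 * dv_regc0_mask == 0x0000ffff and the kernel assigned vport_meta_mask == 0x0000f000
 * with vport_meta_tag == 0x00003000, then:
 *
 * @code{.c}
 * uint32_t regc_mask = 0x0000ffff;      // flow_hw_tx_tag_regc_mask() result (assumed)
 * uint32_t meta_mask = 0x0000f000;      // assumed priv->vport_meta_mask
 * uint32_t meta_tag  = 0x00003000;      // assumed priv->vport_meta_tag
 * uint32_t tag    = meta_tag >> rte_bsf32(meta_mask);  // == 0x3, flow_hw_tx_tag_regc_value()
 * uint32_t offset = rte_bsf32(regc_mask);              // == 0, used as MODIFY_FIELD dst offset
 * uint32_t width  = rte_popcount32(regc_mask);         // == 16, used as MODIFY_FIELD width
 * @endcode
 */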
9684 
9685 static void
9686 flow_hw_update_action_mask(struct rte_flow_action *action,
9687 			   struct rte_flow_action *mask,
9688 			   enum rte_flow_action_type type,
9689 			   void *conf_v,
9690 			   void *conf_m)
9691 {
9692 	action->type = type;
9693 	action->conf = conf_v;
9694 	mask->type = type;
9695 	mask->conf = conf_m;
9696 }
9697 
9698 /**
9699  * Create an egress actions template with MODIFY_FIELD action for setting unused REG_C_0 bits
9700  * to vport tag and JUMP action to group 1.
9701  *
9702  * If extended metadata mode is enabled, then MODIFY_FIELD action for copying software metadata
9703  * to REG_C_1 is added as well.
9704  *
9705  * @param dev
9706  *   Pointer to Ethernet device.
9707  * @param[out] error
9708  *   Pointer to error structure.
9709  *
9710  * @return
9711  *   Pointer to actions template on success. NULL otherwise, and rte_errno is set.
9712  */
9713 static struct rte_flow_actions_template *
9714 flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev,
9715 					  struct rte_flow_error *error)
9716 {
9717 	uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev);
9718 	uint32_t tag_value = flow_hw_tx_tag_regc_value(dev);
9719 	struct rte_flow_actions_template_attr attr = {
9720 		.egress = 1,
9721 	};
9722 	struct rte_flow_action_modify_field set_tag_v = {
9723 		.operation = RTE_FLOW_MODIFY_SET,
9724 		.dst = {
9725 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9726 			.tag_index = REG_C_0,
9727 			.offset = rte_bsf32(tag_mask),
9728 		},
9729 		.src = {
9730 			.field = RTE_FLOW_FIELD_VALUE,
9731 		},
9732 		.width = rte_popcount32(tag_mask),
9733 	};
9734 	struct rte_flow_action_modify_field set_tag_m = {
9735 		.operation = RTE_FLOW_MODIFY_SET,
9736 		.dst = {
9737 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9738 			.level = UINT8_MAX,
9739 			.tag_index = UINT8_MAX,
9740 			.offset = UINT32_MAX,
9741 		},
9742 		.src = {
9743 			.field = RTE_FLOW_FIELD_VALUE,
9744 		},
9745 		.width = UINT32_MAX,
9746 	};
9747 	struct rte_flow_action_modify_field copy_metadata_v = {
9748 		.operation = RTE_FLOW_MODIFY_SET,
9749 		.dst = {
9750 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9751 			.tag_index = REG_C_1,
9752 		},
9753 		.src = {
9754 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9755 			.tag_index = REG_A,
9756 		},
9757 		.width = 32,
9758 	};
9759 	struct rte_flow_action_modify_field copy_metadata_m = {
9760 		.operation = RTE_FLOW_MODIFY_SET,
9761 		.dst = {
9762 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9763 			.level = UINT8_MAX,
9764 			.tag_index = UINT8_MAX,
9765 			.offset = UINT32_MAX,
9766 		},
9767 		.src = {
9768 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9769 			.level = UINT8_MAX,
9770 			.tag_index = UINT8_MAX,
9771 			.offset = UINT32_MAX,
9772 		},
9773 		.width = UINT32_MAX,
9774 	};
9775 	struct rte_flow_action_jump jump_v = {
9776 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
9777 	};
9778 	struct rte_flow_action_jump jump_m = {
9779 		.group = UINT32_MAX,
9780 	};
9781 	struct rte_flow_action actions_v[4] = { { 0 } };
9782 	struct rte_flow_action actions_m[4] = { { 0 } };
9783 	unsigned int idx = 0;
9784 
9785 	rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
9786 	rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
9787 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
9788 				   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
9789 				   &set_tag_v, &set_tag_m);
9790 	idx++;
9791 	if (MLX5_SH(dev)->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
9792 		flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
9793 					   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
9794 					   &copy_metadata_v, &copy_metadata_m);
9795 		idx++;
9796 	}
9797 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_JUMP,
9798 				   &jump_v, &jump_m);
9799 	idx++;
9800 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_END,
9801 				   NULL, NULL);
9802 	idx++;
9803 	MLX5_ASSERT(idx <= RTE_DIM(actions_v));
9804 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
9805 }
9806 
9807 static void
9808 flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev)
9809 {
9810 	struct mlx5_priv *priv = dev->data->dev_private;
9811 
9812 	if (priv->hw_tx_repr_tagging_tbl) {
9813 		flow_hw_table_destroy(dev, priv->hw_tx_repr_tagging_tbl, NULL);
9814 		priv->hw_tx_repr_tagging_tbl = NULL;
9815 	}
9816 	if (priv->hw_tx_repr_tagging_at) {
9817 		flow_hw_actions_template_destroy(dev, priv->hw_tx_repr_tagging_at, NULL);
9818 		priv->hw_tx_repr_tagging_at = NULL;
9819 	}
9820 	if (priv->hw_tx_repr_tagging_pt) {
9821 		flow_hw_pattern_template_destroy(dev, priv->hw_tx_repr_tagging_pt, NULL);
9822 		priv->hw_tx_repr_tagging_pt = NULL;
9823 	}
9824 }
9825 
9826 /**
9827  * Setup templates and table used to create default Tx flow rules. These default rules
9828  * allow for matching Tx representor traffic using a vport tag placed in unused bits of
9829  * REG_C_0 register.
9830  *
9831  * @param dev
9832  *   Pointer to Ethernet device.
9833  * @param[out] error
9834  *   Pointer to error structure.
9835  *
9836  * @return
9837  *   0 on success, negative errno value otherwise.
9838  */
9839 static int
9840 flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev, struct rte_flow_error *error)
9841 {
9842 	struct mlx5_priv *priv = dev->data->dev_private;
9843 	struct rte_flow_template_table_attr attr = {
9844 		.flow_attr = {
9845 			.group = 0,
9846 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
9847 			.egress = 1,
9848 		},
9849 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
9850 	};
9851 	struct mlx5_flow_template_table_cfg cfg = {
9852 		.attr = attr,
9853 		.external = false,
9854 	};
9855 
9856 	MLX5_ASSERT(priv->sh->config.dv_esw_en);
9857 	MLX5_ASSERT(priv->sh->config.repr_matching);
9858 	priv->hw_tx_repr_tagging_pt =
9859 		flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error);
9860 	if (!priv->hw_tx_repr_tagging_pt)
9861 		goto err;
9862 	priv->hw_tx_repr_tagging_at =
9863 		flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev, error);
9864 	if (!priv->hw_tx_repr_tagging_at)
9865 		goto err;
9866 	priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg,
9867 							    &priv->hw_tx_repr_tagging_pt, 1,
9868 							    &priv->hw_tx_repr_tagging_at, 1,
9869 							    error);
9870 	if (!priv->hw_tx_repr_tagging_tbl)
9871 		goto err;
9872 	return 0;
9873 err:
9874 	flow_hw_cleanup_tx_repr_tagging(dev);
9875 	return -rte_errno;
9876 }
9877 
9878 static uint32_t
9879 flow_hw_esw_mgr_regc_marker_mask(struct rte_eth_dev *dev)
9880 {
9881 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
9882 
9883 	/* Mask is verified during device initialization. */
9884 	MLX5_ASSERT(mask != 0);
9885 	return mask;
9886 }
9887 
9888 static uint32_t
9889 flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev)
9890 {
9891 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
9892 
9893 	/* Mask is verified during device initialization. */
9894 	MLX5_ASSERT(mask != 0);
9895 	return RTE_BIT32(rte_bsf32(mask));
9896 }
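
/*
 * Worked example for the two helpers above (mask value assumed for illustration):
 * with dv_regc0_mask == 0x0000ffff, flow_hw_esw_mgr_regc_marker_mask() returns the
 * whole mask and flow_hw_esw_mgr_regc_marker() returns its least significant bit,
 * which marks traffic originating from the E-Switch Manager:
 *
 * @code{.c}
 * uint32_t mask   = 0x0000ffff;                 // assumed dv_regc0_mask
 * uint32_t marker = RTE_BIT32(rte_bsf32(mask)); // == 0x1
 * @endcode
 */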
9897 
9898 /**
9899  * Creates a flow pattern template used to match on E-Switch Manager.
9900  * This template is used to set up a table for SQ miss default flow.
9901  *
9902  * @param dev
9903  *   Pointer to Ethernet device.
9904  * @param error
9905  *   Pointer to error structure.
9906  *
9907  * @return
9908  *   Pointer to flow pattern template on success, NULL otherwise.
9909  */
9910 static struct rte_flow_pattern_template *
9911 flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev,
9912 					     struct rte_flow_error *error)
9913 {
9914 	struct rte_flow_pattern_template_attr attr = {
9915 		.relaxed_matching = 0,
9916 		.transfer = 1,
9917 	};
9918 	struct rte_flow_item_ethdev port_spec = {
9919 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
9920 	};
9921 	struct rte_flow_item_ethdev port_mask = {
9922 		.port_id = UINT16_MAX,
9923 	};
9924 	struct mlx5_rte_flow_item_sq sq_mask = {
9925 		.queue = UINT32_MAX,
9926 	};
9927 	struct rte_flow_item items[] = {
9928 		{
9929 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
9930 			.spec = &port_spec,
9931 			.mask = &port_mask,
9932 		},
9933 		{
9934 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9935 			.mask = &sq_mask,
9936 		},
9937 		{
9938 			.type = RTE_FLOW_ITEM_TYPE_END,
9939 		},
9940 	};
9941 
9942 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9943 }
9944 
9945 /**
9946  * Creates a flow pattern template used to match REG_C_0 and a SQ.
9947  * Matching on REG_C_0 is set up to match on all bits usable by user-space.
9948  * If traffic was sent from E-Switch Manager, then all usable bits will be set to 0,
9949  * except the least significant bit, which will be set to 1.
9950  *
9951  * This template is used to set up a table for SQ miss default flow.
9952  *
9953  * @param dev
9954  *   Pointer to Ethernet device.
9955  * @param error
9956  *   Pointer to error structure.
9957  *
9958  * @return
9959  *   Pointer to flow pattern template on success, NULL otherwise.
9960  */
9961 static struct rte_flow_pattern_template *
9962 flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev,
9963 					     struct rte_flow_error *error)
9964 {
9965 	struct rte_flow_pattern_template_attr attr = {
9966 		.relaxed_matching = 0,
9967 		.transfer = 1,
9968 	};
9969 	struct rte_flow_item_tag reg_c0_spec = {
9970 		.index = (uint8_t)REG_C_0,
9971 	};
9972 	struct rte_flow_item_tag reg_c0_mask = {
9973 		.index = 0xff,
9974 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
9975 	};
9976 	struct mlx5_rte_flow_item_sq queue_mask = {
9977 		.queue = UINT32_MAX,
9978 	};
9979 	struct rte_flow_item items[] = {
9980 		{
9981 			.type = (enum rte_flow_item_type)
9982 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
9983 			.spec = &reg_c0_spec,
9984 			.mask = &reg_c0_mask,
9985 		},
9986 		{
9987 			.type = (enum rte_flow_item_type)
9988 				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9989 			.mask = &queue_mask,
9990 		},
9991 		{
9992 			.type = RTE_FLOW_ITEM_TYPE_END,
9993 		},
9994 	};
9995 
9996 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9997 }
9998 
9999 /**
10000  * Creates a flow pattern template with unmasked represented port matching.
10001  * This template is used to set up a table for default transfer flows
10002  * directing packets to group 1.
10003  *
10004  * @param dev
10005  *   Pointer to Ethernet device.
10006  * @param error
10007  *   Pointer to error structure.
10008  *
10009  * @return
10010  *   Pointer to flow pattern template on success, NULL otherwise.
10011  */
10012 static struct rte_flow_pattern_template *
10013 flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev,
10014 					  struct rte_flow_error *error)
10015 {
10016 	struct rte_flow_pattern_template_attr attr = {
10017 		.relaxed_matching = 0,
10018 		.transfer = 1,
10019 	};
10020 	struct rte_flow_item_ethdev port_mask = {
10021 		.port_id = UINT16_MAX,
10022 	};
10023 	struct rte_flow_item items[] = {
10024 		{
10025 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
10026 			.mask = &port_mask,
10027 		},
10028 		{
10029 			.type = RTE_FLOW_ITEM_TYPE_END,
10030 		},
10031 	};
10032 
10033 	return flow_hw_pattern_template_create(dev, &attr, items, error);
10034 }
10035 
10036 /*
10037  * Creating a flow pattern template with all ETH packets matching.
10038  * This template is used to set up a table for default Tx copy (Tx metadata
10039  * to REG_C_1) flow rule usage.
10040  *
10041  * @param dev
10042  *   Pointer to Ethernet device.
10043  * @param error
10044  *   Pointer to error structure.
10045  *
10046  * @return
10047  *   Pointer to flow pattern template on success, NULL otherwise.
10048  */
10049 static struct rte_flow_pattern_template *
10050 flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev,
10051 						     struct rte_flow_error *error)
10052 {
10053 	struct rte_flow_pattern_template_attr tx_pa_attr = {
10054 		.relaxed_matching = 0,
10055 		.egress = 1,
10056 	};
10057 	struct rte_flow_item_eth promisc = {
10058 		.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10059 		.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10060 		.hdr.ether_type = 0,
10061 	};
10062 	struct rte_flow_item eth_all[] = {
10063 		[0] = {
10064 			.type = RTE_FLOW_ITEM_TYPE_ETH,
10065 			.spec = &promisc,
10066 			.mask = &promisc,
10067 		},
10068 		[1] = {
10069 			.type = RTE_FLOW_ITEM_TYPE_END,
10070 		},
10071 	};
10072 
10073 	return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, error);
10074 }
10075 
10076 /*
10077  * Creating a flow pattern template matching all LACP packets, for the NIC
10078  * ingress domain only.
10079  *
10080  * @param dev
10081  *   Pointer to Ethernet device.
10082  * @param error
10083  *   Pointer to error structure.
10084  *
10085  * @return
10086  *   Pointer to flow pattern template on success, NULL otherwise.
10087  */
10088 static struct rte_flow_pattern_template *
10089 flow_hw_create_lacp_rx_pattern_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
10090 {
10091 	struct rte_flow_pattern_template_attr pa_attr = {
10092 		.relaxed_matching = 0,
10093 		.ingress = 1,
10094 	};
10095 	struct rte_flow_item_eth lacp_mask = {
10096 		.dst.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10097 		.src.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10098 		.type = 0xFFFF,
10099 	};
10100 	struct rte_flow_item eth_all[] = {
10101 		[0] = {
10102 			.type = RTE_FLOW_ITEM_TYPE_ETH,
10103 			.mask = &lacp_mask,
10104 		},
10105 		[1] = {
10106 			.type = RTE_FLOW_ITEM_TYPE_END,
10107 		},
10108 	};
10109 	return flow_hw_pattern_template_create(dev, &pa_attr, eth_all, error);
10110 }
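
/*
 * The template above masks only the EtherType; the concrete value is supplied
 * per rule when the control flow is enqueued elsewhere in this file. A minimal
 * sketch of such a per-rule item spec, assuming the slow-protocols EtherType
 * used by LACP:
 *
 * @code{.c}
 * struct rte_flow_item_eth lacp_spec = {
 *	.type = RTE_BE16(RTE_ETHER_TYPE_SLOW), // 0x8809 - LACP/slow protocols
 * };
 * struct rte_flow_item items[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &lacp_spec, },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END, },
 * };
 * @endcode
 */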
10111 
10112 /**
10113  * Creates a flow actions template with modify field action and masked jump action.
10114  * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)
10115  * to 1, meaning that packet was originated from E-Switch Manager. Jump action
10116  * transfers steering to group 1.
10117  *
10118  * @param dev
10119  *   Pointer to Ethernet device.
10120  * @param error
10121  *   Pointer to error structure.
10122  *
10123  * @return
10124  *   Pointer to flow actions template on success, NULL otherwise.
10125  */
10126 static struct rte_flow_actions_template *
10127 flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev,
10128 					       struct rte_flow_error *error)
10129 {
10130 	uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev);
10131 	uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev);
10132 	struct rte_flow_actions_template_attr attr = {
10133 		.transfer = 1,
10134 	};
10135 	struct rte_flow_action_modify_field set_reg_v = {
10136 		.operation = RTE_FLOW_MODIFY_SET,
10137 		.dst = {
10138 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10139 			.tag_index = REG_C_0,
10140 		},
10141 		.src = {
10142 			.field = RTE_FLOW_FIELD_VALUE,
10143 		},
10144 		.width = rte_popcount32(marker_mask),
10145 	};
10146 	struct rte_flow_action_modify_field set_reg_m = {
10147 		.operation = RTE_FLOW_MODIFY_SET,
10148 		.dst = {
10149 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10150 			.level = UINT8_MAX,
10151 			.tag_index = UINT8_MAX,
10152 			.offset = UINT32_MAX,
10153 		},
10154 		.src = {
10155 			.field = RTE_FLOW_FIELD_VALUE,
10156 		},
10157 		.width = UINT32_MAX,
10158 	};
10159 	struct rte_flow_action_jump jump_v = {
10160 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
10161 	};
10162 	struct rte_flow_action_jump jump_m = {
10163 		.group = UINT32_MAX,
10164 	};
10165 	struct rte_flow_action actions_v[] = {
10166 		{
10167 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10168 			.conf = &set_reg_v,
10169 		},
10170 		{
10171 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10172 			.conf = &jump_v,
10173 		},
10174 		{
10175 			.type = RTE_FLOW_ACTION_TYPE_END,
10176 		}
10177 	};
10178 	struct rte_flow_action actions_m[] = {
10179 		{
10180 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10181 			.conf = &set_reg_m,
10182 		},
10183 		{
10184 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10185 			.conf = &jump_m,
10186 		},
10187 		{
10188 			.type = RTE_FLOW_ACTION_TYPE_END,
10189 		}
10190 	};
10191 
10192 	set_reg_v.dst.offset = rte_bsf32(marker_mask);
10193 	rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
10194 	rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
10195 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
10196 }
10197 
10198 /**
10199  * Creates a flow actions template with an unmasked JUMP action. Flows
10200  * based on this template will perform a jump to some group. This template
10201  * is used to set up tables for control flows.
10202  *
10203  * @param dev
10204  *   Pointer to Ethernet device.
10205  * @param group
10206  *   Destination group for this action template.
10207  * @param error
10208  *   Pointer to error structure.
10209  *
10210  * @return
10211  *   Pointer to flow actions template on success, NULL otherwise.
10212  */
10213 static struct rte_flow_actions_template *
10214 flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
10215 					  uint32_t group,
10216 					  struct rte_flow_error *error)
10217 {
10218 	struct rte_flow_actions_template_attr attr = {
10219 		.transfer = 1,
10220 	};
10221 	struct rte_flow_action_jump jump_v = {
10222 		.group = group,
10223 	};
10224 	struct rte_flow_action_jump jump_m = {
10225 		.group = UINT32_MAX,
10226 	};
10227 	struct rte_flow_action actions_v[] = {
10228 		{
10229 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10230 			.conf = &jump_v,
10231 		},
10232 		{
10233 			.type = RTE_FLOW_ACTION_TYPE_END,
10234 		}
10235 	};
10236 	struct rte_flow_action actions_m[] = {
10237 		{
10238 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10239 			.conf = &jump_m,
10240 		},
10241 		{
10242 			.type = RTE_FLOW_ACTION_TYPE_END,
10243 		}
10244 	};
10245 
10246 	return flow_hw_actions_template_create(dev, &attr, actions_v,
10247 					       actions_m, error);
10248 }
10249 
10250 /**
10251  * Creates a flow actions template with an unmasked REPRESENTED_PORT action.
10252  * It is used to create control flow tables.
10253  *
10254  * @param dev
10255  *   Pointer to Ethernet device.
10256  * @param error
10257  *   Pointer to error structure.
10258  *
10259  * @return
10260  *   Pointer to flow action template on success, NULL otherwise.
10261  */
10262 static struct rte_flow_actions_template *
10263 flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev,
10264 					  struct rte_flow_error *error)
10265 {
10266 	struct rte_flow_actions_template_attr attr = {
10267 		.transfer = 1,
10268 	};
10269 	struct rte_flow_action_ethdev port_v = {
10270 		.port_id = 0,
10271 	};
10272 	struct rte_flow_action actions_v[] = {
10273 		{
10274 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
10275 			.conf = &port_v,
10276 		},
10277 		{
10278 			.type = RTE_FLOW_ACTION_TYPE_END,
10279 		}
10280 	};
10281 	struct rte_flow_action_ethdev port_m = {
10282 		.port_id = 0,
10283 	};
10284 	struct rte_flow_action actions_m[] = {
10285 		{
10286 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
10287 			.conf = &port_m,
10288 		},
10289 		{
10290 			.type = RTE_FLOW_ACTION_TYPE_END,
10291 		}
10292 	};
10293 
10294 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
10295 }
10296 
10297 /*
10298  * Creating an actions template which uses a header modify action for register
10299  * copying. This template is used to set up a table for the metadata copy flow.
10300  *
10301  * @param dev
10302  *   Pointer to Ethernet device.
10303  * @param error
10304  *   Pointer to error structure.
10305  *
10306  * @return
10307  *   Pointer to flow actions template on success, NULL otherwise.
10308  */
10309 static struct rte_flow_actions_template *
10310 flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev,
10311 						     struct rte_flow_error *error)
10312 {
10313 	struct rte_flow_actions_template_attr tx_act_attr = {
10314 		.egress = 1,
10315 	};
10316 	const struct rte_flow_action_modify_field mreg_action = {
10317 		.operation = RTE_FLOW_MODIFY_SET,
10318 		.dst = {
10319 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10320 			.tag_index = REG_C_1,
10321 		},
10322 		.src = {
10323 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10324 			.tag_index = REG_A,
10325 		},
10326 		.width = 32,
10327 	};
10328 	const struct rte_flow_action_modify_field mreg_mask = {
10329 		.operation = RTE_FLOW_MODIFY_SET,
10330 		.dst = {
10331 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10332 			.level = UINT8_MAX,
10333 			.tag_index = UINT8_MAX,
10334 			.offset = UINT32_MAX,
10335 		},
10336 		.src = {
10337 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10338 			.level = UINT8_MAX,
10339 			.tag_index = UINT8_MAX,
10340 			.offset = UINT32_MAX,
10341 		},
10342 		.width = UINT32_MAX,
10343 	};
10344 	const struct rte_flow_action_jump jump_action = {
10345 		.group = 1,
10346 	};
10347 	const struct rte_flow_action_jump jump_mask = {
10348 		.group = UINT32_MAX,
10349 	};
10350 	const struct rte_flow_action actions[] = {
10351 		[0] = {
10352 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10353 			.conf = &mreg_action,
10354 		},
10355 		[1] = {
10356 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10357 			.conf = &jump_action,
10358 		},
10359 		[2] = {
10360 			.type = RTE_FLOW_ACTION_TYPE_END,
10361 		},
10362 	};
10363 	const struct rte_flow_action masks[] = {
10364 		[0] = {
10365 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10366 			.conf = &mreg_mask,
10367 		},
10368 		[1] = {
10369 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10370 			.conf = &jump_mask,
10371 		},
10372 		[2] = {
10373 			.type = RTE_FLOW_ACTION_TYPE_END,
10374 		},
10375 	};
10376 
10377 	return flow_hw_actions_template_create(dev, &tx_act_attr, actions,
10378 					       masks, error);
10379 }
10380 
10381 /*
10382  * Creating an actions template which uses the default miss action to re-route
10383  * packets to the kernel driver stack.
10384  * On the root table, only the DEFAULT_MISS action can be used.
10385  *
10386  * @param dev
10387  *   Pointer to Ethernet device.
10388  * @param error
10389  *   Pointer to error structure.
10390  *
10391  * @return
10392  *   Pointer to flow actions template on success, NULL otherwise.
10393  */
10394 static struct rte_flow_actions_template *
10395 flow_hw_create_lacp_rx_actions_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
10396 {
10397 	struct rte_flow_actions_template_attr act_attr = {
10398 		.ingress = 1,
10399 	};
10400 	const struct rte_flow_action actions[] = {
10401 		[0] = {
10402 			.type = (enum rte_flow_action_type)
10403 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
10404 		},
10405 		[1] = {
10406 			.type = RTE_FLOW_ACTION_TYPE_END,
10407 		},
10408 	};
10409 
10410 	return flow_hw_actions_template_create(dev, &act_attr, actions, actions, error);
10411 }
10412 
10413 /**
10414  * Creates a control flow table used to transfer traffic from E-Switch Manager
10415  * and TX queues from group 0 to group 1.
10416  *
10417  * @param dev
10418  *   Pointer to Ethernet device.
10419  * @param it
10420  *   Pointer to flow pattern template.
10421  * @param at
10422  *   Pointer to flow actions template.
10423  * @param error
10424  *   Pointer to error structure.
10425  *
10426  * @return
10427  *   Pointer to flow table on success, NULL otherwise.
10428  */
10429 static struct rte_flow_template_table*
10430 flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
10431 				       struct rte_flow_pattern_template *it,
10432 				       struct rte_flow_actions_template *at,
10433 				       struct rte_flow_error *error)
10434 {
10435 	struct rte_flow_template_table_attr attr = {
10436 		.flow_attr = {
10437 			.group = 0,
10438 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
10439 			.ingress = 0,
10440 			.egress = 0,
10441 			.transfer = 1,
10442 		},
10443 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10444 	};
10445 	struct mlx5_flow_template_table_cfg cfg = {
10446 		.attr = attr,
10447 		.external = false,
10448 	};
10449 
10450 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10451 }
10452 
10453 
10454 /**
10455  * Creates a non-root control flow table (group 1) used for default SQ miss flow
10456  * rules, which forward traffic originating from TX queues to a represented port.
10457  *
10458  * @param dev
10459  *   Pointer to Ethernet device.
10460  * @param it
10461  *   Pointer to flow pattern template.
10462  * @param at
10463  *   Pointer to flow actions template.
10464  * @param error
10465  *   Pointer to error structure.
10466  *
10467  * @return
10468  *   Pointer to flow table on success, NULL otherwise.
10469  */
10470 static struct rte_flow_template_table*
10471 flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
10472 				  struct rte_flow_pattern_template *it,
10473 				  struct rte_flow_actions_template *at,
10474 				  struct rte_flow_error *error)
10475 {
10476 	struct rte_flow_template_table_attr attr = {
10477 		.flow_attr = {
10478 			.group = 1,
10479 			.priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,
10480 			.ingress = 0,
10481 			.egress = 0,
10482 			.transfer = 1,
10483 		},
10484 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10485 	};
10486 	struct mlx5_flow_template_table_cfg cfg = {
10487 		.attr = attr,
10488 		.external = false,
10489 	};
10490 
10491 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10492 }
10493 
10494 /*
10495  * Creating the default Tx metadata copy table on NIC Tx group 0.
10496  *
10497  * @param dev
10498  *   Pointer to Ethernet device.
10499  * @param pt
10500  *   Pointer to flow pattern template.
10501  * @param at
10502  *   Pointer to flow actions template.
10503  * @param error
10504  *   Pointer to error structure.
10505  *
10506  * @return
10507  *   Pointer to flow table on success, NULL otherwise.
10508  */
10509 static struct rte_flow_template_table*
10510 flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
10511 					  struct rte_flow_pattern_template *pt,
10512 					  struct rte_flow_actions_template *at,
10513 					  struct rte_flow_error *error)
10514 {
10515 	struct rte_flow_template_table_attr tx_tbl_attr = {
10516 		.flow_attr = {
10517 			.group = 0, /* Root */
10518 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
10519 			.egress = 1,
10520 		},
10521 		.nb_flows = 1, /* One default flow rule for all. */
10522 	};
10523 	struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
10524 		.attr = tx_tbl_attr,
10525 		.external = false,
10526 	};
10527 
10528 	return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, error);
10529 }
10530 
10531 /**
10532  * Creates a control flow table used to transfer traffic
10533  * from group 0 to group 1.
10534  *
10535  * @param dev
10536  *   Pointer to Ethernet device.
10537  * @param it
10538  *   Pointer to flow pattern template.
10539  * @param at
10540  *   Pointer to flow actions template.
10541  * @param error
10542  *   Pointer to error structure.
10543  *
10544  * @return
10545  *   Pointer to flow table on success, NULL otherwise.
10546  */
10547 static struct rte_flow_template_table *
10548 flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
10549 			       struct rte_flow_pattern_template *it,
10550 			       struct rte_flow_actions_template *at,
10551 			       struct rte_flow_error *error)
10552 {
10553 	struct rte_flow_template_table_attr attr = {
10554 		.flow_attr = {
10555 			.group = 0,
10556 			.priority = 0,
10557 			.ingress = 0,
10558 			.egress = 0,
10559 			.transfer = 1,
10560 		},
10561 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10562 	};
10563 	struct mlx5_flow_template_table_cfg cfg = {
10564 		.attr = attr,
10565 		.external = false,
10566 	};
10567 
10568 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10569 }
10570 
10571 /**
10572  * Cleans up all template tables and pattern, and actions templates used for
10573  * FDB control flow rules.
10574  *
10575  * @param dev
10576  *   Pointer to Ethernet device.
10577  */
10578 static void
10579 flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev)
10580 {
10581 	struct mlx5_priv *priv = dev->data->dev_private;
10582 	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
10583 
10584 	if (!priv->hw_ctrl_fdb)
10585 		return;
10586 	hw_ctrl_fdb = priv->hw_ctrl_fdb;
10587 	/* Clean up templates used for LACP default miss table. */
10588 	if (hw_ctrl_fdb->hw_lacp_rx_tbl)
10589 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL));
10590 	if (hw_ctrl_fdb->lacp_rx_actions_tmpl)
10591 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl,
10592 			   NULL));
10593 	if (hw_ctrl_fdb->lacp_rx_items_tmpl)
10594 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
10595 			   NULL));
10596 	/* Clean up templates used for default Tx metadata copy. */
10597 	if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
10598 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL));
10599 	if (hw_ctrl_fdb->tx_meta_actions_tmpl)
10600 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl,
10601 			   NULL));
10602 	if (hw_ctrl_fdb->tx_meta_items_tmpl)
10603 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
10604 			   NULL));
10605 	/* Clean up templates used for default FDB jump rule. */
10606 	if (hw_ctrl_fdb->hw_esw_zero_tbl)
10607 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL));
10608 	if (hw_ctrl_fdb->jump_one_actions_tmpl)
10609 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl,
10610 			   NULL));
10611 	if (hw_ctrl_fdb->port_items_tmpl)
10612 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl,
10613 			   NULL));
10614 	/* Clean up templates used for default SQ miss flow rules - non-root table. */
10615 	if (hw_ctrl_fdb->hw_esw_sq_miss_tbl)
10616 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL));
10617 	if (hw_ctrl_fdb->regc_sq_items_tmpl)
10618 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
10619 			   NULL));
10620 	if (hw_ctrl_fdb->port_actions_tmpl)
10621 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl,
10622 			   NULL));
10623 	/* Clean up templates used for default SQ miss flow rules - root table. */
10624 	if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl)
10625 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL));
10626 	if (hw_ctrl_fdb->regc_jump_actions_tmpl)
10627 		claim_zero(flow_hw_actions_template_destroy(dev,
10628 			   hw_ctrl_fdb->regc_jump_actions_tmpl, NULL));
10629 	if (hw_ctrl_fdb->esw_mgr_items_tmpl)
10630 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
10631 			   NULL));
10632 	/* Clean up templates structure for FDB control flow rules. */
10633 	mlx5_free(hw_ctrl_fdb);
10634 	priv->hw_ctrl_fdb = NULL;
10635 }
10636 
10637 /*
10638  * Create a table on the root group for redirecting LACP traffic.
10639  *
10640  * @param dev
10641  *   Pointer to Ethernet device.
10642  * @param it
10643  *   Pointer to flow pattern template.
10644  * @param at
10645  *   Pointer to flow actions template.
10646  *
10647  * @return
10648  *   Pointer to flow table on success, NULL otherwise.
10649  */
10650 static struct rte_flow_template_table *
10651 flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev,
10652 			     struct rte_flow_pattern_template *it,
10653 			     struct rte_flow_actions_template *at,
10654 			     struct rte_flow_error *error)
10655 {
10656 	struct rte_flow_template_table_attr attr = {
10657 		.flow_attr = {
10658 			.group = 0,
10659 			.priority = 0,
10660 			.ingress = 1,
10661 			.egress = 0,
10662 			.transfer = 0,
10663 		},
10664 		.nb_flows = 1,
10665 	};
10666 	struct mlx5_flow_template_table_cfg cfg = {
10667 		.attr = attr,
10668 		.external = false,
10669 	};
10670 
10671 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10672 }
10673 
10674 /**
10675  * Creates a set of flow tables used to create control flows used
10676  * when E-Switch is engaged.
10677  *
10678  * @param dev
10679  *   Pointer to Ethernet device.
10680  * @param error
10681  *   Pointer to error structure.
10682  *
10683  * @return
10684  *   0 on success, negative values otherwise
10685  */
10686 static int
10687 flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
10688 {
10689 	struct mlx5_priv *priv = dev->data->dev_private;
10690 	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
10691 	uint32_t xmeta = priv->sh->config.dv_xmeta_en;
10692 	uint32_t repr_matching = priv->sh->config.repr_matching;
10693 	uint32_t fdb_def_rule = priv->sh->config.fdb_def_rule;
10694 
10695 	MLX5_ASSERT(priv->hw_ctrl_fdb == NULL);
10696 	hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY);
10697 	if (!hw_ctrl_fdb) {
10698 		DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates",
10699 			dev->data->port_id);
10700 		rte_errno = ENOMEM;
10701 		goto err;
10702 	}
10703 	priv->hw_ctrl_fdb = hw_ctrl_fdb;
10704 	if (fdb_def_rule) {
10705 		/* Create templates and table for default SQ miss flow rules - root table. */
10706 		hw_ctrl_fdb->esw_mgr_items_tmpl =
10707 				flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
10708 		if (!hw_ctrl_fdb->esw_mgr_items_tmpl) {
10709 			DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
10710 				" template for control flows", dev->data->port_id);
10711 			goto err;
10712 		}
10713 		hw_ctrl_fdb->regc_jump_actions_tmpl =
10714 				flow_hw_create_ctrl_regc_jump_actions_template(dev, error);
10715 		if (!hw_ctrl_fdb->regc_jump_actions_tmpl) {
10716 			DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
10717 				" for control flows", dev->data->port_id);
10718 			goto err;
10719 		}
10720 		hw_ctrl_fdb->hw_esw_sq_miss_root_tbl =
10721 				flow_hw_create_ctrl_sq_miss_root_table
10722 					(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
10723 					 hw_ctrl_fdb->regc_jump_actions_tmpl, error);
10724 		if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) {
10725 			DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
10726 				" for control flows", dev->data->port_id);
10727 			goto err;
10728 		}
10729 		/* Create templates and table for default SQ miss flow rules - non-root table. */
10730 		hw_ctrl_fdb->regc_sq_items_tmpl =
10731 				flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
10732 		if (!hw_ctrl_fdb->regc_sq_items_tmpl) {
10733 			DRV_LOG(ERR, "port %u failed to create SQ item template for"
10734 				" control flows", dev->data->port_id);
10735 			goto err;
10736 		}
10737 		hw_ctrl_fdb->port_actions_tmpl =
10738 				flow_hw_create_ctrl_port_actions_template(dev, error);
10739 		if (!hw_ctrl_fdb->port_actions_tmpl) {
10740 			DRV_LOG(ERR, "port %u failed to create port action template"
10741 				" for control flows", dev->data->port_id);
10742 			goto err;
10743 		}
10744 		hw_ctrl_fdb->hw_esw_sq_miss_tbl =
10745 				flow_hw_create_ctrl_sq_miss_table
10746 					(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
10747 					 hw_ctrl_fdb->port_actions_tmpl, error);
10748 		if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
10749 			DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
10750 				" for control flows", dev->data->port_id);
10751 			goto err;
10752 		}
10753 		/* Create templates and table for default FDB jump flow rules. */
10754 		hw_ctrl_fdb->port_items_tmpl =
10755 				flow_hw_create_ctrl_port_pattern_template(dev, error);
10756 		if (!hw_ctrl_fdb->port_items_tmpl) {
10757 			DRV_LOG(ERR, "port %u failed to create port item template for"
10758 				" control flows", dev->data->port_id);
10759 			goto err;
10760 		}
10761 		hw_ctrl_fdb->jump_one_actions_tmpl =
10762 				flow_hw_create_ctrl_jump_actions_template
10763 					(dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
10764 		if (!hw_ctrl_fdb->jump_one_actions_tmpl) {
10765 			DRV_LOG(ERR, "port %u failed to create jump action template"
10766 				" for control flows", dev->data->port_id);
10767 			goto err;
10768 		}
10769 		hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table
10770 				(dev, hw_ctrl_fdb->port_items_tmpl,
10771 				 hw_ctrl_fdb->jump_one_actions_tmpl, error);
10772 		if (!hw_ctrl_fdb->hw_esw_zero_tbl) {
10773 			DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
10774 				" for control flows", dev->data->port_id);
10775 			goto err;
10776 		}
10777 	}
10778 	/* Create templates and table for default Tx metadata copy flow rule. */
10779 	if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) {
10780 		hw_ctrl_fdb->tx_meta_items_tmpl =
10781 			flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error);
10782 		if (!hw_ctrl_fdb->tx_meta_items_tmpl) {
10783 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy pattern"
10784 				" template for control flows", dev->data->port_id);
10785 			goto err;
10786 		}
10787 		hw_ctrl_fdb->tx_meta_actions_tmpl =
10788 			flow_hw_create_tx_default_mreg_copy_actions_template(dev, error);
10789 		if (!hw_ctrl_fdb->tx_meta_actions_tmpl) {
10790 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy actions"
10791 				" template for control flows", dev->data->port_id);
10792 			goto err;
10793 		}
10794 		hw_ctrl_fdb->hw_tx_meta_cpy_tbl =
10795 			flow_hw_create_tx_default_mreg_copy_table
10796 				(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
10797 				 hw_ctrl_fdb->tx_meta_actions_tmpl, error);
10798 		if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) {
10799 			DRV_LOG(ERR, "port %u failed to create table for default"
10800 				" Tx metadata copy flow rule", dev->data->port_id);
10801 			goto err;
10802 		}
10803 	}
10804 	/* Create LACP default miss table. */
10805 	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) {
10806 		hw_ctrl_fdb->lacp_rx_items_tmpl =
10807 				flow_hw_create_lacp_rx_pattern_template(dev, error);
10808 		if (!hw_ctrl_fdb->lacp_rx_items_tmpl) {
10809 			DRV_LOG(ERR, "port %u failed to create pattern template"
10810 				" for LACP Rx traffic", dev->data->port_id);
10811 			goto err;
10812 		}
10813 		hw_ctrl_fdb->lacp_rx_actions_tmpl =
10814 				flow_hw_create_lacp_rx_actions_template(dev, error);
10815 		if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) {
10816 			DRV_LOG(ERR, "port %u failed to create actions template"
10817 				" for LACP Rx traffic", dev->data->port_id);
10818 			goto err;
10819 		}
10820 		hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table
10821 				(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
10822 				 hw_ctrl_fdb->lacp_rx_actions_tmpl, error);
10823 		if (!hw_ctrl_fdb->hw_lacp_rx_tbl) {
10824 			DRV_LOG(ERR, "port %u failed to create template table"
10825 				" for LACP Rx traffic", dev->data->port_id);
10826 			goto err;
10827 		}
10828 	}
10829 	return 0;
10830 
10831 err:
10832 	flow_hw_cleanup_ctrl_fdb_tables(dev);
10833 	return -EINVAL;
10834 }
10835 
10836 static void
10837 flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,
10838 		       struct mlx5_aso_ct_pools_mng *ct_mng)
10839 {
10840 	struct mlx5_priv *priv = dev->data->dev_private;
10841 
10842 	mlx5_aso_ct_queue_uninit(priv->sh, ct_mng);
10843 	mlx5_free(ct_mng);
10844 }
10845 
10846 static void
10847 flow_hw_ct_pool_destroy(struct rte_eth_dev *dev,
10848 			struct mlx5_aso_ct_pool *pool)
10849 {
10850 	struct mlx5_priv *priv = dev->data->dev_private;
10851 
10852 	if (pool->dr_action)
10853 		mlx5dr_action_destroy(pool->dr_action);
10854 	if (!priv->shared_host) {
10855 		if (pool->devx_obj)
10856 			claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
10857 		if (pool->cts)
10858 			mlx5_ipool_destroy(pool->cts);
10859 	}
10860 	mlx5_free(pool);
10861 }
10862 
10863 static struct mlx5_aso_ct_pool *
10864 flow_hw_ct_pool_create(struct rte_eth_dev *dev,
10865 		       uint32_t nb_conn_tracks)
10866 {
10867 	struct mlx5_priv *priv = dev->data->dev_private;
10868 	struct mlx5_aso_ct_pool *pool;
10869 	struct mlx5_devx_obj *obj;
10870 	uint32_t nb_cts = rte_align32pow2(nb_conn_tracks);
10871 	uint32_t log_obj_size = rte_log2_u32(nb_cts);
10872 	struct mlx5_indexed_pool_config cfg = {
10873 		.size = sizeof(struct mlx5_aso_ct_action),
10874 		.trunk_size = 1 << 12,
10875 		.per_core_cache = 1 << 13,
10876 		.need_lock = 1,
10877 		.release_mem_en = !!priv->sh->config.reclaim_mode,
10878 		.malloc = mlx5_malloc,
10879 		.free = mlx5_free,
10880 		.type = "mlx5_hw_ct_action",
10881 	};
10882 	int reg_id;
10883 	uint32_t flags = 0;
10884 
10885 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
10886 	if (!pool) {
10887 		rte_errno = ENOMEM;
10888 		return NULL;
10889 	}
10890 	if (!priv->shared_host) {
10891 		/*
10892 		 * No need for a local cache if the CT number is small, since the flow
10893 		 * insertion rate will be very limited in that case. The trunk size is
10894 		 * then capped at the CT number, below the default trunk size of 4K.
10895 		 */
10896 		if (nb_cts <= cfg.trunk_size) {
10897 			cfg.per_core_cache = 0;
10898 			cfg.trunk_size = nb_cts;
10899 		} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
10900 			cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
10901 		}
10902 		cfg.max_idx = nb_cts;
10903 		pool->cts = mlx5_ipool_create(&cfg);
10904 		if (!pool->cts)
10905 			goto err;
10906 		obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
10907 								  priv->sh->cdev->pdn,
10908 								  log_obj_size);
10909 		if (!obj) {
10910 			rte_errno = ENODATA;
10911 			DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
10912 			goto err;
10913 		}
10914 		pool->devx_obj = obj;
10915 	} else {
10916 		struct rte_eth_dev *host_dev = priv->shared_host;
10917 		struct mlx5_priv *host_priv = host_dev->data->dev_private;
10918 
10919 		pool->devx_obj = host_priv->hws_ctpool->devx_obj;
10920 		pool->cts = host_priv->hws_ctpool->cts;
10921 		MLX5_ASSERT(pool->cts);
10922 		MLX5_ASSERT(!nb_conn_tracks);
10923 	}
10924 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
10925 	flags |= MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
10926 	if (priv->sh->config.dv_esw_en && priv->master)
10927 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
10928 	pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
10929 						      (struct mlx5dr_devx_obj *)pool->devx_obj,
10930 						      reg_id - REG_C_0, flags);
10931 	if (!pool->dr_action)
10932 		goto err;
10933 	pool->sq = priv->ct_mng->aso_sqs;
10934 	/* Assign the last extra ASO SQ as public SQ. */
10935 	pool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];
10936 	return pool;
10937 err:
10938 	flow_hw_ct_pool_destroy(dev, pool);
10939 	return NULL;
10940 }
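
/*
 * Sizing example for the pool created above (illustrative request size only):
 * the requested number of connection tracking objects is rounded up to a power
 * of two and drives both the ipool layout and the DevX object size.
 *
 * @code{.c}
 * uint32_t nb_conn_tracks = 1000;                     // assumed application request
 * uint32_t nb_cts = rte_align32pow2(nb_conn_tracks);  // == 1024
 * uint32_t log_obj_size = rte_log2_u32(nb_cts);       // == 10
 * // 1024 <= the default trunk size (1 << 12), so per_core_cache is disabled and
 * // the ipool trunk size is capped at 1024. With e.g. 100000 CTs (rounded up to
 * // 1 << 17, below MLX5_HW_IPOOL_SIZE_THRESHOLD), per_core_cache would be set
 * // to MLX5_HW_IPOOL_CACHE_MIN instead.
 * @endcode
 */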
10941 
10942 static int
10943 mlx5_flow_ct_init(struct rte_eth_dev *dev,
10944 		  uint32_t nb_conn_tracks,
10945 		  uint16_t nb_queue)
10946 {
10947 	struct mlx5_priv *priv = dev->data->dev_private;
10948 	uint32_t mem_size;
10949 	int ret = -ENOMEM;
10950 
10951 	if (!priv->shared_host) {
10952 		mem_size = sizeof(struct mlx5_aso_sq) * nb_queue +
10953 				sizeof(*priv->ct_mng);
10954 		priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
10955 						RTE_CACHE_LINE_SIZE,
10956 						SOCKET_ID_ANY);
10957 		if (!priv->ct_mng)
10958 			goto err;
10959 		ret = mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng,
10960 						nb_queue);
10961 		if (ret)
10962 			goto err;
10963 	}
10964 	priv->hws_ctpool = flow_hw_ct_pool_create(dev, nb_conn_tracks);
10965 	if (!priv->hws_ctpool)
10966 		goto err;
10967 	priv->sh->ct_aso_en = 1;
10968 	return 0;
10969 
10970 err:
10971 	if (priv->hws_ctpool) {
10972 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
10973 		priv->hws_ctpool = NULL;
10974 	}
10975 	if (priv->ct_mng) {
10976 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
10977 		priv->ct_mng = NULL;
10978 	}
10979 	return ret;
10980 }
10981 
10982 static void
10983 flow_hw_destroy_vlan(struct rte_eth_dev *dev)
10984 {
10985 	struct mlx5_priv *priv = dev->data->dev_private;
10986 	enum mlx5dr_table_type i;
10987 
10988 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
10989 		if (priv->hw_pop_vlan[i]) {
10990 			mlx5dr_action_destroy(priv->hw_pop_vlan[i]);
10991 			priv->hw_pop_vlan[i] = NULL;
10992 		}
10993 		if (priv->hw_push_vlan[i]) {
10994 			mlx5dr_action_destroy(priv->hw_push_vlan[i]);
10995 			priv->hw_push_vlan[i] = NULL;
10996 		}
10997 	}
10998 }
10999 
11000 static int
11001 flow_hw_create_vlan(struct rte_eth_dev *dev)
11002 {
11003 	struct mlx5_priv *priv = dev->data->dev_private;
11004 	enum mlx5dr_table_type i;
11005 	const enum mlx5dr_action_flags flags[MLX5DR_TABLE_TYPE_MAX] = {
11006 		MLX5DR_ACTION_FLAG_HWS_RX,
11007 		MLX5DR_ACTION_FLAG_HWS_TX,
11008 		MLX5DR_ACTION_FLAG_HWS_FDB
11009 	};
11010 
11011 	/* rte_errno is set in the mlx5dr_action* functions. */
11012 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++) {
11013 		priv->hw_pop_vlan[i] =
11014 			mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]);
11015 		if (!priv->hw_pop_vlan[i])
11016 			return -rte_errno;
11017 		priv->hw_push_vlan[i] =
11018 			mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]);
11019 		if (!priv->hw_push_vlan[i])
11020 			return -rte_errno;
11021 	}
11022 	if (priv->sh->config.dv_esw_en && priv->master) {
11023 		priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB] =
11024 			mlx5dr_action_create_pop_vlan
11025 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
11026 		if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB])
11027 			return -rte_errno;
11028 		priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB] =
11029 			mlx5dr_action_create_push_vlan
11030 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
11031 		if (!priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB])
11032 			return -rte_errno;
11033 	}
11034 	return 0;
11035 }
11036 
11037 static void
11038 flow_hw_cleanup_ctrl_rx_tables(struct rte_eth_dev *dev)
11039 {
11040 	struct mlx5_priv *priv = dev->data->dev_private;
11041 	unsigned int i;
11042 	unsigned int j;
11043 
11044 	if (!priv->hw_ctrl_rx)
11045 		return;
11046 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11047 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11048 			struct rte_flow_template_table *tbl = priv->hw_ctrl_rx->tables[i][j].tbl;
11049 			struct rte_flow_pattern_template *pt = priv->hw_ctrl_rx->tables[i][j].pt;
11050 
11051 			if (tbl)
11052 				claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11053 			if (pt)
11054 				claim_zero(flow_hw_pattern_template_destroy(dev, pt, NULL));
11055 		}
11056 	}
11057 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++i) {
11058 		struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[i];
11059 
11060 		if (at)
11061 			claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
11062 	}
11063 	mlx5_free(priv->hw_ctrl_rx);
11064 	priv->hw_ctrl_rx = NULL;
11065 }
11066 
11067 static uint64_t
11068 flow_hw_ctrl_rx_rss_type_hash_types(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11069 {
11070 	switch (rss_type) {
11071 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP:
11072 		return 0;
11073 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
11074 		return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
11075 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11076 		return RTE_ETH_RSS_NONFRAG_IPV4_UDP;
11077 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11078 		return RTE_ETH_RSS_NONFRAG_IPV4_TCP;
11079 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
11080 		return RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
11081 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11082 		return RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX;
11083 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11084 		return RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX;
11085 	default:
11086 		/* Should not reach here. */
11087 		MLX5_ASSERT(false);
11088 		return 0;
11089 	}
11090 }
11091 
11092 static struct rte_flow_actions_template *
11093 flow_hw_create_ctrl_rx_rss_template(struct rte_eth_dev *dev,
11094 				    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11095 {
11096 	struct mlx5_priv *priv = dev->data->dev_private;
11097 	struct rte_flow_actions_template_attr attr = {
11098 		.ingress = 1,
11099 	};
11100 	uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
11101 	struct rte_flow_action_rss rss_conf = {
11102 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
11103 		.level = 0,
11104 		.types = 0,
11105 		.key_len = priv->rss_conf.rss_key_len,
11106 		.key = priv->rss_conf.rss_key,
11107 		.queue_num = priv->reta_idx_n,
11108 		.queue = queue,
11109 	};
11110 	struct rte_flow_action actions[] = {
11111 		{
11112 			.type = RTE_FLOW_ACTION_TYPE_RSS,
11113 			.conf = &rss_conf,
11114 		},
11115 		{
11116 			.type = RTE_FLOW_ACTION_TYPE_END,
11117 		}
11118 	};
11119 	struct rte_flow_action masks[] = {
11120 		{
11121 			.type = RTE_FLOW_ACTION_TYPE_RSS,
11122 			.conf = &rss_conf,
11123 		},
11124 		{
11125 			.type = RTE_FLOW_ACTION_TYPE_END,
11126 		}
11127 	};
11128 	struct rte_flow_actions_template *at;
11129 	struct rte_flow_error error;
11130 	unsigned int i;
11131 
11132 	MLX5_ASSERT(priv->reta_idx_n > 0 && priv->reta_idx);
11133 	/* Select the proper RSS hash types and, based on them, configure the actions template. */
11134 	rss_conf.types = flow_hw_ctrl_rx_rss_type_hash_types(rss_type);
11135 	if (rss_conf.types) {
11136 		for (i = 0; i < priv->reta_idx_n; ++i)
11137 			queue[i] = (*priv->reta_idx)[i];
11138 	} else {
11139 		rss_conf.queue_num = 1;
11140 		queue[0] = (*priv->reta_idx)[0];
11141 	}
11142 	at = flow_hw_actions_template_create(dev, &attr, actions, masks, &error);
11143 	if (!at)
11144 		DRV_LOG(ERR,
11145 			"Failed to create ctrl flow actions template: rte_errno(%d), type(%d): %s",
11146 			rte_errno, error.type,
11147 			error.message ? error.message : "(no stated reason)");
11148 	return at;
11149 }
11150 
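/*
 * Priority of each Rx control flow rule relative to the expanded RSS type:
 * the more specific the match (L4 over L3 over L2), the lower the priority
 * value and thus the higher the rule precedence.
 */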
11151 static uint32_t ctrl_rx_rss_priority_map[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX] = {
11152 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP] = MLX5_HW_CTRL_RX_PRIO_L2,
11153 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4] = MLX5_HW_CTRL_RX_PRIO_L3,
11154 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
11155 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
11156 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6] = MLX5_HW_CTRL_RX_PRIO_L3,
11157 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
11158 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
11159 };
11160 
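/*
 * Maximum number of control flow rules per Ethernet pattern type.
 * Patterns matching on VLAN or unicast DMAC need one rule per VLAN ID
 * and/or per unicast MAC address.
 */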
11161 static uint32_t ctrl_rx_nb_flows_map[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] = {
11162 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL] = 1,
11163 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST] = 1,
11164 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST] = 1,
11165 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11166 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST] = 1,
11167 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11168 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST] = 1,
11169 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11170 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC] = MLX5_MAX_UC_MAC_ADDRESSES,
11171 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN] =
11172 			MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
11173 };
11174 
11175 static struct rte_flow_template_table_attr
11176 flow_hw_get_ctrl_rx_table_attr(enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
11177 			       const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11178 {
11179 	return (struct rte_flow_template_table_attr){
11180 		.flow_attr = {
11181 			.group = 0,
11182 			.priority = ctrl_rx_rss_priority_map[rss_type],
11183 			.ingress = 1,
11184 		},
11185 		.nb_flows = ctrl_rx_nb_flows_map[eth_pattern_type],
11186 	};
11187 }
11188 
11189 static struct rte_flow_item
11190 flow_hw_get_ctrl_rx_eth_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
11191 {
11192 	struct rte_flow_item item = {
11193 		.type = RTE_FLOW_ITEM_TYPE_ETH,
11194 		.mask = NULL,
11195 	};
11196 
11197 	switch (eth_pattern_type) {
11198 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
11199 		item.mask = &ctrl_rx_eth_promisc_mask;
11200 		break;
11201 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
11202 		item.mask = &ctrl_rx_eth_mcast_mask;
11203 		break;
11204 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
11205 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
11206 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
11207 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
11208 		item.mask = &ctrl_rx_eth_dmac_mask;
11209 		break;
11210 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
11211 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
11212 		item.mask = &ctrl_rx_eth_ipv4_mcast_mask;
11213 		break;
11214 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
11215 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
11216 		item.mask = &ctrl_rx_eth_ipv6_mcast_mask;
11217 		break;
11218 	default:
11219 		/* Should not reach here - ETH mask must be present. */
11220 		item.type = RTE_FLOW_ITEM_TYPE_END;
11221 		MLX5_ASSERT(false);
11222 		break;
11223 	}
11224 	return item;
11225 }
11226 
11227 static struct rte_flow_item
11228 flow_hw_get_ctrl_rx_vlan_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
11229 {
11230 	struct rte_flow_item item = {
11231 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11232 		.mask = NULL,
11233 	};
11234 
11235 	switch (eth_pattern_type) {
11236 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
11237 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
11238 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
11239 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
11240 		item.type = RTE_FLOW_ITEM_TYPE_VLAN;
11241 		item.mask = &rte_flow_item_vlan_mask;
11242 		break;
11243 	default:
11244 		/* Nothing to update. */
11245 		break;
11246 	}
11247 	return item;
11248 }
11249 
11250 static struct rte_flow_item
11251 flow_hw_get_ctrl_rx_l3_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11252 {
11253 	struct rte_flow_item item = {
11254 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11255 		.mask = NULL,
11256 	};
11257 
11258 	switch (rss_type) {
11259 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
11260 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11261 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11262 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
11263 		break;
11264 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
11265 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11266 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11267 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
11268 		break;
11269 	default:
11270 		/* Nothing to update. */
11271 		break;
11272 	}
11273 	return item;
11274 }
11275 
11276 static struct rte_flow_item
11277 flow_hw_get_ctrl_rx_l4_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11278 {
11279 	struct rte_flow_item item = {
11280 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11281 		.mask = NULL,
11282 	};
11283 
11284 	switch (rss_type) {
11285 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11286 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11287 		item.type = RTE_FLOW_ITEM_TYPE_UDP;
11288 		break;
11289 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11290 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11291 		item.type = RTE_FLOW_ITEM_TYPE_TCP;
11292 		break;
11293 	default:
11294 		/* Nothing to update. */
11295 		break;
11296 	}
11297 	return item;
11298 }
11299 
11300 static struct rte_flow_pattern_template *
11301 flow_hw_create_ctrl_rx_pattern_template
11302 		(struct rte_eth_dev *dev,
11303 		 const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
11304 		 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11305 {
11306 	const struct rte_flow_pattern_template_attr attr = {
11307 		.relaxed_matching = 0,
11308 		.ingress = 1,
11309 	};
11310 	struct rte_flow_item items[] = {
11311 		/* Matching patterns */
11312 		flow_hw_get_ctrl_rx_eth_item(eth_pattern_type),
11313 		flow_hw_get_ctrl_rx_vlan_item(eth_pattern_type),
11314 		flow_hw_get_ctrl_rx_l3_item(rss_type),
11315 		flow_hw_get_ctrl_rx_l4_item(rss_type),
11316 		/* Terminate pattern */
11317 		{ .type = RTE_FLOW_ITEM_TYPE_END }
11318 	};
11319 
11320 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
11321 }
11322 
11323 static int
11324 flow_hw_create_ctrl_rx_tables(struct rte_eth_dev *dev)
11325 {
11326 	struct mlx5_priv *priv = dev->data->dev_private;
11327 	unsigned int i;
11328 	unsigned int j;
11329 	int ret;
11330 
11331 	MLX5_ASSERT(!priv->hw_ctrl_rx);
11332 	priv->hw_ctrl_rx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*priv->hw_ctrl_rx),
11333 				       RTE_CACHE_LINE_SIZE, rte_socket_id());
11334 	if (!priv->hw_ctrl_rx) {
11335 		DRV_LOG(ERR, "Failed to allocate memory for Rx control flow tables");
11336 		rte_errno = ENOMEM;
11337 		return -rte_errno;
11338 	}
11339 	/* Create all pattern template variants. */
11340 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11341 		enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
11342 
11343 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11344 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
11345 			struct rte_flow_template_table_attr attr;
11346 			struct rte_flow_pattern_template *pt;
11347 
11348 			attr = flow_hw_get_ctrl_rx_table_attr(eth_pattern_type, rss_type);
11349 			pt = flow_hw_create_ctrl_rx_pattern_template(dev, eth_pattern_type,
11350 								     rss_type);
11351 			if (!pt)
11352 				goto err;
11353 			priv->hw_ctrl_rx->tables[i][j].attr = attr;
11354 			priv->hw_ctrl_rx->tables[i][j].pt = pt;
11355 		}
11356 	}
11357 	return 0;
11358 err:
11359 	ret = rte_errno;
11360 	flow_hw_cleanup_ctrl_rx_tables(dev);
11361 	rte_errno = ret;
11362 	return -ret;
11363 }
11364 
11365 void
11366 mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev)
11367 {
11368 	struct mlx5_priv *priv = dev->data->dev_private;
11369 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
11370 	unsigned int i;
11371 	unsigned int j;
11372 
11373 	if (!priv->dr_ctx)
11374 		return;
11375 	if (!priv->hw_ctrl_rx)
11376 		return;
11377 	hw_ctrl_rx = priv->hw_ctrl_rx;
11378 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11379 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11380 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
11381 
11382 			if (tmpls->tbl) {
11383 				claim_zero(flow_hw_table_destroy(dev, tmpls->tbl, NULL));
11384 				tmpls->tbl = NULL;
11385 			}
11386 		}
11387 	}
11388 	for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11389 		if (hw_ctrl_rx->rss[j]) {
11390 			claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_rx->rss[j], NULL));
11391 			hw_ctrl_rx->rss[j] = NULL;
11392 		}
11393 	}
11394 }
11395 
11396 /**
11397  * Copy the provided HWS configuration to a newly allocated buffer.
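 *
 * The configuration is kept in a single allocation which is conceptually
 * laid out as follows (queue attributes are stored right after the main
 * structure):
 *
 * @code{.c}
 * struct {
 *     struct mlx5_flow_hw_attr hw_attr;
 *     struct rte_flow_queue_attr queue_attr[nb_queue];
 * };
 * @endcode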
11398  *
11399  * @param[in] port_attr
11400  *   Port configuration attributes.
11401  * @param[in] nb_queue
11402  *   Number of queues.
11403  * @param[in] queue_attr
11404  *   Array that holds attributes for each flow queue.
11405  * @param[in] nt_mode
11406  *   Non-template mode.
11407  *
11408  * @return
11409  *   Pointer to copied HWS configuration is returned on success.
11410  *   Otherwise, NULL is returned and rte_errno is set.
11411  */
11412 static struct mlx5_flow_hw_attr *
11413 flow_hw_alloc_copy_config(const struct rte_flow_port_attr *port_attr,
11414 			  const uint16_t nb_queue,
11415 			  const struct rte_flow_queue_attr *queue_attr[],
11416 			  bool nt_mode,
11417 			  struct rte_flow_error *error)
11418 {
11419 	struct mlx5_flow_hw_attr *hw_attr;
11420 	size_t hw_attr_size;
11421 	unsigned int i;
11422 
11423 	hw_attr_size = sizeof(*hw_attr) + nb_queue * sizeof(*hw_attr->queue_attr);
11424 	hw_attr = mlx5_malloc(MLX5_MEM_ZERO, hw_attr_size, 0, SOCKET_ID_ANY);
11425 	if (!hw_attr) {
11426 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11427 				   "Not enough memory to store configuration");
11428 		return NULL;
11429 	}
11430 	memcpy(&hw_attr->port_attr, port_attr, sizeof(*port_attr));
11431 	hw_attr->nb_queue = nb_queue;
11432 	/* Queue attributes are placed after the mlx5_flow_hw_attr. */
11433 	hw_attr->queue_attr = (struct rte_flow_queue_attr *)(hw_attr + 1);
11434 	for (i = 0; i < nb_queue; ++i)
11435 		memcpy(&hw_attr->queue_attr[i], queue_attr[i], sizeof(hw_attr->queue_attr[i]));
11436 	hw_attr->nt_mode = nt_mode;
11437 	return hw_attr;
11438 }
11439 
11440 /**
11441  * Compares the preserved HWS configuration with the provided one.
11442  *
11443  * @param[in] hw_attr
11444  *   Pointer to preserved HWS configuration.
11445  * @param[in] new_pa
11446  *   Port configuration attributes to compare.
11447  * @param[in] new_nbq
11448  *   Number of queues to compare.
11449  * @param[in] new_qa
11450  *   Array that holds attributes for each flow queue.
11451  *
11452  * @return
11453  *   True if configurations are the same, false otherwise.
11454  */
11455 static bool
11456 flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
11457 		       const struct rte_flow_port_attr *new_pa,
11458 		       const uint16_t new_nbq,
11459 		       const struct rte_flow_queue_attr *new_qa[])
11460 {
11461 	const struct rte_flow_port_attr *old_pa = &hw_attr->port_attr;
11462 	const uint16_t old_nbq = hw_attr->nb_queue;
11463 	const struct rte_flow_queue_attr *old_qa = hw_attr->queue_attr;
11464 	unsigned int i;
11465 
11466 	if (old_pa->nb_counters != new_pa->nb_counters ||
11467 	    old_pa->nb_aging_objects != new_pa->nb_aging_objects ||
11468 	    old_pa->nb_meters != new_pa->nb_meters ||
11469 	    old_pa->nb_conn_tracks != new_pa->nb_conn_tracks ||
11470 	    old_pa->flags != new_pa->flags)
11471 		return false;
11472 	if (old_nbq != new_nbq)
11473 		return false;
11474 	for (i = 0; i < old_nbq; ++i)
11475 		if (old_qa[i].size != new_qa[i]->size)
11476 			return false;
11477 	return true;
11478 }
11479 
11480 /*
11481  * No need to explicitly release drop action templates on port stop.
11482  * Drop action templates are released together with other action templates during
11483  * mlx5_dev_close -> flow_hw_resource_release -> flow_hw_actions_template_destroy
11484  */
11485 static void
11486 flow_hw_action_template_drop_release(struct rte_eth_dev *dev)
11487 {
11488 	int i;
11489 	struct mlx5_priv *priv = dev->data->dev_private;
11490 
11491 	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
11492 		if (!priv->action_template_drop[i])
11493 			continue;
11494 		flow_hw_actions_template_destroy(dev,
11495 						 priv->action_template_drop[i],
11496 						 NULL);
11497 		priv->action_template_drop[i] = NULL;
11498 	}
11499 }
11500 
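/*
 * Pre-create actions templates containing a single DROP action, one per
 * table type: NIC Rx, NIC Tx and, when the port is an E-Switch proxy, FDB.
 */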
11501 static int
11502 flow_hw_action_template_drop_init(struct rte_eth_dev *dev,
11503 			  struct rte_flow_error *error)
11504 {
11505 	const struct rte_flow_action drop[2] = {
11506 		[0] = { .type = RTE_FLOW_ACTION_TYPE_DROP },
11507 		[1] = { .type = RTE_FLOW_ACTION_TYPE_END },
11508 	};
11509 	const struct rte_flow_action *actions = drop;
11510 	const struct rte_flow_action *masks = drop;
11511 	const struct rte_flow_actions_template_attr attr[MLX5DR_TABLE_TYPE_MAX] = {
11512 		[MLX5DR_TABLE_TYPE_NIC_RX] = { .ingress = 1 },
11513 		[MLX5DR_TABLE_TYPE_NIC_TX] = { .egress = 1 },
11514 		[MLX5DR_TABLE_TYPE_FDB] = { .transfer = 1 }
11515 	};
11516 	struct mlx5_priv *priv = dev->data->dev_private;
11517 
11518 	priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX] =
11519 		flow_hw_actions_template_create(dev,
11520 						&attr[MLX5DR_TABLE_TYPE_NIC_RX],
11521 						actions, masks, error);
11522 	if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX])
11523 		return -1;
11524 	priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX] =
11525 		flow_hw_actions_template_create(dev,
11526 						&attr[MLX5DR_TABLE_TYPE_NIC_TX],
11527 						actions, masks, error);
11528 	if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX])
11529 		return -1;
11530 	if (priv->sh->config.dv_esw_en && priv->master) {
11531 		priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB] =
11532 			flow_hw_actions_template_create(dev,
11533 							&attr[MLX5DR_TABLE_TYPE_FDB],
11534 							actions, masks, error);
11535 		if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB])
11536 			return -1;
11537 	}
11538 	return 0;
11539 }
11540 
11541 static void
11542 __flow_hw_resource_release(struct rte_eth_dev *dev, bool ctx_close)
11543 {
11544 	struct mlx5_priv *priv = dev->data->dev_private;
11545 	struct rte_flow_template_table *tbl, *temp_tbl;
11546 	struct rte_flow_pattern_template *it, *temp_it;
11547 	struct rte_flow_actions_template *at, *temp_at;
11548 	struct mlx5_flow_group *grp, *temp_grp;
11549 	uint32_t i;
11550 
11551 	flow_hw_rxq_flag_set(dev, false);
11552 	flow_hw_flush_all_ctrl_flows(dev);
11553 	flow_hw_cleanup_ctrl_fdb_tables(dev);
11554 	flow_hw_cleanup_tx_repr_tagging(dev);
11555 	flow_hw_cleanup_ctrl_rx_tables(dev);
11556 	flow_hw_action_template_drop_release(dev);
11557 	grp = LIST_FIRST(&priv->flow_hw_grp);
11558 	while (grp) {
11559 		temp_grp = LIST_NEXT(grp, next);
11560 		claim_zero(flow_hw_group_unset_miss_group(dev, grp, NULL));
11561 		grp = temp_grp;
11562 	}
11563 	tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
11564 	while (tbl) {
11565 		temp_tbl = LIST_NEXT(tbl, next);
11566 		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11567 		tbl = temp_tbl;
11568 	}
11569 	tbl = LIST_FIRST(&priv->flow_hw_tbl);
11570 	while (tbl) {
11571 		temp_tbl = LIST_NEXT(tbl, next);
11572 		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11573 		tbl = temp_tbl;
11574 	}
11575 	it = LIST_FIRST(&priv->flow_hw_itt);
11576 	while (it) {
11577 		temp_it = LIST_NEXT(it, next);
11578 		claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL));
11579 		it = temp_it;
11580 	}
11581 	at = LIST_FIRST(&priv->flow_hw_at);
11582 	while (at) {
11583 		temp_at = LIST_NEXT(at, next);
11584 		claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
11585 		at = temp_at;
11586 	}
11587 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
11588 		if (priv->hw_drop[i])
11589 			mlx5dr_action_destroy(priv->hw_drop[i]);
11590 		if (priv->hw_tag[i])
11591 			mlx5dr_action_destroy(priv->hw_tag[i]);
11592 	}
11593 	if (priv->hw_def_miss)
11594 		mlx5dr_action_destroy(priv->hw_def_miss);
11595 	flow_hw_destroy_nat64_actions(priv);
11596 	flow_hw_destroy_vlan(dev);
11597 	flow_hw_destroy_send_to_kernel_action(priv);
11598 	flow_hw_free_vport_actions(priv);
11599 	if (priv->acts_ipool) {
11600 		mlx5_ipool_destroy(priv->acts_ipool);
11601 		priv->acts_ipool = NULL;
11602 	}
11603 	if (priv->hws_age_req)
11604 		mlx5_hws_age_pool_destroy(priv);
11605 	if (!priv->shared_host && priv->hws_cpool) {
11606 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
11607 		priv->hws_cpool = NULL;
11608 	}
11609 	if (priv->hws_ctpool) {
11610 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
11611 		priv->hws_ctpool = NULL;
11612 	}
11613 	if (priv->ct_mng) {
11614 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
11615 		priv->ct_mng = NULL;
11616 	}
11617 	mlx5_flow_quota_destroy(dev);
11618 	if (priv->hw_q) {
11619 		for (i = 0; i < priv->nb_queue; i++) {
11620 			struct mlx5_hw_q *hwq = &priv->hw_q[i];
11621 			rte_ring_free(hwq->indir_iq);
11622 			rte_ring_free(hwq->indir_cq);
11623 			rte_ring_free(hwq->flow_transfer_pending);
11624 			rte_ring_free(hwq->flow_transfer_completed);
11625 		}
11626 		mlx5_free(priv->hw_q);
11627 		priv->hw_q = NULL;
11628 	}
11629 	if (ctx_close) {
11630 		if (priv->dr_ctx) {
11631 			claim_zero(mlx5dr_context_close(priv->dr_ctx));
11632 			priv->dr_ctx = NULL;
11633 		}
11634 	}
11635 	if (priv->shared_host) {
11636 		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
11637 		rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
11638 				rte_memory_order_relaxed);
11639 		priv->shared_host = NULL;
11640 	}
11641 	if (priv->hw_attr) {
11642 		mlx5_free(priv->hw_attr);
11643 		priv->hw_attr = NULL;
11644 	}
11645 	priv->nb_queue = 0;
11646 }
11647 
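/*
 * Create a single-producer/single-consumer ring named
 * "port_<port_id>_<str>_<queue>" with exactly "size" usable entries.
 */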
11648 static __rte_always_inline struct rte_ring *
11649 mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char *str)
11650 {
11651 	char mz_name[RTE_MEMZONE_NAMESIZE];
11652 
11653 	snprintf(mz_name, sizeof(mz_name), "port_%u_%s_%u", port_id, str, queue);
11654 	return rte_ring_create(mz_name, size, SOCKET_ID_ANY,
11655 			       RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
11656 }
11657 
11658 static int
11659 flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr,
11660 			    uint16_t nb_queue,
11661 			    const struct rte_flow_queue_attr *queue_attr[],
11662 			    bool nt_mode, struct rte_flow_error *error)
11663 {
11664 	uint32_t size;
11665 	unsigned int i;
11666 
11667 	if (port_attr == NULL)
11668 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11669 					  "Port attributes must be non-NULL");
11670 
11671 	if (nb_queue == 0 && !nt_mode)
11672 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11673 					  "At least one flow queue is required");
11674 
11675 	if (queue_attr == NULL)
11676 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11677 					  "Queue attributes must be non-NULL");
11678 
11679 	size = queue_attr[0]->size;
11680 	for (i = 1; i < nb_queue; ++i) {
11681 		if (queue_attr[i]->size != size)
11682 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11683 						  NULL,
11684 						  "All flow queues must have the same size");
11685 	}
11686 
11687 	return 0;
11688 }
11689 
11690 /**
11691  * Configure port HWS resources.
11692  *
11693  * @param[in] dev
11694  *   Pointer to the rte_eth_dev structure.
11695  * @param[in] port_attr
11696  *   Port configuration attributes.
11697  * @param[in] nb_queue
11698  *   Number of queues.
11699  * @param[in] queue_attr
11700  *   Array that holds attributes for each flow queue.
11701  * @param[in] nt_mode
11702  *   Non-template mode.
11703  * @param[out] error
11704  *   Pointer to error structure.
11705  *
11706  * @return
11707  *   0 on success, a negative errno value otherwise and rte_errno is set.
11708  */
11709 static int
11710 __flow_hw_configure(struct rte_eth_dev *dev,
11711 		  const struct rte_flow_port_attr *port_attr,
11712 		  uint16_t nb_queue,
11713 		  const struct rte_flow_queue_attr *queue_attr[],
11714 		  bool nt_mode,
11715 		  struct rte_flow_error *error)
11716 {
11717 	struct mlx5_priv *priv = dev->data->dev_private;
11718 	struct mlx5_priv *host_priv = NULL;
11719 	struct mlx5dr_context_attr dr_ctx_attr = {0};
11720 	struct mlx5_hw_q *hw_q;
11721 	struct mlx5_hw_q_job *job = NULL;
11722 	uint32_t mem_size, i, j;
11723 	struct mlx5_indexed_pool_config cfg = {
11724 		.size = sizeof(struct mlx5_action_construct_data),
11725 		.trunk_size = 4096,
11726 		.need_lock = 1,
11727 		.release_mem_en = !!priv->sh->config.reclaim_mode,
11728 		.malloc = mlx5_malloc,
11729 		.free = mlx5_free,
11730 		.type = "mlx5_hw_action_construct_data",
11731 	};
11732 	/*
11733 	 * One additional queue is allocated on top of the application-requested
11734 	 * queues; this last queue is reserved for PMD internal use.
11735 	 */
11736 	uint16_t nb_q_updated = 0;
11737 	struct rte_flow_queue_attr **_queue_attr = NULL;
11738 	struct rte_flow_queue_attr ctrl_queue_attr = {0};
11739 	bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
11740 	int ret = 0;
11741 	uint32_t action_flags;
11742 	bool strict_queue = false;
11743 
11744 	error->type = RTE_FLOW_ERROR_TYPE_NONE;
11745 	if (mlx5dr_rule_get_handle_size() != MLX5_DR_RULE_SIZE) {
11746 		rte_errno = EINVAL;
11747 		goto err;
11748 	}
11749 	if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, nt_mode, error))
11750 		return -rte_errno;
11751 	/*
11752 	 * Calling rte_flow_configure() again is allowed only if the provided
11753 	 * configuration matches the initially provided one, or if the previous
11754 	 * configuration was the default non-template one.
11755 	 */
11756 	if (priv->dr_ctx) {
11757 		MLX5_ASSERT(priv->hw_attr != NULL);
11758 		for (i = 0; i < priv->nb_queue; i++) {
11759 			hw_q = &priv->hw_q[i];
11760 			/* Make sure all queues are empty. */
11761 			if (hw_q->size != hw_q->job_idx) {
11762 				rte_errno = EBUSY;
11763 				goto err;
11764 			}
11765 		}
11766 		/* If previous configuration was not default non template mode config. */
11767 		if (!priv->hw_attr->nt_mode) {
11768 			if (flow_hw_compare_config(priv->hw_attr, port_attr, nb_queue, queue_attr))
11769 				return 0;
11770 			else
11771 				return rte_flow_error_set(error, ENOTSUP,
11772 							RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11773 							"Changing HWS configuration attributes "
11774 							"is not supported");
11775 		}
11776 		/* Reconfiguration, need to release all resources from previous allocation. */
11777 		__flow_hw_resource_release(dev, true);
11778 	}
11779 	priv->hw_attr = flow_hw_alloc_copy_config(port_attr, nb_queue, queue_attr, nt_mode, error);
11780 	if (!priv->hw_attr) {
11781 		ret = -rte_errno;
11782 		goto err;
11783 	}
11784 	ctrl_queue_attr.size = queue_attr[0]->size;
11785 	nb_q_updated = nb_queue + 1;
11786 	_queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
11787 				  nb_q_updated *
11788 				  sizeof(struct rte_flow_queue_attr *),
11789 				  64, SOCKET_ID_ANY);
11790 	if (!_queue_attr) {
11791 		rte_errno = ENOMEM;
11792 		goto err;
11793 	}
11794 
11795 	memcpy(_queue_attr, queue_attr, sizeof(void *) * nb_queue);
11796 	_queue_attr[nb_queue] = &ctrl_queue_attr;
11797 	priv->acts_ipool = mlx5_ipool_create(&cfg);
11798 	if (!priv->acts_ipool)
11799 		goto err;
11800 	/* Allocate the queue job descriptor LIFO. */
11801 	mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
11802 	for (i = 0; i < nb_q_updated; i++) {
11803 		mem_size += (sizeof(struct mlx5_hw_q_job *) +
11804 			     sizeof(struct mlx5_hw_q_job)) * _queue_attr[i]->size;
11805 	}
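	/*
	 * The single allocation below is laid out as:
	 *   struct mlx5_hw_q hw_q[nb_q_updated];
	 * followed, for every queue i, by its job pointer LIFO and job storage:
	 *   struct mlx5_hw_q_job *job[_queue_attr[i]->size];
	 *   struct mlx5_hw_q_job jobs[_queue_attr[i]->size];
	 */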
11806 	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
11807 				 64, SOCKET_ID_ANY);
11808 	if (!priv->hw_q) {
11809 		rte_errno = ENOMEM;
11810 		goto err;
11811 	}
11812 	for (i = 0; i < nb_q_updated; i++) {
11813 		priv->hw_q[i].job_idx = _queue_attr[i]->size;
11814 		priv->hw_q[i].size = _queue_attr[i]->size;
11815 		priv->hw_q[i].ongoing_flow_ops = 0;
11816 		if (i == 0)
11817 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
11818 					    &priv->hw_q[nb_q_updated];
11819 		else
11820 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)&job[_queue_attr[i - 1]->size];
11821 		job = (struct mlx5_hw_q_job *)
11822 		      &priv->hw_q[i].job[_queue_attr[i]->size];
11823 		for (j = 0; j < _queue_attr[i]->size; j++)
11824 			priv->hw_q[i].job[j] = &job[j];
11825 		/* Note that the ring name length is limited. */
11826 		priv->hw_q[i].indir_cq = mlx5_hwq_ring_create
11827 			(dev->data->port_id, i, _queue_attr[i]->size, "indir_act_cq");
11828 		if (!priv->hw_q[i].indir_cq)
11829 			goto err;
11830 		priv->hw_q[i].indir_iq = mlx5_hwq_ring_create
11831 			(dev->data->port_id, i, _queue_attr[i]->size, "indir_act_iq");
11832 		if (!priv->hw_q[i].indir_iq)
11833 			goto err;
11834 		priv->hw_q[i].flow_transfer_pending = mlx5_hwq_ring_create
11835 			(dev->data->port_id, i, _queue_attr[i]->size, "tx_pending");
11836 		if (!priv->hw_q[i].flow_transfer_pending)
11837 			goto err;
11838 		priv->hw_q[i].flow_transfer_completed = mlx5_hwq_ring_create
11839 			(dev->data->port_id, i, _queue_attr[i]->size, "tx_done");
11840 		if (!priv->hw_q[i].flow_transfer_completed)
11841 			goto err;
11842 	}
11843 	dr_ctx_attr.pd = priv->sh->cdev->pd;
11844 	dr_ctx_attr.queues = nb_q_updated;
11845 	/* Assign initial value of STC numbers for representors. */
11846 	if (priv->representor)
11847 		dr_ctx_attr.initial_log_stc_memory = MLX5_REPR_STC_MEMORY_LOG;
11848 	/* Queue sizes should all be the same. Take the first one. */
11849 	dr_ctx_attr.queue_size = _queue_attr[0]->size;
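	/*
	 * Cross vHCA sharing: with RTE_FLOW_PORT_FLAG_SHARE_INDIRECT the port
	 * acts as a guest and reuses the indirect objects (counters, meters,
	 * CT, quotas) of the host port, so it must not request any of its own.
	 */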
11850 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
11851 		struct rte_eth_dev *host_dev = NULL;
11852 		uint16_t port_id;
11853 
11854 		MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
11855 		if (is_proxy) {
11856 			DRV_LOG(ERR, "cross vHCA shared mode not supported "
11857 				"for E-Switch confgiurations");
11858 			rte_errno = ENOTSUP;
11859 			goto err;
11860 		}
11861 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
11862 			if (port_id == port_attr->host_port_id) {
11863 				host_dev = &rte_eth_devices[port_id];
11864 				break;
11865 			}
11866 		}
11867 		if (!host_dev || host_dev == dev ||
11868 		    !host_dev->data || !host_dev->data->dev_private) {
11869 			DRV_LOG(ERR, "Invalid cross vHCA host port %u",
11870 				port_attr->host_port_id);
11871 			rte_errno = EINVAL;
11872 			goto err;
11873 		}
11874 		host_priv = host_dev->data->dev_private;
11875 		if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
11876 			DRV_LOG(ERR, "Sibling ports %u and %u do not "
11877 				     "require cross vHCA sharing mode",
11878 				dev->data->port_id, port_attr->host_port_id);
11879 			rte_errno = EINVAL;
11880 			goto err;
11881 		}
11882 		if (host_priv->shared_host) {
11883 			DRV_LOG(ERR, "Host port %u is not the sharing base",
11884 				port_attr->host_port_id);
11885 			rte_errno = EINVAL;
11886 			goto err;
11887 		}
11888 		if (port_attr->nb_counters ||
11889 		    port_attr->nb_aging_objects ||
11890 		    port_attr->nb_meters ||
11891 		    port_attr->nb_conn_tracks) {
11892 			DRV_LOG(ERR,
11893 				"Object numbers on guest port must be zeros");
11894 			rte_errno = EINVAL;
11895 			goto err;
11896 		}
11897 		dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
11898 		priv->shared_host = host_dev;
11899 		rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
11900 				rte_memory_order_relaxed);
11901 	}
11902 	/* Set backward compatible mode to support the non-template RTE flow API. */
11903 	dr_ctx_attr.bwc = true;
11904 	priv->dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
11905 	/* rte_errno has been updated by HWS layer. */
11906 	if (!priv->dr_ctx)
11907 		goto err;
11908 	priv->nb_queue = nb_q_updated;
11909 	ret = flow_hw_action_template_drop_init(dev, error);
11910 	if (ret)
11911 		goto err;
11912 	ret = flow_hw_create_ctrl_rx_tables(dev);
11913 	if (ret) {
11914 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11915 				   "Failed to set up Rx control flow templates");
11916 		goto err;
11917 	}
11918 	/* Initialize quotas */
11919 	if (port_attr->nb_quotas || (host_priv && host_priv->quota_ctx.devx_obj)) {
11920 		ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas);
11921 		if (ret) {
11922 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11923 					   "Failed to initialize quota.");
11924 			goto err;
11925 		}
11926 	}
11927 	/* Initialize meter library. */
11928 	if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
11929 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 0, 0, nb_q_updated))
11930 			goto err;
11931 	/* Add global actions. */
11932 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
11933 		uint32_t act_flags = 0;
11934 
11935 		act_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];
11936 		if (is_proxy)
11937 			act_flags |= mlx5_hw_act_flag[i][2];
11938 		priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
11939 		if (!priv->hw_drop[i])
11940 			goto err;
11941 		priv->hw_tag[i] = mlx5dr_action_create_tag
11942 			(priv->dr_ctx, mlx5_hw_act_flag[i][0]);
11943 		if (!priv->hw_tag[i])
11944 			goto err;
11945 	}
11946 	if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) {
11947 		ret = flow_hw_setup_tx_repr_tagging(dev, error);
11948 		if (ret)
11949 			goto err;
11950 	}
11951 	/*
11952 	 * The DEFAULT_MISS action has different behaviors in different domains.
11953 	 * In FDB, it steers the packets to the E-Switch manager.
11954 	 * In NIC Rx root, it steers the packets to the kernel driver stack.
11955 	 * An action with all flag bits set can be created, and the HWS layer
11956 	 * will translate it properly when it is used in different rules.
11957 	 */
11958 	action_flags = MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX |
11959 		       MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX;
11960 	if (is_proxy)
11961 		action_flags |= (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB);
11962 	priv->hw_def_miss = mlx5dr_action_create_default_miss(priv->dr_ctx, action_flags);
11963 	if (!priv->hw_def_miss)
11964 		goto err;
11965 	if (is_proxy) {
11966 		ret = flow_hw_create_vport_actions(priv);
11967 		if (ret) {
11968 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11969 					   NULL, "Failed to create vport actions.");
11970 			goto err;
11971 		}
11972 		ret = flow_hw_create_ctrl_tables(dev, error);
11973 		if (ret) {
11974 			rte_errno = -ret;
11975 			goto err;
11976 		}
11977 	}
11978 	if (!priv->shared_host)
11979 		flow_hw_create_send_to_kernel_actions(priv);
11980 	if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
11981 		if (mlx5_flow_ct_init(dev, port_attr->nb_conn_tracks, nb_q_updated))
11982 			goto err;
11983 	}
11984 	if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
11985 		struct mlx5_hws_cnt_pool *hws_cpool = host_priv ? host_priv->hws_cpool : NULL;
11986 
11987 		ret = mlx5_hws_cnt_pool_create(dev, port_attr->nb_counters,
11988 					       nb_queue, hws_cpool, error);
11989 		if (ret)
11990 			goto err;
11991 	}
11992 	if (port_attr->nb_aging_objects) {
11993 		if (port_attr->nb_counters == 0) {
11994 			/*
11995 			 * Aging management uses counters. The number of counters
11996 			 * requested should account for one counter per flow rule
11997 			 * containing an AGE action without an explicit counter.
11998 			 */
11999 			DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
12000 				"without counters requesting.",
12001 				dev->data->port_id,
12002 				port_attr->nb_aging_objects);
12003 			rte_errno = EINVAL;
12004 			goto err;
12005 		}
12006 		if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
12007 			DRV_LOG(ERR, "Aging is not supported "
12008 				"in cross vHCA sharing mode");
12009 			ret = -ENOTSUP;
12010 			goto err;
12011 		}
12012 		strict_queue = !!(port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
12013 		ret = mlx5_hws_age_pool_init(dev, port_attr->nb_aging_objects,
12014 						nb_queue, strict_queue);
12015 		if (ret < 0)
12016 			goto err;
12017 	}
12018 	ret = flow_hw_create_vlan(dev);
12019 	if (ret) {
12020 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12021 				   NULL, "Failed to VLAN actions.");
12022 		goto err;
12023 	}
12024 	if (flow_hw_create_nat64_actions(priv, error))
12025 		DRV_LOG(WARNING, "Cannot create NAT64 action on port %u, "
12026 			"please check the FW version", dev->data->port_id);
12027 	if (_queue_attr)
12028 		mlx5_free(_queue_attr);
12029 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
12030 		priv->hws_strict_queue = 1;
12031 	dev->flow_fp_ops = &mlx5_flow_hw_fp_ops;
12032 	return 0;
12033 err:
12034 	__flow_hw_resource_release(dev, true);
12035 	if (_queue_attr)
12036 		mlx5_free(_queue_attr);
12037 	/* Do not overwrite the internal errno information. */
12038 	if (ret && error->type != RTE_FLOW_ERROR_TYPE_NONE)
12039 		return ret;
12040 	return rte_flow_error_set(error, rte_errno,
12041 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12042 				  "fail to configure port");
12043 }
12044 
12045 /**
12046  * Configure port HWS resources.
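 *
 * This is the HWS driver implementation invoked through rte_flow_configure().
 * A typical application-side call that reaches this function looks roughly
 * like:
 *
 * @code{.c}
 * const struct rte_flow_port_attr port_attr = { .nb_counters = 1024 };
 * const struct rte_flow_queue_attr q_attr = { .size = 256 };
 * const struct rte_flow_queue_attr *queue_attr[] = { &q_attr };
 * struct rte_flow_error error;
 * int ret = rte_flow_configure(port_id, &port_attr, 1, queue_attr, &error);
 * @endcode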
12047  *
12048  * @param[in] dev
12049  *   Pointer to the rte_eth_dev structure.
12050  * @param[in] port_attr
12051  *   Port configuration attributes.
12052  * @param[in] nb_queue
12053  *   Number of queues.
12054  * @param[in] queue_attr
12055  *   Array that holds attributes for each flow queue.
12056  * @param[out] error
12057  *   Pointer to error structure.
12058  *
12059  * @return
12060  *   0 on success, a negative errno value otherwise and rte_errno is set.
12061  */
12062 static int
12063 flow_hw_configure(struct rte_eth_dev *dev,
12064 		  const struct rte_flow_port_attr *port_attr,
12065 		  uint16_t nb_queue,
12066 		  const struct rte_flow_queue_attr *queue_attr[],
12067 		  struct rte_flow_error *error)
12068 {
12069 	struct rte_flow_error shadow_error = {0, };
12070 
12071 	if (!error)
12072 		error = &shadow_error;
12073 	return __flow_hw_configure(dev, port_attr, nb_queue, queue_attr, false, error);
12074 }
12075 
12076 /**
12077  * Release HWS resources.
12078  *
12079  * @param[in] dev
12080  *   Pointer to the rte_eth_dev structure.
12081  */
12082 void
12083 flow_hw_resource_release(struct rte_eth_dev *dev)
12084 {
12085 	struct mlx5_priv *priv = dev->data->dev_private;
12086 
12087 	if (!priv->dr_ctx)
12088 		return;
12089 	__flow_hw_resource_release(dev, false);
12090 }
12091 
12092 /* Sets vport tag and mask, for given port, used in HWS rules. */
12093 void
12094 flow_hw_set_port_info(struct rte_eth_dev *dev)
12095 {
12096 	struct mlx5_priv *priv = dev->data->dev_private;
12097 	uint16_t port_id = dev->data->port_id;
12098 	struct flow_hw_port_info *info;
12099 
12100 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
12101 	info = &mlx5_flow_hw_port_infos[port_id];
12102 	info->regc_mask = priv->vport_meta_mask;
12103 	info->regc_value = priv->vport_meta_tag;
12104 	info->is_wire = mlx5_is_port_on_mpesw_device(priv) ? priv->mpesw_uplink : priv->master;
12105 }
12106 
12107 /* Clears vport tag and mask used for HWS rules. */
12108 void
12109 flow_hw_clear_port_info(struct rte_eth_dev *dev)
12110 {
12111 	uint16_t port_id = dev->data->port_id;
12112 	struct flow_hw_port_info *info;
12113 
12114 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
12115 	info = &mlx5_flow_hw_port_infos[port_id];
12116 	info->regc_mask = 0;
12117 	info->regc_value = 0;
12118 	info->is_wire = 0;
12119 }
12120 
12121 static int
12122 flow_hw_conntrack_destroy(struct rte_eth_dev *dev,
12123 			  uint32_t idx,
12124 			  struct rte_flow_error *error)
12125 {
12126 	struct mlx5_priv *priv = dev->data->dev_private;
12127 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12128 	struct mlx5_aso_ct_action *ct;
12129 
12130 	if (priv->shared_host)
12131 		return rte_flow_error_set(error, ENOTSUP,
12132 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12133 				NULL,
12134 				"CT destruction is not allowed to guest port");
12135 	ct = mlx5_ipool_get(pool->cts, idx);
12136 	if (!ct) {
12137 		return rte_flow_error_set(error, EINVAL,
12138 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12139 				NULL,
12140 				"Invalid CT destruction index");
12141 	}
12142 	rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
12143 				 rte_memory_order_relaxed);
12144 	mlx5_ipool_free(pool->cts, idx);
12145 	return 0;
12146 }
12147 
12148 static int
12149 flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,
12150 			struct rte_flow_action_conntrack *profile,
12151 			void *user_data, bool push,
12152 			struct rte_flow_error *error)
12153 {
12154 	struct mlx5_priv *priv = dev->data->dev_private;
12155 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12156 	struct mlx5_aso_ct_action *ct;
12157 
12158 	if (priv->shared_host)
12159 		return rte_flow_error_set(error, ENOTSUP,
12160 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12161 				NULL,
12162 				"CT query is not allowed to guest port");
12163 	ct = mlx5_ipool_get(pool->cts, idx);
12164 	if (!ct) {
12165 		return rte_flow_error_set(error, EINVAL,
12166 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12167 				NULL,
12168 				"Invalid CT query index");
12169 	}
12170 	profile->peer_port = ct->peer;
12171 	profile->is_original_dir = ct->is_original;
12172 	if (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))
12173 		return rte_flow_error_set(error, EIO,
12174 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12175 				NULL,
12176 				"Failed to query CT context");
12177 	return 0;
12178 }
12179 
12180 
12181 static int
12182 flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
12183 			 const struct rte_flow_modify_conntrack *action_conf,
12184 			 uint32_t idx, void *user_data, bool push,
12185 			 struct rte_flow_error *error)
12186 {
12187 	struct mlx5_priv *priv = dev->data->dev_private;
12188 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12189 	struct mlx5_aso_ct_action *ct;
12190 	const struct rte_flow_action_conntrack *new_prf;
12191 	int ret = 0;
12192 
12193 	if (priv->shared_host)
12194 		return rte_flow_error_set(error, ENOTSUP,
12195 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12196 				NULL,
12197 				"CT update is not allowed to guest port");
12198 	ct = mlx5_ipool_get(pool->cts, idx);
12199 	if (!ct) {
12200 		return rte_flow_error_set(error, EINVAL,
12201 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12202 				NULL,
12203 				"Invalid CT update index");
12204 	}
12205 	new_prf = &action_conf->new_ct;
12206 	if (action_conf->direction)
12207 		ct->is_original = !!new_prf->is_original_dir;
12208 	if (action_conf->state) {
12209 		/* Only validate the profile when it needs to be updated. */
12210 		ret = mlx5_validate_action_ct(dev, new_prf, error);
12211 		if (ret)
12212 			return ret;
12213 		ret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,
12214 						user_data, push);
12215 		if (ret)
12216 			return rte_flow_error_set(error, EIO,
12217 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12218 					NULL,
12219 					"Failed to send CT context update WQE");
12220 		if (queue != MLX5_HW_INV_QUEUE)
12221 			return 0;
12222 		/* Block until ready or a failure in synchronous mode. */
12223 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
12224 		if (ret)
12225 			rte_flow_error_set(error, rte_errno,
12226 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12227 					   NULL,
12228 					   "Timeout to get the CT update");
12229 	}
12230 	return ret;
12231 }
12232 
12233 static struct rte_flow_action_handle *
12234 flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
12235 			 const struct rte_flow_action_conntrack *pro,
12236 			 void *user_data, bool push,
12237 			 struct rte_flow_error *error)
12238 {
12239 	struct mlx5_priv *priv = dev->data->dev_private;
12240 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12241 	struct mlx5_aso_ct_action *ct;
12242 	uint32_t ct_idx = 0;
12243 	int ret;
12244 	bool async = !!(queue != MLX5_HW_INV_QUEUE);
12245 
12246 	if (priv->shared_host) {
12247 		rte_flow_error_set(error, ENOTSUP,
12248 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12249 				NULL,
12250 				"CT create is not allowed to guest port");
12251 		return NULL;
12252 	}
12253 	if (!pool) {
12254 		rte_flow_error_set(error, EINVAL,
12255 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12256 				   "CT is not enabled");
12257 		return 0;
12258 	}
12259 	ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
12260 	if (!ct) {
12261 		rte_flow_error_set(error, rte_errno,
12262 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12263 				   "Failed to allocate CT object");
12264 		return 0;
12265 	}
12266 	ct->offset = ct_idx - 1;
12267 	ct->is_original = !!pro->is_original_dir;
12268 	ct->peer = pro->peer_port;
12269 	ct->pool = pool;
12270 	if (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {
12271 		mlx5_ipool_free(pool->cts, ct_idx);
12272 		rte_flow_error_set(error, EBUSY,
12273 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12274 				   "Failed to update CT");
12275 		return 0;
12276 	}
12277 	if (!async) {
12278 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
12279 		if (ret) {
12280 			mlx5_ipool_free(pool->cts, ct_idx);
12281 			rte_flow_error_set(error, rte_errno,
12282 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12283 					   NULL,
12284 					   "Timeout to get the CT update");
12285 			return 0;
12286 		}
12287 	}
12288 	return MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(ct_idx);
12289 }
12290 
12291 /**
12292  * Validate shared action.
12293  *
12294  * @param[in] dev
12295  *   Pointer to the rte_eth_dev structure.
12296  * @param[in] queue
12297  *   Which queue to be used.
12298  * @param[in] attr
12299  *   Operation attribute.
12300  * @param[in] conf
12301  *   Indirect action configuration.
12302  * @param[in] action
12303  *   rte_flow action detail.
12304  * @param[in] user_data
12305  *   Pointer to the user_data.
12306  * @param[out] error
12307  *   Pointer to error structure.
12308  *
12309  * @return
12310  *   0 on success, otherwise negative errno value.
12311  */
12312 static int
12313 flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
12314 			       const struct rte_flow_op_attr *attr,
12315 			       const struct rte_flow_indir_action_conf *conf,
12316 			       const struct rte_flow_action *action,
12317 			       void *user_data,
12318 			       struct rte_flow_error *error)
12319 {
12320 	struct mlx5_priv *priv = dev->data->dev_private;
12321 
12322 	RTE_SET_USED(attr);
12323 	RTE_SET_USED(queue);
12324 	RTE_SET_USED(user_data);
12325 	switch (action->type) {
12326 	case RTE_FLOW_ACTION_TYPE_AGE:
12327 		if (!priv->hws_age_req) {
12328 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_AGE,
12329 						     error))
12330 				return rte_flow_error_set
12331 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12332 					 NULL, "aging pool not initialized");
12333 		}
12334 		break;
12335 	case RTE_FLOW_ACTION_TYPE_COUNT:
12336 		if (!priv->hws_cpool) {
12337 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_COUNT,
12338 						     error))
12339 				return rte_flow_error_set
12340 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12341 					 NULL, "counters pool not initialized");
12342 		}
12343 		break;
12344 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12345 		if (priv->hws_ctpool == NULL) {
12346 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_CT,
12347 						     error))
12348 				return rte_flow_error_set
12349 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12350 					 NULL, "CT pool not initialized");
12351 		}
12352 		return mlx5_validate_action_ct(dev, action->conf, error);
12353 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
12354 		return flow_hw_validate_action_meter_mark(dev, action, true, error);
12355 	case RTE_FLOW_ACTION_TYPE_RSS:
12356 		return flow_dv_action_validate(dev, conf, action, error);
12357 	case RTE_FLOW_ACTION_TYPE_QUOTA:
12358 		return 0;
12359 	default:
12360 		return rte_flow_error_set(error, ENOTSUP,
12361 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12362 					  "action type not supported");
12363 	}
12364 	return 0;
12365 }
12366 
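/*
 * An indirect action operation is pushed to HW immediately unless the caller
 * explicitly requested to postpone it (NULL attributes imply push).
 */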
12367 static __rte_always_inline bool
12368 flow_hw_action_push(const struct rte_flow_op_attr *attr)
12369 {
12370 	return attr ? !attr->postpone : true;
12371 }
12372 
12373 static __rte_always_inline struct mlx5_hw_q_job *
12374 flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
12375 			const struct rte_flow_action_handle *handle,
12376 			void *user_data, void *query_data,
12377 			enum mlx5_hw_job_type type,
12378 			enum mlx5_hw_indirect_type indirect_type,
12379 			struct rte_flow_error *error)
12380 {
12381 	struct mlx5_hw_q_job *job;
12382 
12383 	if (queue == MLX5_HW_INV_QUEUE)
12384 		queue = CTRL_QUEUE_ID(priv);
12385 	job = flow_hw_job_get(priv, queue);
12386 	if (!job) {
12387 		rte_flow_error_set(error, ENOMEM,
12388 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
12389 				   "Action destroy failed due to queue full.");
12390 		return NULL;
12391 	}
12392 	job->type = type;
12393 	job->action = handle;
12394 	job->user_data = user_data;
12395 	job->query.user = query_data;
12396 	job->indirect_type = indirect_type;
12397 	return job;
12398 }
12399 
12400 struct mlx5_hw_q_job *
12401 mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
12402 			  const struct rte_flow_action_handle *handle,
12403 			  void *user_data, void *query_data,
12404 			  enum mlx5_hw_job_type type,
12405 			  struct rte_flow_error *error)
12406 {
12407 	return flow_hw_action_job_init(priv, queue, handle, user_data, query_data,
12408 				       type, MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12409 }
12410 
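/*
 * Complete an indirect action operation: on success a non-ASO job is enqueued
 * on the completion ring (pushed) or the pending ring (postponed) and, if
 * requested, pending operations are pushed to HW; on failure the job is
 * returned to the queue's job pool.
 */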
12411 static __rte_always_inline void
12412 flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
12413 			struct mlx5_hw_q_job *job,
12414 			bool push, bool aso, bool status)
12415 {
12416 	struct mlx5_priv *priv = dev->data->dev_private;
12417 
12418 	if (queue == MLX5_HW_INV_QUEUE)
12419 		queue = CTRL_QUEUE_ID(priv);
12420 	if (likely(status)) {
12421 		/* 1. add new job to a queue */
12422 		if (!aso)
12423 			rte_ring_enqueue(push ?
12424 					 priv->hw_q[queue].indir_cq :
12425 					 priv->hw_q[queue].indir_iq,
12426 					 job);
12427 		/* 2. send pending jobs */
12428 		if (push)
12429 			__flow_hw_push_action(dev, queue);
12430 	} else {
12431 		flow_hw_job_put(priv, job, queue);
12432 	}
12433 }
12434 
12435 /**
12436  * Create shared action.
12437  *
12438  * @param[in] dev
12439  *   Pointer to the rte_eth_dev structure.
12440  * @param[in] queue
12441  *   Which queue to be used.
12442  * @param[in] attr
12443  *   Operation attribute.
12444  * @param[in] conf
12445  *   Indirect action configuration.
12446  * @param[in] action
12447  *   rte_flow action detail.
12448  * @param[in] user_data
12449  *   Pointer to the user_data.
12450  * @param[out] error
12451  *   Pointer to error structure.
12452  *
12453  * @return
12454  *   Action handle on success, NULL otherwise and rte_errno is set.
12455  */
12456 static struct rte_flow_action_handle *
12457 flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
12458 			     const struct rte_flow_op_attr *attr,
12459 			     const struct rte_flow_indir_action_conf *conf,
12460 			     const struct rte_flow_action *action,
12461 			     void *user_data,
12462 			     struct rte_flow_error *error)
12463 {
12464 	struct rte_flow_action_handle *handle = NULL;
12465 	struct mlx5_hw_q_job *job = NULL;
12466 	struct mlx5_priv *priv = dev->data->dev_private;
12467 	const struct rte_flow_action_age *age;
12468 	struct mlx5_aso_mtr *aso_mtr;
12469 	cnt_id_t cnt_id;
12470 	uint32_t age_idx;
12471 	bool push = flow_hw_action_push(attr);
12472 	bool aso = false;
12473 	bool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK;
12474 
12475 	if (!mlx5_hw_ctx_validate(dev, error))
12476 		return NULL;
12477 	if (attr || force_job) {
12478 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
12479 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
12480 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12481 		if (!job)
12482 			return NULL;
12483 	}
12484 	switch (action->type) {
12485 	case RTE_FLOW_ACTION_TYPE_AGE:
12486 		if (priv->hws_strict_queue) {
12487 			struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
12488 
12489 			if (queue >= info->hw_q_age->nb_rings) {
12490 				rte_flow_error_set(error, EINVAL,
12491 						   RTE_FLOW_ERROR_TYPE_ACTION,
12492 						   NULL,
12493 						   "Invalid queue ID for indirect AGE.");
12494 				rte_errno = EINVAL;
12495 				return NULL;
12496 			}
12497 		}
12498 		age = action->conf;
12499 		age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
12500 						     0, error);
12501 		if (age_idx == 0) {
12502 			rte_flow_error_set(error, ENODEV,
12503 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12504 					   "AGE are not configured!");
12505 		} else {
12506 			age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
12507 				   MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
12508 			handle =
12509 			    (struct rte_flow_action_handle *)(uintptr_t)age_idx;
12510 		}
12511 		break;
12512 	case RTE_FLOW_ACTION_TYPE_COUNT:
12513 		if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
12514 			rte_flow_error_set(error, ENODEV,
12515 					RTE_FLOW_ERROR_TYPE_ACTION,
12516 					NULL,
12517 					"counter are not configured!");
12518 		else
12519 			handle = (struct rte_flow_action_handle *)
12520 				 (uintptr_t)cnt_id;
12521 		break;
12522 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12523 		aso = true;
12524 		handle = flow_hw_conntrack_create(dev, queue, action->conf, job,
12525 						  push, error);
12526 		break;
12527 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
12528 		aso = true;
12529 		aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push, error);
12530 		if (!aso_mtr)
12531 			break;
12532 		handle = (void *)(uintptr_t)job->action;
12533 		break;
12534 	case RTE_FLOW_ACTION_TYPE_RSS:
12535 		handle = flow_dv_action_create(dev, conf, action, error);
12536 		break;
12537 	case RTE_FLOW_ACTION_TYPE_QUOTA:
12538 		aso = true;
12539 		handle = mlx5_quota_alloc(dev, queue, action->conf,
12540 					  job, push, error);
12541 		break;
12542 	default:
12543 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12544 				   NULL, "action type not supported");
12545 		break;
12546 	}
12547 	if (job && !force_job) {
12548 		job->action = handle;
12549 		flow_hw_action_finalize(dev, queue, job, push, aso,
12550 					handle != NULL);
12551 	}
12552 	return handle;
12553 }
12554 
12555 static int
12556 mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,
12557 			    const struct rte_flow_update_meter_mark *upd_meter_mark,
12558 			    uint32_t idx, bool push,
12559 			    struct mlx5_hw_q_job *job, struct rte_flow_error *error)
12560 {
12561 	struct mlx5_priv *priv = dev->data->dev_private;
12562 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
12563 	const struct rte_flow_action_meter_mark *meter_mark = &upd_meter_mark->meter_mark;
12564 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
12565 	struct mlx5_flow_meter_info *fm;
12566 
12567 	if (!aso_mtr)
12568 		return rte_flow_error_set(error, EINVAL,
12569 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12570 					  NULL, "Invalid meter_mark update index");
12571 	fm = &aso_mtr->fm;
12572 	if (upd_meter_mark->profile_valid)
12573 		fm->profile = (struct mlx5_flow_meter_profile *)
12574 			(meter_mark->profile);
12575 	if (upd_meter_mark->color_mode_valid)
12576 		fm->color_aware = meter_mark->color_mode;
12577 	if (upd_meter_mark->state_valid)
12578 		fm->is_enable = meter_mark->state;
12579 	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
12580 			 ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
12581 	/* Update ASO flow meter by wqe. */
12582 	if (mlx5_aso_meter_update_by_wqe(priv, queue,
12583 					 aso_mtr, &priv->mtr_bulk, job, push))
12584 		return rte_flow_error_set(error, EINVAL,
12585 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12586 					  NULL, "Unable to update ASO meter WQE");
12587 	/* Wait for ASO object completion. */
12588 	if (queue == MLX5_HW_INV_QUEUE &&
12589 	    mlx5_aso_mtr_wait(priv, aso_mtr, true))
12590 		return rte_flow_error_set(error, EINVAL,
12591 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12592 					  NULL, "Unable to wait for ASO meter CQE");
12593 	return 0;
12594 }
12595 
12596 /**
12597  * Update shared action.
12598  *
12599  * @param[in] dev
12600  *   Pointer to the rte_eth_dev structure.
12601  * @param[in] queue
12602  *   Which queue to be used.
12603  * @param[in] attr
12604  *   Operation attribute.
12605  * @param[in] handle
12606  *   Action handle to be updated.
12607  * @param[in] update
12608  *   Update value.
12609  * @param[in] user_data
12610  *   Pointer to the user_data.
12611  * @param[out] error
12612  *   Pointer to error structure.
12613  *
12614  * @return
12615  *   0 on success, negative value otherwise and rte_errno is set.
12616  */
12617 static int
12618 flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
12619 			     const struct rte_flow_op_attr *attr,
12620 			     struct rte_flow_action_handle *handle,
12621 			     const void *update,
12622 			     void *user_data,
12623 			     struct rte_flow_error *error)
12624 {
12625 	struct mlx5_priv *priv = dev->data->dev_private;
12626 	const struct rte_flow_modify_conntrack *ct_conf =
12627 		(const struct rte_flow_modify_conntrack *)update;
12628 	struct mlx5_hw_q_job *job = NULL;
12629 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12630 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12631 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
12632 	int ret = 0;
12633 	bool push = flow_hw_action_push(attr);
12634 	bool aso = false;
12635 	bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
12636 
12637 	if (attr || force_job) {
12638 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
12639 					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
12640 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12641 		if (!job)
12642 			return -rte_errno;
12643 	}
12644 	switch (type) {
12645 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
12646 		ret = mlx5_hws_age_action_update(priv, idx, update, error);
12647 		break;
12648 	case MLX5_INDIRECT_ACTION_TYPE_CT:
12649 		if (ct_conf->state)
12650 			aso = true;
12651 		ret = flow_hw_conntrack_update(dev, queue, update, idx,
12652 					       job, push, error);
12653 		break;
12654 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
12655 		aso = true;
12656 		ret = mlx5_flow_update_meter_mark(dev, queue, update, idx, push,
12657 						  job, error);
12658 		break;
12659 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
12660 		ret = flow_dv_action_update(dev, handle, update, error);
12661 		break;
12662 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12663 		aso = true;
12664 		ret = mlx5_quota_query_update(dev, queue, handle, update, NULL,
12665 					      job, push, error);
12666 		break;
12667 	default:
12668 		ret = -ENOTSUP;
12669 		rte_flow_error_set(error, ENOTSUP,
12670 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12671 					  "action type not supported");
12672 		break;
12673 	}
12674 	if (job && !force_job)
12675 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12676 	return ret;
12677 }
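
/*
 * Illustration only (not part of the driver): a minimal sketch of how an
 * application could reach the update path above for a METER_MARK handle
 * through the async API. The port ID, queue ID and handle are assumptions.
 *
 * @code{.c}
 * struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * struct rte_flow_update_meter_mark upd = {
 *     .meter_mark = { .state = 1 }, // re-enable the meter
 *     .state_valid = 1,             // only the state field is updated
 * };
 * struct rte_flow_error error;
 *
 * // handle was returned earlier by rte_flow_async_action_handle_create().
 * if (rte_flow_async_action_handle_update(port_id, queue_id, &op_attr,
 *                                         handle, &upd, NULL, &error))
 *     printf("meter_mark update failed: %s\n", error.message);
 * @endcode
 */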
12678 
12679 /**
12680  * Destroy shared action.
12681  *
12682  * @param[in] dev
12683  *   Pointer to the rte_eth_dev structure.
12684  * @param[in] queue
12685  *   Which queue to be used.
12686  * @param[in] attr
12687  *   Operation attribute.
12688  * @param[in] handle
12689  *   Action handle to be destroyed.
12690  * @param[in] user_data
12691  *   Pointer to the user_data.
12692  * @param[out] error
12693  *   Pointer to error structure.
12694  *
12695  * @return
12696  *   0 on success, negative value otherwise and rte_errno is set.
12697  */
12698 static int
12699 flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
12700 			      const struct rte_flow_op_attr *attr,
12701 			      struct rte_flow_action_handle *handle,
12702 			      void *user_data,
12703 			      struct rte_flow_error *error)
12704 {
12705 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12706 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12707 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
12708 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
12709 	struct mlx5_priv *priv = dev->data->dev_private;
12710 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
12711 	struct mlx5_hw_q_job *job = NULL;
12712 	struct mlx5_aso_mtr *aso_mtr;
12713 	struct mlx5_flow_meter_info *fm;
12714 	bool push = flow_hw_action_push(attr);
12715 	bool aso = false;
12716 	int ret = 0;
12717 	bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
12718 
12719 	if (attr || force_job) {
12720 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
12721 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
12722 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12723 		if (!job)
12724 			return -rte_errno;
12725 	}
12726 	switch (type) {
12727 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
12728 		ret = mlx5_hws_age_action_destroy(priv, age_idx, error);
12729 		break;
12730 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
12731 		age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
12732 		if (age_idx != 0)
12733 			/*
12734 			 * If this counter belongs to indirect AGE, here is the
12735 			 * time to update the AGE.
12736 			 */
12737 			mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
12738 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
12739 		break;
12740 	case MLX5_INDIRECT_ACTION_TYPE_CT:
12741 		ret = flow_hw_conntrack_destroy(dev, idx, error);
12742 		break;
12743 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
12744 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
12745 		if (!aso_mtr) {
12746 			ret = -EINVAL;
12747 			rte_flow_error_set(error, EINVAL,
12748 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12749 				NULL, "Invalid meter_mark destroy index");
12750 			break;
12751 		}
12752 		fm = &aso_mtr->fm;
12753 		fm->is_enable = 0;
12754 		/* Update ASO flow meter by wqe. */
12755 		if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
12756 						 &priv->mtr_bulk, job, push)) {
12757 			ret = -EINVAL;
12758 			rte_flow_error_set(error, EINVAL,
12759 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12760 				NULL, "Unable to update ASO meter WQE");
12761 			break;
12762 		}
12763 		/* Wait for ASO object completion. */
12764 		if (queue == MLX5_HW_INV_QUEUE &&
12765 		    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
12766 			ret = -EINVAL;
12767 			rte_flow_error_set(error, EINVAL,
12768 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12769 				NULL, "Unable to wait for ASO meter CQE");
12770 			break;
12771 		}
12772 		aso = true;
12773 		break;
12774 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
12775 		ret = flow_dv_action_destroy(dev, handle, error);
12776 		break;
12777 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12778 		break;
12779 	default:
12780 		ret = -ENOTSUP;
12781 		rte_flow_error_set(error, ENOTSUP,
12782 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12783 					  "action type not supported");
12784 		break;
12785 	}
12786 	if (job && !force_job)
12787 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12788 	return ret;
12789 }
12790 
12791 static int
12792 flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
12793 		      void *data, struct rte_flow_error *error)
12794 {
12795 	struct mlx5_hws_cnt_pool *hpool;
12796 	struct mlx5_priv *priv = dev->data->dev_private;
12797 	struct mlx5_hws_cnt *cnt;
12798 	struct rte_flow_query_count *qc = data;
12799 	uint32_t iidx;
12800 	uint64_t pkts, bytes;
12801 
12802 	if (!mlx5_hws_cnt_id_valid(counter))
12803 		return rte_flow_error_set(error, EINVAL,
12804 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12805 				"counter is not available");
12806 	hpool = mlx5_hws_cnt_host_pool(priv->hws_cpool);
12807 	iidx = mlx5_hws_cnt_iidx(hpool, counter);
12808 	cnt = &hpool->pool[iidx];
12809 	__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
12810 	qc->hits_set = 1;
12811 	qc->bytes_set = 1;
12812 	qc->hits = pkts - cnt->reset.hits;
12813 	qc->bytes = bytes - cnt->reset.bytes;
12814 	if (qc->reset) {
12815 		cnt->reset.bytes = bytes;
12816 		cnt->reset.hits = pkts;
12817 	}
12818 	return 0;
12819 }
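
/*
 * Illustration only (not part of the driver): a minimal sketch of querying a
 * flow COUNT action with the reset semantics handled above. The port ID and
 * flow pointer are assumptions; the synchronous rte_flow_query() call is
 * expected to land in flow_hw_query() above for HWS flows.
 *
 * @code{.c}
 * struct rte_flow_action count_action = {
 *     .type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * struct rte_flow_query_count qc = { .reset = 1 }; // also reset the baseline
 * struct rte_flow_error error;
 *
 * if (rte_flow_query(port_id, flow, &count_action, &qc, &error) == 0 &&
 *     qc.hits_set && qc.bytes_set)
 *     printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", qc.hits, qc.bytes);
 * @endcode
 */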
12820 
12821 /**
12822  * Query a flow rule AGE action for aging information.
12823  *
12824  * @param[in] dev
12825  *   Pointer to Ethernet device.
12826  * @param[in] age_idx
12827  *   Index of AGE action parameter.
12828  * @param[out] data
12829  *   Data retrieved by the query.
12830  * @param[out] error
12831  *   Perform verbose error reporting if not NULL.
12832  *
12833  * @return
12834  *   0 on success, a negative errno value otherwise and rte_errno is set.
12835  */
12836 static int
12837 flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
12838 		  struct rte_flow_error *error)
12839 {
12840 	struct mlx5_priv *priv = dev->data->dev_private;
12841 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
12842 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
12843 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
12844 	struct rte_flow_query_age *resp = data;
12845 
12846 	if (!param || !param->timeout)
12847 		return rte_flow_error_set(error, EINVAL,
12848 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12849 					  NULL, "age data not available");
12850 	switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
12851 	case HWS_AGE_AGED_OUT_REPORTED:
12852 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
12853 		resp->aged = 1;
12854 		break;
12855 	case HWS_AGE_CANDIDATE:
12856 	case HWS_AGE_CANDIDATE_INSIDE_RING:
12857 		resp->aged = 0;
12858 		break;
12859 	case HWS_AGE_FREE:
12860 		/*
12861 		 * When state is FREE the flow itself should be invalid.
12862 		 * Fall-through.
12863 		 */
12864 	default:
12865 		MLX5_ASSERT(0);
12866 		break;
12867 	}
12868 	resp->sec_since_last_hit_valid = !resp->aged;
12869 	if (resp->sec_since_last_hit_valid)
12870 		resp->sec_since_last_hit = rte_atomic_load_explicit
12871 				 (&param->sec_since_last_hit, rte_memory_order_relaxed);
12872 	return 0;
12873 }
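
/*
 * Illustration only (not part of the driver): a minimal sketch of reading the
 * aging state filled in above, here through an indirect AGE action handle.
 * The port ID and handle are assumptions.
 *
 * @code{.c}
 * struct rte_flow_query_age age = { 0 };
 * struct rte_flow_error error;
 *
 * // handle was returned earlier by rte_flow_action_handle_create().
 * if (rte_flow_action_handle_query(port_id, handle, &age, &error) == 0) {
 *     if (age.aged)
 *         printf("flow aged out\n");
 *     else if (age.sec_since_last_hit_valid)
 *         printf("last hit %u seconds ago\n", age.sec_since_last_hit);
 * }
 * @endcode
 */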
12874 
12875 static int
12876 flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
12877 	      const struct rte_flow_action *actions, void *data,
12878 	      struct rte_flow_error *error)
12879 {
12880 	int ret = -EINVAL;
12881 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
12882 	struct rte_flow_hw_aux *aux;
12883 
12884 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
12885 		switch (actions->type) {
12886 		case RTE_FLOW_ACTION_TYPE_VOID:
12887 			break;
12888 		case RTE_FLOW_ACTION_TYPE_COUNT:
12889 			if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID))
12890 				return rte_flow_error_set(error, EINVAL,
12891 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12892 							  "counter not defined in the rule");
12893 			ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
12894 						    error);
12895 			break;
12896 		case RTE_FLOW_ACTION_TYPE_AGE:
12897 			if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX))
12898 				return rte_flow_error_set(error, EINVAL,
12899 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12900 							  "age data not available");
12901 			aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
12902 			ret = flow_hw_query_age(dev, mlx5_flow_hw_aux_get_age_idx(hw_flow, aux),
12903 						data, error);
12904 			break;
12905 		default:
12906 			return rte_flow_error_set(error, ENOTSUP,
12907 						  RTE_FLOW_ERROR_TYPE_ACTION,
12908 						  actions,
12909 						  "action not supported");
12910 		}
12911 	}
12912 	return ret;
12913 }
12914 
12915 /**
12916  * Validate indirect action.
12917  *
12918  * @param[in] dev
12919  *   Pointer to the Ethernet device structure.
12920  * @param[in] conf
12921  *   Shared action configuration.
12922  * @param[in] action
12923  *   Action specification used to create indirect action.
12924  * @param[out] error
12925  *   Perform verbose error reporting if not NULL. Initialized in case of
12926  *   error only.
12927  *
12928  * @return
12929  *   0 on success, otherwise negative errno value.
12930  */
12931 static int
12932 flow_hw_action_validate(struct rte_eth_dev *dev,
12933 			const struct rte_flow_indir_action_conf *conf,
12934 			const struct rte_flow_action *action,
12935 			struct rte_flow_error *err)
12936 {
12937 	struct rte_flow_error shadow_error = {0, };
12938 
12939 	if (!err)
12940 		err = &shadow_error;
12941 	return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
12942 					      conf, action, NULL, err);
12943 }
12944 
12945 /**
12946  * Create indirect action.
12947  *
12948  * @param[in] dev
12949  *   Pointer to the Ethernet device structure.
12950  * @param[in] conf
12951  *   Shared action configuration.
12952  * @param[in] action
12953  *   Action specification used to create indirect action.
12954  * @param[out] error
12955  *   Perform verbose error reporting if not NULL. Initialized in case of
12956  *   error only.
12957  *
12958  * @return
12959  *   A valid shared action handle in case of success, NULL otherwise and
12960  *   rte_errno is set.
12961  */
12962 static struct rte_flow_action_handle *
12963 flow_hw_action_create(struct rte_eth_dev *dev,
12964 		       const struct rte_flow_indir_action_conf *conf,
12965 		       const struct rte_flow_action *action,
12966 		       struct rte_flow_error *err)
12967 {
12968 	return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
12969 					    NULL, conf, action, NULL, err);
12970 }
12971 
12972 /**
12973  * Destroy the indirect action.
12974  * Release action related resources on the NIC and the memory.
12975  * Lock free (mutex should be acquired by the caller).
12976  * Dispatcher for action type specific call.
12977  *
12978  * @param[in] dev
12979  *   Pointer to the Ethernet device structure.
12980  * @param[in] handle
12981  *   The indirect action object handle to be removed.
12982  * @param[out] error
12983  *   Perform verbose error reporting if not NULL. Initialized in case of
12984  *   error only.
12985  *
12986  * @return
12987  *   0 on success, otherwise negative errno value.
12988  */
12989 static int
12990 flow_hw_action_destroy(struct rte_eth_dev *dev,
12991 		       struct rte_flow_action_handle *handle,
12992 		       struct rte_flow_error *error)
12993 {
12994 	return flow_hw_action_handle_destroy(dev, MLX5_HW_INV_QUEUE,
12995 			NULL, handle, NULL, error);
12996 }
12997 
12998 /**
12999  * Updates the shared action configuration in place.
13000  *
13001  * @param[in] dev
13002  *   Pointer to the Ethernet device structure.
13003  * @param[in] handle
13004  *   The indirect action object handle to be updated.
13005  * @param[in] update
13006  *   Action specification used to modify the action pointed to by *handle*.
13007  *   *update* could be of the same type as the action pointed to by the
13008  *   *handle* argument, or some other structure like a wrapper, depending on
13009  *   the indirect action type.
13010  * @param[out] error
13011  *   Perform verbose error reporting if not NULL. Initialized in case of
13012  *   error only.
13013  *
13014  * @return
13015  *   0 on success, otherwise negative errno value.
13016  */
13017 static int
13018 flow_hw_action_update(struct rte_eth_dev *dev,
13019 		      struct rte_flow_action_handle *handle,
13020 		      const void *update,
13021 		      struct rte_flow_error *err)
13022 {
13023 	return flow_hw_action_handle_update(dev, MLX5_HW_INV_QUEUE,
13024 			NULL, handle, update, NULL, err);
13025 }
13026 
13027 static int
13028 flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
13029 			    const struct rte_flow_op_attr *attr,
13030 			    const struct rte_flow_action_handle *handle,
13031 			    void *data, void *user_data,
13032 			    struct rte_flow_error *error)
13033 {
13034 	struct mlx5_priv *priv = dev->data->dev_private;
13035 	struct mlx5_hw_q_job *job = NULL;
13036 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13037 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13038 	uint32_t idx = MLX5_INDIRECT_ACTION_IDX_GET(handle);
13039 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
13040 	int ret;
13041 	bool push = flow_hw_action_push(attr);
13042 	bool aso = false;
13043 
13044 	if (attr) {
13045 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
13046 					      data, MLX5_HW_Q_JOB_TYPE_QUERY,
13047 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
13048 		if (!job)
13049 			return -rte_errno;
13050 	}
13051 	switch (type) {
13052 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
13053 		ret = flow_hw_query_age(dev, age_idx, data, error);
13054 		break;
13055 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13056 		ret = flow_hw_query_counter(dev, act_idx, data, error);
13057 		break;
13058 	case MLX5_INDIRECT_ACTION_TYPE_CT:
13059 		aso = true;
13060 		if (job)
13061 			job->query.user = data;
13062 		ret = flow_hw_conntrack_query(dev, queue, idx, data,
13063 					      job, push, error);
13064 		break;
13065 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
13066 		aso = true;
13067 		ret = mlx5_quota_query(dev, queue, handle, data,
13068 				       job, push, error);
13069 		break;
13070 	default:
13071 		ret = -ENOTSUP;
13072 		rte_flow_error_set(error, ENOTSUP,
13073 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13074 					  "action type not supported");
13075 		break;
13076 	}
13077 	if (job)
13078 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
13079 	return ret;
13080 }
13081 
13082 static int
13083 flow_hw_async_action_handle_query_update
13084 			(struct rte_eth_dev *dev, uint32_t queue,
13085 			 const struct rte_flow_op_attr *attr,
13086 			 struct rte_flow_action_handle *handle,
13087 			 const void *update, void *query,
13088 			 enum rte_flow_query_update_mode qu_mode,
13089 			 void *user_data, struct rte_flow_error *error)
13090 {
13091 	struct mlx5_priv *priv = dev->data->dev_private;
13092 	bool push = flow_hw_action_push(attr);
13093 	bool aso = false;
13094 	struct mlx5_hw_q_job *job = NULL;
13095 	int ret = 0;
13096 
13097 	if (attr) {
13098 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
13099 					      query,
13100 					      MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
13101 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
13102 		if (!job)
13103 			return -rte_errno;
13104 	}
13105 	switch (MLX5_INDIRECT_ACTION_TYPE_GET(handle)) {
13106 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
13107 		if (qu_mode != RTE_FLOW_QU_QUERY_FIRST) {
13108 			ret = rte_flow_error_set
13109 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
13110 				 NULL, "quota action must query before update");
13111 			break;
13112 		}
13113 		aso = true;
13114 		ret = mlx5_quota_query_update(dev, queue, handle,
13115 					      update, query, job, push, error);
13116 		break;
13117 	default:
13118 		ret = rte_flow_error_set(error, ENOTSUP,
13119 					 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "update and query not supported");
13120 	}
13121 	if (job)
13122 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
13123 	return ret;
13124 }
13125 
13126 static int
13127 flow_hw_action_query(struct rte_eth_dev *dev,
13128 		     const struct rte_flow_action_handle *handle, void *data,
13129 		     struct rte_flow_error *error)
13130 {
13131 	return flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,
13132 			handle, data, NULL, error);
13133 }
13134 
13135 static int
13136 flow_hw_action_query_update(struct rte_eth_dev *dev,
13137 			    struct rte_flow_action_handle *handle,
13138 			    const void *update, void *query,
13139 			    enum rte_flow_query_update_mode qu_mode,
13140 			    struct rte_flow_error *error)
13141 {
13142 	return flow_hw_async_action_handle_query_update(dev, MLX5_HW_INV_QUEUE,
13143 							NULL, handle, update,
13144 							query, qu_mode, NULL,
13145 							error);
13146 }
13147 
13148 /**
13149  * Get aged-out flows of a given port on the given HWS flow queue.
13150  *
13151  * @param[in] dev
13152  *   Pointer to the Ethernet device structure.
13153  * @param[in] queue_id
13154  *   Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is not set.
13155  * @param[in, out] contexts
13156  *   The address of an array of pointers to the aged-out flows contexts.
13157  * @param[in] nb_contexts
13158  *   The length of context array pointers.
13159  * @param[out] error
13160  *   Perform verbose error reporting if not NULL. Initialized in case of
13161  *   error only.
13162  *
13163  * @return
13164  *   if nb_contexts is 0, return the amount of all aged contexts.
13165  *   If nb_contexts is 0, the number of all aged contexts.
13166  *   If nb_contexts is not 0, the number of aged flows reported
13167  *   in the context array, otherwise a negative errno value.
13168 static int
13169 flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
13170 			 void **contexts, uint32_t nb_contexts,
13171 			 struct rte_flow_error *error)
13172 {
13173 	struct mlx5_priv *priv = dev->data->dev_private;
13174 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
13175 	struct rte_ring *r;
13176 	int nb_flows = 0;
13177 
13178 	if (nb_contexts && !contexts)
13179 		return rte_flow_error_set(error, EINVAL,
13180 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13181 					  NULL, "empty context");
13182 	if (!priv->hws_age_req)
13183 		return rte_flow_error_set(error, ENOENT,
13184 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13185 					  NULL, "No aging initialized");
13186 	if (priv->hws_strict_queue) {
13187 		if (queue_id >= age_info->hw_q_age->nb_rings)
13188 			return rte_flow_error_set(error, EINVAL,
13189 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13190 						NULL, "invalid queue id");
13191 		r = age_info->hw_q_age->aged_lists[queue_id];
13192 	} else {
13193 		r = age_info->hw_age.aged_list;
13194 		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13195 	}
13196 	if (nb_contexts == 0)
13197 		return rte_ring_count(r);
13198 	while ((uint32_t)nb_flows < nb_contexts) {
13199 		uint32_t age_idx;
13200 
13201 		if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
13202 			break;
13203 		/* get the AGE context if the aged-out index is still valid. */
13204 		contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
13205 		if (!contexts[nb_flows])
13206 			continue;
13207 		nb_flows++;
13208 	}
13209 	return nb_flows;
13210 }
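
/*
 * Illustration only (not part of the driver): a minimal sketch of the two-pass
 * pattern the return convention above allows: first ask for the number of
 * aged-out contexts, then fetch them. The port and queue IDs are assumptions;
 * handle_aged_out_context() is an application-defined placeholder.
 *
 * @code{.c}
 * struct rte_flow_error error;
 * int nb = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error);
 *
 * if (nb > 0) {
 *     void **ctx = malloc(sizeof(void *) * nb);
 *
 *     if (ctx != NULL) {
 *         nb = rte_flow_get_q_aged_flows(port_id, queue_id, ctx, nb, &error);
 *         for (int i = 0; i < nb; i++)
 *             handle_aged_out_context(ctx[i]); // application-defined
 *         free(ctx);
 *     }
 * }
 * @endcode
 */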
13211 
13212 /**
13213  * Get aged-out flows.
13214  *
13215  * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
13216  *
13217  * @param[in] dev
13218  *   Pointer to the Ethernet device structure.
13219  * @param[in] contexts
13220  *   The address of an array of pointers to the aged-out flows contexts.
13221  * @param[in] nb_contexts
13222  *   The length of context array pointers.
13223  * @param[out] error
13224  *   Perform verbose error reporting if not NULL. Initialized in case of
13225  *   error only.
13226  *
13227  * @return
13228  *   The number of contexts retrieved on success, otherwise a negative errno value.
13229  *   If nb_contexts is 0, the number of all aged contexts.
13230  *   If nb_contexts is not 0, the number of aged flows reported
13231  *   in the context array.
13232  */
13233 static int
13234 flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
13235 		       uint32_t nb_contexts, struct rte_flow_error *error)
13236 {
13237 	struct mlx5_priv *priv = dev->data->dev_private;
13238 
13239 	if (priv->hws_strict_queue)
13240 		DRV_LOG(WARNING,
13241 			"port %u get aged flows called in strict queue mode.",
13242 			dev->data->port_id);
13243 	return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
13244 }
13245 /**
13246  * Initialization function for the non-template API which calls
13247  * flow_hw_configure with default values.
13248  * No application queues are configured because one queue is configured by default for internal usage.
13249  *
13250  * @param[in] dev
13251  *   Pointer to the Ethernet device structure.
13252  * @param[out] error
13253  *   Pointer to the error structure.
13254  *
13255  * @return
13256  *   0 on success, a negative errno value otherwise and rte_errno is set.
13257  */
13258 int
13259 flow_hw_init(struct rte_eth_dev *dev,
13260 	     struct rte_flow_error *error)
13261 {
13262 	const struct rte_flow_port_attr port_attr = {0};
13263 	const struct rte_flow_queue_attr queue_attr = {.size = MLX5_NT_DEFAULT_QUEUE_SIZE};
13264 	const struct rte_flow_queue_attr *attr_list = &queue_attr;
13265 
13266 	/**
13267 	 * If the user uses both the template and the non-template API:
13268 	 * the user calls flow_hw_configure and the non-template
13269 	 * API uses the already allocated actions.
13270 	 * The init function will not call flow_hw_configure.
13271 	 *
13272 	 * If the user uses only the non-template API:
13273 	 * the init function will call flow_hw_configure.
13274 	 * It will not allocate memory for actions.
13275 	 * When allocation is needed, it will be handled the same way as for SWS today,
13276 	 * meaning bulk allocations and resize as needed.
13277 	 */
13278 	/* Configure hws with default values. */
13279 	DRV_LOG(DEBUG, "Apply default configuration, zero number of queues, inner control queue size is %u",
13280 		MLX5_NT_DEFAULT_QUEUE_SIZE);
13281 	return __flow_hw_configure(dev, &port_attr, 0, &attr_list, true, error);
13282 }
13283 
13284 static int flow_hw_prepare(struct rte_eth_dev *dev,
13285 			   const struct rte_flow_action actions[] __rte_unused,
13286 			   enum mlx5_flow_type type,
13287 			   struct rte_flow_hw **flow,
13288 			   struct rte_flow_error *error)
13289 {
13290 	struct mlx5_priv *priv = dev->data->dev_private;
13291 	uint32_t idx = 0;
13292 
13293 	 /*
13294 	  * Notice pool idx size = (sizeof(struct rte_flow_hw)
13295 	  * + sizeof(struct rte_flow_nt2hws)) for HWS mode.
13296 	  */
13297 	*flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
13298 	if (!(*flow))
13299 		return rte_flow_error_set(error, ENOMEM,
13300 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13301 			"cannot allocate flow memory");
13302 	/* Allocating 2 structures in one pool slot, updating the nt2hws pointer. */
13303 	(*flow)->nt2hws = (struct rte_flow_nt2hws *)
13304 				((uintptr_t)(*flow) + sizeof(struct rte_flow_hw));
13305 	(*flow)->idx = idx;
13306 	(*flow)->nt2hws->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux),
13307 				    RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
13308 	if (!(*flow)->nt2hws->flow_aux)
13309 		return rte_flow_error_set(error, ENOMEM,
13310 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13311 				"cannot allocate flow aux memory");
13312 	return 0;
13313 }
13314 
13315 #define FLOW_HW_SET_DV_FIELDS(flow_attr, root, dv_resource) {					\
13316 	typeof(flow_attr) _flow_attr = (flow_attr);						\
13317 	if (_flow_attr->transfer)								\
13318 		dv_resource.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;				\
13319 	else											\
13320 		dv_resource.ft_type = _flow_attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :	\
13321 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;			\
13322 	root = _flow_attr->group ? 0 : 1;							\
13323 	dv_resource.flags =									\
13324 		mlx5_hw_act_flag[!!_flow_attr->group][get_mlx5dr_table_type(_flow_attr)];	\
13325 }
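
/*
 * Illustration only (not part of the driver): expanding the macro above for an
 * ingress rule in a non-root group. The attribute value is an assumption, and
 * get_mlx5dr_table_type() is assumed to map a plain ingress attribute to
 * MLX5DR_TABLE_TYPE_NIC_RX (matching the transfer/egress/ingress mapping used
 * elsewhere in this file).
 *
 * @code{.c}
 * struct rte_flow_attr attr = { .group = 3, .ingress = 1 };
 * struct mlx5_flow_dv_modify_hdr_resource res;
 * bool root;
 *
 * FLOW_HW_SET_DV_FIELDS(&attr, root, res);
 * // res.ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX, root == false,
 * // res.flags == mlx5_hw_act_flag[1][MLX5DR_TABLE_TYPE_NIC_RX]
 * @endcode
 */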
13326 
13327 static int
13328 flow_hw_modify_hdr_resource_register
13329 			(struct rte_eth_dev *dev,
13330 			 struct rte_flow_template_table *table,
13331 			 struct mlx5_hw_actions *hw_acts,
13332 			 struct rte_flow_hw *dev_flow,
13333 			 struct rte_flow_error *error)
13334 {
13335 	struct rte_flow_attr *attr = &table->cfg.attr.flow_attr;
13336 	struct mlx5_flow_dv_modify_hdr_resource *dv_resource_ptr = NULL;
13337 	union {
13338 		struct mlx5_flow_dv_modify_hdr_resource dv_resource;
13339 		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13340 			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
13341 	} dummy;
13342 	int ret;
13343 
13344 	if (hw_acts->mhdr) {
13345 		dummy.dv_resource.actions_num = hw_acts->mhdr->mhdr_cmds_num;
13346 		memcpy(dummy.dv_resource.actions, hw_acts->mhdr->mhdr_cmds,
13347 			sizeof(struct mlx5_modification_cmd) * dummy.dv_resource.actions_num);
13348 	} else {
13349 		return 0;
13350 	}
13351 	FLOW_HW_SET_DV_FIELDS(attr, dummy.dv_resource.root, dummy.dv_resource);
13352 	dummy.dv_resource.flags |= MLX5DR_ACTION_FLAG_SHARED;
13353 	ret = __flow_modify_hdr_resource_register(dev, &dummy.dv_resource,
13354 		&dv_resource_ptr, error);
13355 	if (ret)
13356 		return ret;
13357 	MLX5_ASSERT(dv_resource_ptr);
13358 	dev_flow->nt2hws->modify_hdr = dv_resource_ptr;
13359 	/* keep action for the rule construction. */
13360 	hw_acts->rule_acts[hw_acts->mhdr->pos].action = dv_resource_ptr->action;
13361 	/* Bulk size is 1, so index is 1. */
13362 	dev_flow->res_idx = 1;
13363 	return 0;
13364 }
13365 
13366 static int
13367 flow_hw_encap_decap_resource_register
13368 			(struct rte_eth_dev *dev,
13369 			 struct rte_flow_template_table *table,
13370 			 struct mlx5_hw_actions *hw_acts,
13371 			 struct rte_flow_hw *dev_flow,
13372 			 struct rte_flow_error *error)
13373 {
13374 	struct rte_flow_attr *attr = &table->cfg.attr.flow_attr;
13375 	struct mlx5_flow_dv_encap_decap_resource *dv_resource_ptr = NULL;
13376 	struct mlx5_flow_dv_encap_decap_resource dv_resource;
13377 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
13378 	int ret;
13379 	bool is_root;
13380 	int ix;
13381 
13382 	if (hw_acts->encap_decap)
13383 		dv_resource.reformat_type = hw_acts->encap_decap->action_type;
13384 	else
13385 		return 0;
13386 	FLOW_HW_SET_DV_FIELDS(attr, is_root, dv_resource);
13387 	ix = mlx5_bwc_multi_pattern_reformat_to_index((enum mlx5dr_action_type)
13388 			dv_resource.reformat_type);
13389 	if (ix < 0)
13390 		return ix;
13391 	if (hw_acts->encap_decap->shared) {
13392 		dv_resource.size = hw_acts->encap_decap->data_size;
13393 		MLX5_ASSERT(dv_resource.size <= MLX5_ENCAP_MAX_LEN);
13394 		memcpy(&dv_resource.buf, hw_acts->encap_decap->data, dv_resource.size);
13395 		dv_resource.flags |= MLX5DR_ACTION_FLAG_SHARED;
13396 	} else {
13397 		typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + ix;
13398 		if (!reformat->elements_num)
13399 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
13400 					NULL, "No reformat action exists in the table.");
13401 		dv_resource.size = reformat->reformat_hdr->sz;
13402 		MLX5_ASSERT(dv_resource.size <= MLX5_ENCAP_MAX_LEN);
13403 		memcpy(&dv_resource.buf, reformat->reformat_hdr->data, dv_resource.size);
13404 	}
13405 	ret = __flow_encap_decap_resource_register(dev, &dv_resource, is_root,
13406 		&dv_resource_ptr, error);
13407 	if (ret)
13408 		return ret;
13409 	MLX5_ASSERT(dv_resource_ptr);
13410 	dev_flow->nt2hws->rix_encap_decap = dv_resource_ptr->idx;
13411 	/* keep action for the rule construction. */
13412 	if (hw_acts->encap_decap->shared)
13413 		hw_acts->rule_acts[hw_acts->encap_decap_pos].action = dv_resource_ptr->action;
13414 	else
13415 		mpctx->segments[0].reformat_action[ix] = dv_resource_ptr->action;
13416 	/* Bulk size is 1, so index is 1. */
13417 	dev_flow->res_idx = 1;
13418 	return 0;
13419 }
13420 
13421 static enum rte_flow_action_type
13422 flow_nta_get_indirect_action_type(const struct rte_flow_action *action)
13423 {
13424 	switch (MLX5_INDIRECT_ACTION_TYPE_GET(action->conf)) {
13425 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
13426 		return RTE_FLOW_ACTION_TYPE_RSS;
13427 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
13428 		return RTE_FLOW_ACTION_TYPE_AGE;
13429 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13430 		return RTE_FLOW_ACTION_TYPE_COUNT;
13431 	case MLX5_INDIRECT_ACTION_TYPE_CT:
13432 		return RTE_FLOW_ACTION_TYPE_CONNTRACK;
13433 	default:
13434 		break;
13435 	}
13436 	return RTE_FLOW_ACTION_TYPE_END;
13437 }
13438 
13439 static void
13440 flow_nta_set_mh_mask_conf(const struct rte_flow_action_modify_field *action_conf,
13441 			  struct rte_flow_action_modify_field *mask_conf)
13442 {
13443 	memset(mask_conf, 0xff, sizeof(*mask_conf));
13444 	mask_conf->operation = action_conf->operation;
13445 	mask_conf->dst.field = action_conf->dst.field;
13446 	mask_conf->src.field = action_conf->src.field;
13447 }
13448 
13449 union actions_conf {
13450 	struct rte_flow_action_modify_field modify_field;
13451 	struct rte_flow_action_raw_encap raw_encap;
13452 	struct rte_flow_action_vxlan_encap vxlan_encap;
13453 	struct rte_flow_action_nvgre_encap nvgre_encap;
13454 };
13455 
13456 static int
13457 flow_nta_build_template_mask(const struct rte_flow_action actions[],
13458 			     struct rte_flow_action masks[MLX5_HW_MAX_ACTS],
13459 			     union actions_conf mask_conf[MLX5_HW_MAX_ACTS])
13460 {
13461 	int i;
13462 
13463 	for (i = 0; i == 0 || actions[i - 1].type != RTE_FLOW_ACTION_TYPE_END; i++) {
13464 		const struct rte_flow_action *action = &actions[i];
13465 		struct rte_flow_action *mask = &masks[i];
13466 		union actions_conf *conf = &mask_conf[i];
13467 
13468 		mask->type = action->type;
13469 		switch (action->type) {
13470 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
13471 			mask->type = flow_nta_get_indirect_action_type(action);
13472 			if (!mask->type)
13473 				return -EINVAL;
13474 			break;
13475 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13476 			flow_nta_set_mh_mask_conf(action->conf, (void *)conf);
13477 			mask->conf = conf;
13478 			break;
13479 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13480 			/* This mask will set this action as shared. */
13481 			memset(conf, 0xff, sizeof(struct rte_flow_action_raw_encap));
13482 			mask->conf = conf;
13483 			break;
13484 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13485 			/* This mask will set this action as shared. */
13486 			conf->vxlan_encap.definition =
13487 				((const struct rte_flow_action_vxlan_encap *)
13488 					action->conf)->definition;
13489 			mask->conf = conf;
13490 			break;
13491 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13492 			/* This mask will set this action as shared. */
13493 			conf->nvgre_encap.definition =
13494 				((const struct rte_flow_action_nvgre_encap *)
13495 					action->conf)->definition;
13496 			mask->conf = conf;
13497 			break;
13498 		default:
13499 			break;
13500 		}
13501 	}
13502 	return 0;
13503 #undef NTA_CHECK_CONF_BUF_SIZE
13504 }
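
/*
 * Illustration only (not part of the driver): given the application actions
 * below, the helper above emits a mask array where RAW_ENCAP gets an all-ones
 * mask (making it shared for the template) while QUEUE stays unmasked and is
 * therefore filled in during action construct. The encap_conf and queue_conf
 * values are assumptions.
 *
 * @code{.c}
 * const struct rte_flow_action app_actions[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap_conf },
 *     { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_action masks[MLX5_HW_MAX_ACTS] = { 0 };
 * union actions_conf mask_conf[MLX5_HW_MAX_ACTS] = { 0 };
 *
 * flow_nta_build_template_mask(app_actions, masks, mask_conf);
 * // masks[0].conf points to an all-ones rte_flow_action_raw_encap,
 * // masks[1].conf stays NULL, masks[2].type is RTE_FLOW_ACTION_TYPE_END.
 * @endcode
 */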
13505 
13506 static int
13507 flow_hw_translate_flow_actions(struct rte_eth_dev *dev,
13508 			       const struct rte_flow_attr *attr,
13509 			       const struct rte_flow_action actions[],
13510 			       struct rte_flow_hw *flow,
13511 			       struct mlx5_flow_hw_action_params *ap,
13512 			       struct mlx5_hw_actions *hw_acts,
13513 			       uint64_t item_flags, uint64_t action_flags,
13514 			       bool external,
13515 			       struct rte_flow_error *error)
13516 {
13517 	int ret = 0;
13518 	uint32_t src_group = 0;
13519 	enum mlx5dr_table_type table_type;
13520 	struct mlx5_flow_group grp;
13521 	struct rte_flow_actions_template *at = NULL;
13522 	struct rte_flow_actions_template_attr template_attr = {
13523 		.egress = attr->egress,
13524 		.ingress = attr->ingress,
13525 		.transfer = attr->transfer,
13526 	};
13527 	struct rte_flow_action masks[MLX5_HW_MAX_ACTS];
13528 	union actions_conf mask_conf[MLX5_HW_MAX_ACTS];
13529 
13530 	RTE_SET_USED(action_flags);
13531 	memset(masks, 0, sizeof(masks));
13532 	memset(mask_conf, 0, sizeof(mask_conf));
13533 	/* Only set the needed fields explicitly. */
13534 	struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
13535 	struct rte_flow_template_table *table;
13536 
13537 	/*
13538 	 * Notice: all direct actions will be unmasked,
13539 	 * except for modify header and encap,
13540 	 * and therefore will be parsed as part of action construct.
13541 	 * Modify header is always shared in HWS,
13542 	 * encap is masked such that it will be treated as shared.
13543 	 * Shared actions will be parsed as part of template translation
13544 	 * and not during action construct.
13545 	 */
13546 	if (!wks)
13547 		return rte_flow_error_set(error, ENOMEM,
13548 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13549 					  NULL,
13550 					  "failed to push flow workspace");
13551 	table = wks->table;
13552 	flow_nta_build_template_mask(actions, masks, mask_conf);
13553 	/* The group in the attribute translation was done in advance. */
13554 	ret = __translate_group(dev, attr, external, attr->group, &src_group, error);
13555 	if (ret)
13556 		return ret;
13557 	if (attr->transfer)
13558 		table_type = MLX5DR_TABLE_TYPE_FDB;
13559 	else if (attr->egress)
13560 		table_type = MLX5DR_TABLE_TYPE_NIC_TX;
13561 	else
13562 		table_type = MLX5DR_TABLE_TYPE_NIC_RX;
13563 	at = __flow_hw_actions_template_create(dev, &template_attr, actions, masks, true, error);
13564 	if (!at) {
13565 		ret = -rte_errno;
13566 		goto end;
13567 	}
13568 	grp.group_id = src_group;
13569 	table->grp = &grp;
13570 	table->type = table_type;
13571 	table->cfg.external = external;
13572 	table->nb_action_templates = 1;
13573 	memcpy(&table->cfg.attr.flow_attr, attr, sizeof(*attr));
13574 	table->ats[0].action_template = at;
13575 	ret = __flow_hw_translate_actions_template(dev, &table->cfg, hw_acts, at,
13576 						   &table->mpctx, true, error);
13577 	if (ret)
13578 		goto end;
13579 	/* Handle bulk actions registration. */
13580 	ret = flow_hw_encap_decap_resource_register(dev, table, hw_acts, flow, error);
13581 	if (ret)
13582 		goto end;
13583 	ret = flow_hw_modify_hdr_resource_register(dev, table, hw_acts, flow, error);
13584 	if (ret)
13585 		goto end;
13586 	table->ats[0].acts = *hw_acts;
13587 	ret = flow_hw_actions_construct(dev, flow, ap,
13588 					&table->ats[0], item_flags, table,
13589 					actions, hw_acts->rule_acts, 0, error);
13590 	if (ret)
13591 		goto end;
13593 end:
13594 	if (ret)
13595 		/* Make sure that there is no garbage in the actions. */
13596 		__flow_hw_action_template_destroy(dev, hw_acts);
13597 	else
13598 		__flow_hw_act_data_flush(dev, hw_acts);
13599 	if (at)
13600 		mlx5_free(at);
13601 	mlx5_flow_pop_thread_workspace();
13602 	return ret;
13603 }
13604 
13605 static int
13606 flow_hw_unregister_matcher(struct rte_eth_dev *dev,
13607 			   struct mlx5_flow_dv_matcher *matcher)
13608 {
13609 	struct mlx5_priv *priv = dev->data->dev_private;
13610 	struct mlx5_flow_group *group = matcher->group;
13611 	int ret = 0;
13612 
13613 	if (group) {
13614 		if (matcher->matcher_object)
13615 			ret |= mlx5_list_unregister(group->matchers, &matcher->entry);
13616 		ret |= mlx5_hlist_unregister(priv->sh->groups, &group->entry);
13617 	}
13618 	return ret;
13619 }
13620 
13621 static int flow_hw_register_matcher(struct rte_eth_dev *dev,
13622 				    const struct rte_flow_attr *attr,
13623 				    const struct rte_flow_item items[],
13624 				    bool external,
13625 				    struct rte_flow_hw *flow,
13626 				    struct mlx5_flow_dv_matcher *matcher,
13627 				    struct rte_flow_error *error)
13628 {
13629 	struct mlx5_priv *priv = dev->data->dev_private;
13630 	struct rte_flow_error sub_error = {
13631 		.type = RTE_FLOW_ERROR_TYPE_NONE,
13632 		.cause = NULL,
13633 		.message = NULL,
13634 	};
13635 	struct rte_flow_attr flow_attr = *attr;
13636 	uint32_t specialize = 0; /* No unified FDB. */
13637 	struct mlx5_flow_cb_ctx ctx = {
13638 		.dev = dev,
13639 		.error = &sub_error,
13640 		.data = &flow_attr,
13641 		.data2 = &specialize,
13642 	};
13643 	void *items_ptr = &items;
13644 	struct mlx5_flow_cb_ctx matcher_ctx = {
13645 		.error = &sub_error,
13646 		.data = matcher,
13647 		.data2 = items_ptr,
13648 	};
13649 	struct mlx5_list_entry *group_entry = NULL;
13650 	struct mlx5_list_entry *matcher_entry = NULL;
13651 	struct mlx5_flow_dv_matcher *resource;
13652 	struct mlx5_list *matchers_list;
13653 	struct mlx5_flow_group *flow_group;
13654 	int ret;
13655 
13657 	matcher->crc = rte_raw_cksum((const void *)matcher->mask.buf,
13658 				    matcher->mask.size);
13659 	matcher->priority = attr->priority;
13660 	ret = __translate_group(dev, attr, external, attr->group, &flow_attr.group, error);
13661 	if (ret)
13662 		return ret;
13663 
13664 	/* Register the flow group. */
13665 	group_entry = mlx5_hlist_register(priv->sh->groups, flow_attr.group, &ctx);
13666 	if (!group_entry)
13667 		goto error;
13668 	flow_group = container_of(group_entry, struct mlx5_flow_group, entry);
13669 
13670 	matchers_list = flow_group->matchers;
13671 	matcher->group = flow_group;
13672 	matcher_entry = mlx5_list_register(matchers_list, &matcher_ctx);
13673 	if (!matcher_entry)
13674 		goto error;
13675 	resource = container_of(matcher_entry, typeof(*resource), entry);
13676 	flow->nt2hws->matcher = resource;
13677 	return 0;
13678 
13679 error:
13680 	if (group_entry)
13681 		mlx5_hlist_unregister(priv->sh->groups, group_entry);
13682 	if (error) {
13683 		if (sub_error.type != RTE_FLOW_ERROR_TYPE_NONE)
13684 			rte_memcpy(error, &sub_error, sizeof(sub_error));
13685 	}
13686 	return rte_flow_error_set(error, ENOMEM,
13687 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13688 					NULL, "fail to register matcher");
13689 }
13690 
13691 static int
13692 flow_hw_allocate_actions(struct rte_eth_dev *dev,
13693 			 uint64_t action_flags,
13694 			 struct rte_flow_error *error)
13695 {
13696 	struct mlx5_priv *priv = dev->data->dev_private;
13697 	int ret;
13698 	uint obj_num;
13699 
13700 	error->type = RTE_FLOW_ERROR_TYPE_NONE;
13701 	if (action_flags & MLX5_FLOW_ACTION_AGE) {
13702 		/* If no age objects were previously allocated. */
13703 		if (!priv->hws_age_req) {
13704 			/* If no counters were previously allocated. */
13705 			if (!priv->hws_cpool) {
13706 				obj_num = MLX5_CNT_NT_MAX(priv);
13707 				ret = mlx5_hws_cnt_pool_create(dev, obj_num,
13708 							       priv->nb_queue,
13709 							       NULL, error);
13710 				if (ret)
13711 					goto err;
13712 			}
13713 			/* Allocate same number of counters. */
13714 			ret = mlx5_hws_age_pool_init(dev, priv->hws_cpool->cfg.request_num,
13715 						     priv->nb_queue, false);
13716 			if (ret)
13717 				goto err;
13718 		}
13719 	}
13720 	if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13721 		/* If no counters were previously allocated. */
13722 		if (!priv->hws_cpool) {
13723 			obj_num = MLX5_CNT_NT_MAX(priv);
13724 			ret = mlx5_hws_cnt_pool_create(dev, obj_num,
13725 						       priv->nb_queue, NULL,
13726 						       error);
13727 			if (ret)
13728 				goto err;
13729 		}
13730 	}
13731 	if (action_flags & MLX5_FLOW_ACTION_CT) {
13732 		/* If no CT were previously allocated. */
13733 		if (!priv->hws_ctpool) {
13734 			obj_num = MLX5_CT_NT_MAX(priv);
13735 			ret = mlx5_flow_ct_init(dev, obj_num, priv->nb_queue);
13736 			if (ret)
13737 				goto err;
13738 		}
13739 	}
13740 	if (action_flags & MLX5_FLOW_ACTION_METER) {
13741 		/* If no meters were previously allocated. */
13742 		if (!priv->hws_mpool) {
13743 			obj_num = MLX5_MTR_NT_MAX(priv);
13744 			ret = mlx5_flow_meter_init(dev, obj_num, 0, 0,
13745 						   priv->nb_queue);
13746 			if (ret)
13747 				goto err;
13748 		}
13749 	}
13750 	return 0;
13751 err:
13752 	if (ret && error->type != RTE_FLOW_ERROR_TYPE_NONE)
13753 		return ret;
13754 	return rte_flow_error_set(error, ret,
13755 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13756 				  NULL, "fail to allocate actions");
13757 }
13758 
13759 static int flow_hw_apply(const struct rte_flow_item items[],
13760 			 struct mlx5dr_rule_action rule_actions[],
13761 			 struct rte_flow_hw *flow,
13762 			 struct rte_flow_error *error)
13763 {
13764 	struct mlx5dr_bwc_rule *rule = NULL;
13765 
13766 	rule = mlx5dr_bwc_rule_create((struct mlx5dr_bwc_matcher *)
13767 		flow->nt2hws->matcher->matcher_object,
13768 		items, rule_actions);
13769 	flow->nt2hws->nt_rule = rule;
13770 	if (!rule) {
13771 		return rte_flow_error_set(error, EINVAL,
13772 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13773 			NULL, "fail to create rte flow");
13774 	}
13775 	return 0;
13776 }
13777 
13778 #ifdef HAVE_MLX5_HWS_SUPPORT
13779 /**
13780  * Create a flow.
13781  *
13782  * @param[in] dev
13783  *   Pointer to Ethernet device.
13784  * @param[in] type
13785  *   Flow type.
13786  * @param[in] attr
13787  *   Flow rule attributes.
13788  * @param[in] items
13789  *   Pattern specification (list terminated by the END pattern item).
13790  * @param[in] actions
13791  *   Associated actions (list terminated by the END action).
13792  * @param[in] external
13793  *   This flow rule is created by request external to PMD.
13794  * @param[out] flow
13795  *   Flow pointer
13796  * @param[out] error
13797  *   Perform verbose error reporting if not NULL.
13798  *
13799  * @return
13800  *   0 on success, negative errno value otherwise and rte_errno set.
13801  */
13802 int
13803 flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type,
13804 		    const struct rte_flow_attr *attr,
13805 		    const struct rte_flow_item items[],
13806 		    const struct rte_flow_action actions[],
13807 		    uint64_t item_flags, uint64_t action_flags, bool external,
13808 		    struct rte_flow_hw **flow, struct rte_flow_error *error)
13809 {
13810 	int ret;
13811 	struct mlx5_hw_actions hw_act = { { NULL } };
13812 	struct mlx5_flow_hw_action_params ap;
13813 	struct mlx5_flow_dv_matcher matcher = {
13814 		.mask = {
13815 			.size = sizeof(matcher.mask.buf),
13816 		},
13817 	};
13818 	uint32_t tbl_type;
13819 
13820 	struct mlx5_flow_attr flow_attr = {
13821 		.port_id = dev->data->port_id,
13822 		.group = attr->group,
13823 		.priority = attr->priority,
13824 		.rss_level = 0,
13825 		.act_flags = action_flags,
13826 		.tbl_type = 0,
13827 	};
13828 
13829 	if (attr->transfer)
13830 		tbl_type = MLX5DR_TABLE_TYPE_FDB;
13831 	else if (attr->egress)
13832 		tbl_type = MLX5DR_TABLE_TYPE_NIC_TX;
13833 	else
13834 		tbl_type = MLX5DR_TABLE_TYPE_NIC_RX;
13835 	flow_attr.tbl_type = tbl_type;
13836 
13837 	/* Allocate needed memory. */
13838 	ret = flow_hw_prepare(dev, actions, type, flow, error);
13839 	if (ret)
13840 		goto error;
13841 
13842 	/* TODO TBD flow_hw_handle_tunnel_offload(). */
13843 	(*flow)->nt_rule = true;
13844 	(*flow)->nt2hws->matcher = &matcher;
13845 	ret = flow_dv_translate_items_hws(items, &flow_attr, &matcher.mask.buf,
13846 					MLX5_SET_MATCHER_HS_M, NULL,
13847 					NULL, error);
13848 
13849 	if (ret)
13850 		goto error;
13851 
13852 	ret = flow_hw_register_matcher(dev, attr, items, external, *flow, &matcher, error);
13853 	if (ret)
13854 		goto error;
13855 
13856 	/*
13857 	 * ASO allocation: iterate over the actions list to allocate missing resources.
13858 	 * In the future, when a validate function is added to HWS,
13859 	 * the output actions bit mask can be used instead of
13860 	 * looping on the actions array twice.
13861 	 */
13862 	ret = flow_hw_allocate_actions(dev, action_flags, error);
13863 	if (ret)
13864 		goto error;
13865 
13866 	/* Note: the actions should be saved in the sub-flow rule itself for reference. */
13867 	ret = flow_hw_translate_flow_actions(dev, attr, actions, *flow, &ap, &hw_act,
13868 					item_flags, action_flags, external, error);
13869 	if (ret)
13870 		goto error;
13871 
13872 	/*
13873 	 * If the flow is external (from application) OR device is started,
13874 	 * OR mreg discover, then apply immediately.
13875 	 */
13876 	if (external || dev->data->dev_started ||
13877 	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
13878 	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
13879 		ret = flow_hw_apply(items, hw_act.rule_acts, *flow, error);
13880 		if (ret)
13881 			goto error;
13882 	}
13883 	ret = 0;
13884 error:
13885 	/*
13886 	 * Release the allocated memory.
13887 	 * Cannot use __flow_hw_actions_release(dev, &hw_act);
13888 	 * since it destroys the actions as well.
13889 	 */
13890 	if (hw_act.encap_decap)
13891 		mlx5_free(hw_act.encap_decap);
13892 	if (hw_act.push_remove)
13893 		mlx5_free(hw_act.push_remove);
13894 	if (hw_act.mhdr)
13895 		mlx5_free(hw_act.mhdr);
13896 	if (ret) {
13897 		/* release after actual error */
13898 		if ((*flow)->nt2hws && (*flow)->nt2hws->matcher)
13899 			flow_hw_unregister_matcher(dev, (*flow)->nt2hws->matcher);
13900 	}
13901 	return ret;
13902 }
13903 #endif
13904 
13905 void
13906 flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow)
13907 {
13908 	int ret;
13909 	struct mlx5_priv *priv = dev->data->dev_private;
13910 
13911 	if (!flow || !flow->nt2hws)
13912 		return;
13913 
13914 	if (flow->nt2hws->nt_rule) {
13915 		ret = mlx5dr_bwc_rule_destroy(flow->nt2hws->nt_rule);
13916 		if (ret)
13917 			DRV_LOG(ERR, "bwc rule destroy failed");
13918 	}
13919 	flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY;
13920 	/* Notice this function does not handle shared/static actions. */
13921 	hw_cmpl_flow_update_or_destroy(dev, flow, 0, NULL);
13922 
13923 	/**
13924 	 * TODO: TBD - Release tunnel related memory allocations (mlx5_flow_tunnel_free),
13925 	 * needed only if supporting tunnel offloads; notice the RX queue flags update in SWS.
13926 	 */
13927 
13928 	 /**
13929 	  * Notice matcher destroy will take place when the matcher's list is
13930 	  * destroyed, same as for DV.
13931 	  */
13932 	if (flow->nt2hws->flow_aux)
13933 		mlx5_free(flow->nt2hws->flow_aux);
13934 
13935 	if (flow->nt2hws->rix_encap_decap)
13936 		flow_encap_decap_resource_release(dev, flow->nt2hws->rix_encap_decap);
13937 	if (flow->nt2hws->modify_hdr) {
13938 		MLX5_ASSERT(flow->nt2hws->modify_hdr->action);
13939 		mlx5_hlist_unregister(priv->sh->modify_cmds,
13940 				      &flow->nt2hws->modify_hdr->entry);
13941 	}
13942 	if (flow->nt2hws->matcher)
13943 		flow_hw_unregister_matcher(dev, flow->nt2hws->matcher);
13944 }
13945 
13946 #ifdef HAVE_MLX5_HWS_SUPPORT
13947 /**
13948  * Destroy a flow.
13949  *
13950  * @param[in] dev
13951  *   Pointer to Ethernet device.
13952  * @param[in] type
13953  *   Flow type.
13954  * @param[in] flow_addr
13955  *   Address of flow to destroy.
13956  */
13957 void
13958 flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
13959 		     uintptr_t flow_addr)
13960 {
13961 	struct mlx5_priv *priv = dev->data->dev_private;
13962 	struct rte_flow_hw *flow = (struct rte_flow_hw *)flow_addr;
13963 	struct mlx5_nta_rss_flow_head head = { .slh_first = flow };
13964 
13965 	if (!flow || !flow->nt2hws || flow->nt2hws->chaned_flow)
13966 		return;
13967 	mlx5_flow_nta_del_copy_action(dev, flow->nt2hws->rix_mreg_copy);
13968 	while (!SLIST_EMPTY(&head)) {
13969 		flow = SLIST_FIRST(&head);
13970 		SLIST_REMOVE_HEAD(&head, nt2hws->next);
13971 		flow_hw_destroy(dev, flow);
13972 		/* Release flow memory by idx */
13973 		mlx5_ipool_free(priv->flows[type], flow->idx);
13974 	}
13975 }
13976 #endif
13977 
13978 /**
13979  * Create a flow.
13980  *
13981  * @param[in] dev
13982  *   Pointer to Ethernet device.
13983  * @param[in] type
13984  *   Flow type.
13985  * @param[in] attr
13986  *   Flow rule attributes.
13987  * @param[in] items
13988  *   Pattern specification (list terminated by the END pattern item).
13989  * @param[in] actions
13990  *   Associated actions (list terminated by the END action).
13991  * @param[in] external
13992  *   This flow rule is created by request external to PMD.
13993  * @param[out] error
13994  *   Perform verbose error reporting if not NULL.
13995  *
13996  * @return
13997  *   A flow addr on success, 0 otherwise and rte_errno is set.
13998  */
13999 static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
14000 				     enum mlx5_flow_type type,
14001 				     const struct rte_flow_attr *attr,
14002 				     const struct rte_flow_item items[],
14003 				     const struct rte_flow_action actions[],
14004 				     bool external,
14005 				     struct rte_flow_error *error)
14006 {
14007 	int ret;
14008 	int split;
14009 	int encap_idx;
14010 	uint32_t cpy_idx = 0;
14011 	int actions_n = 0;
14012 	struct rte_flow_hw *flow = NULL;
14013 	struct rte_flow_hw *prfx_flow = NULL;
14014 	const struct rte_flow_action *qrss = NULL;
14015 	const struct rte_flow_action *mark = NULL;
14016 	uint64_t item_flags = flow_hw_matching_item_flags_get(items);
14017 	uint64_t action_flags = flow_hw_action_flags_get(actions, &qrss, &mark,
14018 							 &encap_idx, &actions_n, error);
14019 	struct mlx5_flow_hw_split_resource resource = {
14020 		.suffix = {
14021 			.attr = attr,
14022 			.items = items,
14023 			.actions = actions,
14024 		},
14025 	};
14026 	struct rte_flow_error shadow_error = {0, };
14027 
14028 	/*
14029 	 * TODO: add a call to the flow_hw_validate function once it exists,
14030 	 * and update mlx5_flow_hw_drv_ops accordingly.
14031 	 */
14032 
14033 	RTE_SET_USED(encap_idx);
14034 	if (!error)
14035 		error = &shadow_error;
14036 	split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
14037 					     actions_n, external, &resource, error);
14038 	if (split < 0)
14039 		return split;
14040 
14041 	/* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
14042 	if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
14043 	     attr->transfer) && external) {
14044 		ret = mlx5_flow_nta_update_copy_table(dev, &cpy_idx, mark,
14045 						      action_flags, error);
14046 		if (ret)
14047 			goto free;
14048 	}
14049 
14050 	if (action_flags & MLX5_FLOW_ACTION_RSS) {
14051 		const struct rte_flow_action_rss
14052 			*rss_conf = flow_nta_locate_rss(dev, actions, error);
14053 		flow = flow_nta_handle_rss(dev, attr, items, actions, rss_conf,
14054 					   item_flags, action_flags, external,
14055 					   type, error);
14056 		if (flow) {
14057 			flow->nt2hws->rix_mreg_copy = cpy_idx;
14058 			cpy_idx = 0;
14059 			if (!split)
14060 				return (uintptr_t)flow;
14061 			goto prefix_flow;
14062 		}
14063 		goto free;
14064 	}
14065 	/* Create single flow. */
14066 	ret = flow_hw_create_flow(dev, type, resource.suffix.attr, resource.suffix.items,
14067 				  resource.suffix.actions, item_flags, action_flags,
14068 				  external, &flow, error);
14069 	if (ret)
14070 		goto free;
14071 	if (flow) {
14072 		flow->nt2hws->rix_mreg_copy = cpy_idx;
14073 		cpy_idx = 0;
14074 		if (!split)
14075 			return (uintptr_t)flow;
14076 		/* Fall Through to prefix flow creation. */
14077 	}
14078 prefix_flow:
14079 	ret = flow_hw_create_flow(dev, type, attr, items, resource.prefix.actions,
14080 				  item_flags, action_flags, external, &prfx_flow, error);
14081 	if (ret)
14082 		goto free;
14083 	if (prfx_flow) {
14084 		prfx_flow->nt2hws->rix_mreg_copy = flow->nt2hws->rix_mreg_copy;
14085 		flow->nt2hws->chaned_flow = 1;
14086 		SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
14087 		mlx5_flow_nta_split_resource_free(dev, &resource);
14088 		return (uintptr_t)prfx_flow;
14089 	}
14090 free:
14091 	if (prfx_flow)
14092 		flow_hw_list_destroy(dev, type, (uintptr_t)prfx_flow);
14093 	if (flow)
14094 		flow_hw_list_destroy(dev, type, (uintptr_t)flow);
14095 	if (cpy_idx)
14096 		mlx5_flow_nta_del_copy_action(dev, cpy_idx);
14097 	if (split > 0)
14098 		mlx5_flow_nta_split_resource_free(dev, &resource);
14099 	return 0;
14100 }
14101 
14102 static void
14103 mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
14104 			  struct mlx5_mirror_clone *clone)
14105 {
14106 	switch (clone->type) {
14107 	case RTE_FLOW_ACTION_TYPE_RSS:
14108 	case RTE_FLOW_ACTION_TYPE_QUEUE:
14109 		mlx5_hrxq_release(dev,
14110 				  ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
14111 		break;
14112 	case RTE_FLOW_ACTION_TYPE_JUMP:
14113 		flow_hw_jump_release(dev, clone->action_ctx);
14114 		break;
14115 	case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14116 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14117 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14118 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14119 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14120 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14121 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14122 	default:
14123 		break;
14124 	}
14125 }
14126 
14127 void
14128 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
14129 {
14130 	uint32_t i;
14131 
14132 	mlx5_indirect_list_remove_entry(&mirror->indirect);
14133 	for (i = 0; i < mirror->clones_num; i++)
14134 		mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
14135 	if (mirror->mirror_action)
14136 		mlx5dr_action_destroy(mirror->mirror_action);
14137 	mlx5_free(mirror);
14138 }
14139 
14140 static __rte_always_inline bool
14141 mlx5_mirror_terminal_action(const struct rte_flow_action *action)
14142 {
14143 	switch (action->type) {
14144 	case RTE_FLOW_ACTION_TYPE_JUMP:
14145 	case RTE_FLOW_ACTION_TYPE_RSS:
14146 	case RTE_FLOW_ACTION_TYPE_QUEUE:
14147 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14148 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14149 	case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14150 		return true;
14151 	default:
14152 		break;
14153 	}
14154 	return false;
14155 }
14156 
14157 static bool
14158 mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
14159 				   const struct rte_flow_attr *flow_attr,
14160 				   const struct rte_flow_action *action)
14161 {
14162 	struct mlx5_priv *priv = dev->data->dev_private;
14163 	const struct rte_flow_action_ethdev *port = NULL;
14164 	bool is_proxy = MLX5_HW_PORT_IS_PROXY(priv);
14165 
14166 	if (!action)
14167 		return false;
14168 	switch (action->type) {
14169 	case RTE_FLOW_ACTION_TYPE_QUEUE:
14170 	case RTE_FLOW_ACTION_TYPE_RSS:
14171 		if (flow_attr->transfer)
14172 			return false;
14173 		break;
14174 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14175 		if (!is_proxy || !flow_attr->transfer)
14176 			return false;
14177 		port = action->conf;
14178 		if (!port || port->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
14179 			return false;
14180 		break;
14181 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14182 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14183 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14184 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14185 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14186 		if (!is_proxy || !flow_attr->transfer)
14187 			return false;
14188 		if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
14189 		    action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
14190 			return false;
14191 		break;
14192 	case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14193 		break;
14194 	default:
14195 		return false;
14196 	}
14197 	return true;
14198 }
14199 
14200 /**
14201  * A valid mirror actions list includes one or two SAMPLE actions
14202  * followed by a terminal action - see mlx5_mirror_terminal_action().
14203  *
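 * For illustration, a hypothetical two-clone list that passes this check
 * (sample_1, sample_2 and jump are assumed to be set up by the caller):
 *
 * @code{.c}
 * const struct rte_flow_action mirror_actions[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample_1 },
 *     { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample_2 },
 *     { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 *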
14204  * @return
14205  * Number of mirror clones if the *actions* list was valid,
14206  * -EINVAL otherwise.
14207  */
14208 static int
14209 mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
14210 				     const struct rte_flow_attr *flow_attr,
14211 				     const struct rte_flow_action *actions)
14212 {
14213 	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14214 		int i = 1;
14215 		bool valid;
14216 		const struct rte_flow_action_sample *sample = actions[0].conf;
14217 		valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
14218 							   sample->actions);
14219 		if (!valid)
14220 			return -EINVAL;
14221 		if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14222 			i = 2;
14223 			sample = actions[1].conf;
14224 			valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
14225 								   sample->actions);
14226 			if (!valid)
14227 				return -EINVAL;
14228 		}
14229 		return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
14230 	}
14231 	return -EINVAL;
14232 }
14233 
14234 static int
14235 mirror_format_tir(struct rte_eth_dev *dev,
14236 		  struct mlx5_mirror_clone *clone,
14237 		  const struct mlx5_flow_template_table_cfg *table_cfg,
14238 		  const struct rte_flow_action *action,
14239 		  struct mlx5dr_action_dest_attr *dest_attr,
14240 		  struct rte_flow_error *error)
14241 {
14242 	uint32_t hws_flags;
14243 	enum mlx5dr_table_type table_type;
14244 	struct mlx5_hrxq *tir_ctx;
14245 
14246 	table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
14247 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
14248 	tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
14249 	if (!tir_ctx)
14250 		return rte_flow_error_set(error, EINVAL,
14251 					  RTE_FLOW_ERROR_TYPE_ACTION,
14252 					  action, "failed to create QUEUE action for mirror clone");
14253 	dest_attr->dest = tir_ctx->action;
14254 	clone->action_ctx = tir_ctx;
14255 	return 0;
14256 }
14257 
14258 static int
14259 mirror_format_jump(struct rte_eth_dev *dev,
14260 		   struct mlx5_mirror_clone *clone,
14261 		   const struct mlx5_flow_template_table_cfg *table_cfg,
14262 		   const struct rte_flow_action *action,
14263 		   struct mlx5dr_action_dest_attr *dest_attr,
14264 		   struct rte_flow_error *error)
14265 {
14266 	const struct rte_flow_action_jump *jump_conf = action->conf;
14267 	struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
14268 						(dev, table_cfg,
14269 						 jump_conf->group, error);
14270 
14271 	if (!jump)
14272 		return rte_flow_error_set(error, EINVAL,
14273 					  RTE_FLOW_ERROR_TYPE_ACTION,
14274 					  action, "failed to create JUMP action for mirror clone");
14275 	dest_attr->dest = jump->hws_action;
14276 	clone->action_ctx = jump;
14277 	return 0;
14278 }
14279 
14280 static int
14281 mirror_format_port(struct rte_eth_dev *dev,
14282 		   const struct rte_flow_action *action,
14283 		   struct mlx5dr_action_dest_attr *dest_attr,
14284 		   struct rte_flow_error __rte_unused *error)
14285 {
14286 	struct mlx5_priv *priv = dev->data->dev_private;
14287 	const struct rte_flow_action_ethdev *port_action = action->conf;
14288 
14289 	dest_attr->dest = priv->hw_vport[port_action->port_id];
14290 	return 0;
14291 }
14292 
14293 static int
14294 hw_mirror_clone_reformat(const struct rte_flow_action *actions,
14295 			 struct mlx5dr_action_dest_attr *dest_attr,
14296 			 enum mlx5dr_action_type *action_type,
14297 			 uint8_t *reformat_buf, bool decap)
14298 {
14299 	int ret;
14300 	const struct rte_flow_item *encap_item = NULL;
14301 	const struct rte_flow_action_raw_encap *encap_conf = NULL;
14302 	typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
14303 
14304 	switch (actions[0].type) {
14305 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14306 		encap_conf = actions[0].conf;
14307 		break;
14308 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14309 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
14310 						   actions);
14311 		break;
14312 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14313 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
14314 						   actions);
14315 		break;
14316 	default:
14317 		return -EINVAL;
14318 	}
14319 	*action_type = decap ?
14320 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
14321 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
14322 	if (encap_item) {
14323 		ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
14324 						 &reformat->reformat_data_sz, NULL);
14325 		if (ret)
14326 			return -EINVAL;
14327 		reformat->reformat_data = reformat_buf;
14328 	} else {
14329 		reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
14330 		reformat->reformat_data_sz = encap_conf->size;
14331 	}
14332 	return 0;
14333 }
14334 
14335 static int
14336 hw_mirror_format_clone(struct rte_eth_dev *dev,
14337 			struct mlx5_mirror_clone *clone,
14338 			const struct mlx5_flow_template_table_cfg *table_cfg,
14339 			const struct rte_flow_action *actions,
14340 			struct mlx5dr_action_dest_attr *dest_attr,
14341 			uint8_t *reformat_buf, struct rte_flow_error *error)
14342 {
14343 	struct mlx5_priv *priv = dev->data->dev_private;
14344 	int ret;
14345 	uint32_t i;
14346 	bool decap_seen = false;
14347 
14348 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
14349 		dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
14350 		switch (actions[i].type) {
14351 		case RTE_FLOW_ACTION_TYPE_QUEUE:
14352 		case RTE_FLOW_ACTION_TYPE_RSS:
14353 			ret = mirror_format_tir(dev, clone, table_cfg,
14354 						&actions[i], dest_attr, error);
14355 			if (ret)
14356 				return ret;
14357 			break;
14358 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14359 			ret = mirror_format_port(dev, &actions[i],
14360 						 dest_attr, error);
14361 			if (ret)
14362 				return ret;
14363 			break;
14364 		case RTE_FLOW_ACTION_TYPE_JUMP:
14365 			ret = mirror_format_jump(dev, clone, table_cfg,
14366 						 &actions[i], dest_attr, error);
14367 			if (ret)
14368 				return ret;
14369 			break;
14370 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14371 			dest_attr->dest = priv->hw_def_miss;
14372 			break;
14373 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14374 			decap_seen = true;
14375 			break;
14376 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14377 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14378 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14379 			ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
14380 						       &dest_attr->action_type[i],
14381 						       reformat_buf, decap_seen);
14382 			if (ret < 0)
14383 				return rte_flow_error_set(error, EINVAL,
14384 							  RTE_FLOW_ERROR_TYPE_ACTION,
14385 							  &actions[i],
14386 							  "failed to create reformat action");
14387 			break;
14388 		default:
14389 			return rte_flow_error_set(error, EINVAL,
14390 						  RTE_FLOW_ERROR_TYPE_ACTION,
14391 						  &actions[i], "unsupported sample action");
14392 		}
14393 		clone->type = actions->type;
14394 	}
14395 	dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
14396 	return 0;
14397 }
14398 
14399 static struct rte_flow_action_list_handle *
14400 mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
14401 			     const struct mlx5_flow_template_table_cfg *table_cfg,
14402 			     const struct rte_flow_action *actions,
14403 			     struct rte_flow_error *error)
14404 {
14405 	uint32_t hws_flags;
14406 	int ret = 0, i, clones_num;
14407 	struct mlx5_mirror *mirror;
14408 	enum mlx5dr_table_type table_type;
14409 	struct mlx5_priv *priv = dev->data->dev_private;
14410 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
14411 	uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
14412 	struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
14413 	enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
14414 						  [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
14415 
14416 	memset(mirror_attr, 0, sizeof(mirror_attr));
14417 	memset(array_action_types, 0, sizeof(array_action_types));
14418 	table_type = get_mlx5dr_table_type(flow_attr);
14419 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
14420 	clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
14421 							  actions);
14422 	if (clones_num < 0) {
14423 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14424 				   actions, "Invalid mirror list format");
14425 		return NULL;
14426 	}
14427 	mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
14428 			     0, SOCKET_ID_ANY);
14429 	if (!mirror) {
14430 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
14431 				   actions, "Failed to allocate mirror context");
14432 		return NULL;
14433 	}
14434 
14435 	mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
14436 	mirror->clones_num = clones_num;
14437 	for (i = 0; i < clones_num; i++) {
14438 		const struct rte_flow_action *clone_actions;
14439 
14440 		mirror_attr[i].action_type = array_action_types[i];
14441 		if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14442 			const struct rte_flow_action_sample *sample = actions[i].conf;
14443 
14444 			clone_actions = sample->actions;
14445 		} else {
14446 			clone_actions = &actions[i];
14447 		}
14448 		ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
14449 					     clone_actions, &mirror_attr[i],
14450 					     reformat_buf[i], error);
14451 
14452 		if (ret)
14453 			goto error;
14454 	}
14455 	hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
14456 	mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
14457 								clones_num,
14458 								mirror_attr,
14459 								hws_flags);
14460 	if (!mirror->mirror_action) {
14461 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14462 				   actions, "Failed to create HWS mirror action");
14463 		goto error;
14464 	}
14465 
14466 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
14467 	return (struct rte_flow_action_list_handle *)mirror;
14468 
14469 error:
14470 	mlx5_hw_mirror_destroy(dev, mirror);
14471 	return NULL;
14472 }
14473 
14474 void
14475 mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
14476 			     struct mlx5_indirect_list *ptr)
14477 {
14478 	struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
14479 
14480 	switch (obj->legacy_type) {
14481 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
14482 		break; /* ASO meters were released in mlx5_flow_meter_flush() */
14483 	default:
14484 		break;
14485 	}
14486 	mlx5_free(obj);
14487 }
14488 
14489 static struct rte_flow_action_list_handle *
14490 mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
14491 			  const struct rte_flow_op_attr *attr,
14492 			  const struct rte_flow_indir_action_conf *conf,
14493 			  const struct rte_flow_action *actions,
14494 			  void *user_data, struct rte_flow_error *error)
14495 {
14496 	struct mlx5_priv *priv = dev->data->dev_private;
14497 	struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
14498 							    sizeof(*indlst_obj),
14499 							    0, SOCKET_ID_ANY);
14500 
14501 	if (!indlst_obj)
14502 		return NULL;
14503 	indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
14504 							  actions, user_data,
14505 							  error);
14506 	if (!indlst_obj->handle) {
14507 		mlx5_free(indlst_obj);
14508 		return NULL;
14509 	}
14510 	indlst_obj->legacy_type = actions[0].type;
14511 	indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
14512 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
14513 	return (struct rte_flow_action_list_handle *)indlst_obj;
14514 }
14515 
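/*
 * Infer the indirect action list type from the first action in the list:
 * SAMPLE maps to a mirror list, a single METER_MARK to a legacy indirect
 * action wrapper, and RAW_DECAP/RAW_ENCAP to a reformat list.
 */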
14516 static __rte_always_inline enum mlx5_indirect_list_type
14517 flow_hw_inlist_type_get(const struct rte_flow_action *actions)
14518 {
14519 	switch (actions[0].type) {
14520 	case RTE_FLOW_ACTION_TYPE_SAMPLE:
14521 		return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
14522 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
14523 		return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
14524 		       MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
14525 		       MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
14526 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14527 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14528 		return MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
14529 	default:
14530 		break;
14531 	}
14532 	return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
14533 }
14534 
14535 static struct rte_flow_action_list_handle*
14536 mlx5_hw_decap_encap_handle_create(struct rte_eth_dev *dev,
14537 				  const struct mlx5_flow_template_table_cfg *table_cfg,
14538 				  const struct rte_flow_action *actions,
14539 				  struct rte_flow_error *error)
14540 {
14541 	struct mlx5_priv *priv = dev->data->dev_private;
14542 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
14543 	const struct rte_flow_action *encap = NULL;
14544 	const struct rte_flow_action *decap = NULL;
14545 	struct rte_flow_indir_action_conf indirect_conf = {
14546 		.ingress = flow_attr->ingress,
14547 		.egress = flow_attr->egress,
14548 		.transfer = flow_attr->transfer,
14549 	};
14550 	struct mlx5_hw_encap_decap_action *handle;
14551 	uint64_t action_flags = 0;
14552 
14553 	/*
14554 	 * Allow
14555 	 * 1. raw_decap / raw_encap / end
14556 	 * 2. raw_encap / end
14557 	 * 3. raw_decap / end
14558 	 */
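	/*
	 * For illustration, a hypothetical decap + encap list (case 1 above);
	 * encap_data and encap_size are placeholders supplied by the caller:
	 *
	 *   struct rte_flow_action_raw_decap decap_conf = { .size = 14 };
	 *   struct rte_flow_action_raw_encap encap_conf = {
	 *       .data = encap_data,
	 *       .size = encap_size,
	 *   };
	 *   const struct rte_flow_action list[] = {
	 *       { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap_conf },
	 *       { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap_conf },
	 *       { .type = RTE_FLOW_ACTION_TYPE_END },
	 *   };
	 */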
14559 	while (actions->type != RTE_FLOW_ACTION_TYPE_END) {
14560 		if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
14561 			if (action_flags) {
14562 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14563 						   actions, "Invalid indirect action list sequence");
14564 				return NULL;
14565 			}
14566 			action_flags |= MLX5_FLOW_ACTION_DECAP;
14567 			decap = actions;
14568 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
14569 			if (action_flags & MLX5_FLOW_ACTION_ENCAP) {
14570 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14571 						   actions, "Invalid indirect action list sequence");
14572 				return NULL;
14573 			}
14574 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
14575 			encap = actions;
14576 		} else {
14577 			rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14578 					   actions, "Invalid indirect action type in list");
14579 			return NULL;
14580 		}
14581 		actions++;
14582 	}
14583 	if (!decap && !encap) {
14584 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14585 				   actions, "Invalid indirect action combinations");
14586 		return NULL;
14587 	}
14588 	handle = mlx5_reformat_action_create(dev, &indirect_conf, encap, decap, error);
14589 	if (!handle) {
14590 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14591 				   actions, "Failed to create HWS decap_encap action");
14592 		return NULL;
14593 	}
14594 	handle->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
14595 	LIST_INSERT_HEAD(&priv->indirect_list_head, &handle->indirect, entry);
14596 	return (struct rte_flow_action_list_handle *)handle;
14597 }
14598 
14599 static struct rte_flow_action_list_handle *
14600 flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
14601 					const struct rte_flow_op_attr *attr,
14602 					const struct rte_flow_indir_action_conf *conf,
14603 					const struct rte_flow_action *actions,
14604 					void *user_data,
14605 					struct rte_flow_error *error)
14606 {
14607 	struct mlx5_hw_q_job *job = NULL;
14608 	bool push = flow_hw_action_push(attr);
14609 	enum mlx5_indirect_list_type list_type;
14610 	struct rte_flow_action_list_handle *handle;
14611 	struct mlx5_priv *priv = dev->data->dev_private;
14612 	const struct mlx5_flow_template_table_cfg table_cfg = {
14613 		.external = true,
14614 		.attr = {
14615 			.flow_attr = {
14616 				.ingress = conf->ingress,
14617 				.egress = conf->egress,
14618 				.transfer = conf->transfer
14619 			}
14620 		}
14621 	};
14622 
14623 	if (!mlx5_hw_ctx_validate(dev, error))
14624 		return NULL;
14625 	if (!actions) {
14626 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14627 				   NULL, "No action list");
14628 		return NULL;
14629 	}
14630 	list_type = flow_hw_inlist_type_get(actions);
14631 	if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14632 		/*
14633 		 * Legacy indirect actions already have asynchronous
14634 		 * resource management. No need to do it twice.
14635 		 */
14636 		handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
14637 						   actions, user_data, error);
14638 		goto end;
14639 	}
14640 	if (attr) {
14641 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
14642 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
14643 					      MLX5_HW_INDIRECT_TYPE_LIST, error);
14644 		if (!job)
14645 			return NULL;
14646 	}
14647 	switch (list_type) {
14648 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
14649 		handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
14650 						      actions, error);
14651 		break;
14652 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
14653 		handle = mlx5_hw_decap_encap_handle_create(dev, &table_cfg,
14654 							   actions, error);
14655 		break;
14656 	default:
14657 		handle = NULL;
14658 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14659 				   actions, "Invalid list");
14660 	}
14661 	if (job) {
14662 		job->action = handle;
14663 		flow_hw_action_finalize(dev, queue, job, push, false,
14664 					handle != NULL);
14665 	}
14666 end:
14667 	return handle;
14668 }
14669 
14670 static struct rte_flow_action_list_handle *
14671 flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
14672 				  const struct rte_flow_indir_action_conf *conf,
14673 				  const struct rte_flow_action *actions,
14674 				  struct rte_flow_error *error)
14675 {
14676 	return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
14677 						       NULL, conf, actions,
14678 						       NULL, error);
14679 }
14680 
14681 static int
14682 flow_hw_async_action_list_handle_destroy
14683 			(struct rte_eth_dev *dev, uint32_t queue,
14684 			 const struct rte_flow_op_attr *attr,
14685 			 struct rte_flow_action_list_handle *handle,
14686 			 void *user_data, struct rte_flow_error *error)
14687 {
14688 	int ret = 0;
14689 	struct mlx5_hw_q_job *job = NULL;
14690 	bool push = flow_hw_action_push(attr);
14691 	struct mlx5_priv *priv = dev->data->dev_private;
14692 	enum mlx5_indirect_list_type type =
14693 		mlx5_get_indirect_list_type((void *)handle);
14694 
14695 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14696 		struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
14697 
14698 		ret = flow_hw_action_handle_destroy(dev, queue, attr,
14699 						    legacy->handle,
14700 						    user_data, error);
14701 		mlx5_indirect_list_remove_entry(&legacy->indirect);
14702 		goto end;
14703 	}
14704 	if (attr) {
14705 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
14706 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
14707 					      MLX5_HW_INDIRECT_TYPE_LIST, error);
14708 		if (!job)
14709 			return rte_errno;
14710 	}
14711 	switch (type) {
14712 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
14713 		mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
14714 		break;
14715 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
14716 		LIST_REMOVE(&((struct mlx5_hw_encap_decap_action *)handle)->indirect,
14717 			    entry);
14718 		mlx5_reformat_action_destroy(dev, handle, error);
14719 		break;
14720 	default:
14721 		ret = rte_flow_error_set(error, EINVAL,
14722 					  RTE_FLOW_ERROR_TYPE_ACTION,
14723 					  NULL, "Invalid indirect list handle");
14724 	}
14725 	if (job) {
14726 		flow_hw_action_finalize(dev, queue, job, push, false, true);
14727 	}
14728 end:
14729 	return ret;
14730 }
14731 
14732 static int
14733 flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
14734 				   struct rte_flow_action_list_handle *handle,
14735 				   struct rte_flow_error *error)
14736 {
14737 	return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
14738 							NULL, handle, NULL,
14739 							error);
14740 }
14741 
14742 static int
14743 flow_hw_async_action_list_handle_query_update
14744 		(struct rte_eth_dev *dev, uint32_t queue_id,
14745 		 const struct rte_flow_op_attr *attr,
14746 		 const struct rte_flow_action_list_handle *handle,
14747 		 const void **update, void **query,
14748 		 enum rte_flow_query_update_mode mode,
14749 		 void *user_data, struct rte_flow_error *error)
14750 {
14751 	enum mlx5_indirect_list_type type =
14752 		mlx5_get_indirect_list_type((const void *)handle);
14753 
14754 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14755 		struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
14756 
14757 		if (update && query)
14758 			return flow_hw_async_action_handle_query_update
14759 				(dev, queue_id, attr, legacy->handle,
14760 				 update, query, mode, user_data, error);
14761 		else if (update && update[0])
14762 			return flow_hw_action_handle_update(dev, queue_id, attr,
14763 							    legacy->handle, update[0],
14764 							    user_data, error);
14765 		else if (query && query[0])
14766 			return flow_hw_action_handle_query(dev, queue_id, attr,
14767 							   legacy->handle, query[0],
14768 							   user_data, error);
14769 		else
14770 			return rte_flow_error_set(error, EINVAL,
14771 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14772 						  NULL, "invalid legacy handle query_update parameters");
14773 	}
14774 	return -ENOTSUP;
14775 }
14776 
14777 static int
14778 flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
14779 					const struct rte_flow_action_list_handle *handle,
14780 					const void **update, void **query,
14781 					enum rte_flow_query_update_mode mode,
14782 					struct rte_flow_error *error)
14783 {
14784 	return flow_hw_async_action_list_handle_query_update
14785 					(dev, MLX5_HW_INV_QUEUE, NULL, handle,
14786 					 update, query, mode, NULL, error);
14787 }
14788 
14789 static int
14790 flow_hw_calc_table_hash(struct rte_eth_dev *dev,
14791 			 const struct rte_flow_template_table *table,
14792 			 const struct rte_flow_item pattern[],
14793 			 uint8_t pattern_template_index,
14794 			 uint32_t *hash, struct rte_flow_error *error)
14795 {
14796 	const struct rte_flow_item *items;
14797 	struct mlx5_flow_hw_pattern_params pp;
14798 	int res;
14799 
14800 	items = flow_hw_get_rule_items(dev, table, pattern,
14801 				       pattern_template_index,
14802 				       &pp);
14803 	res = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,
14804 					 pattern_template_index,
14805 					 MLX5DR_RULE_HASH_CALC_MODE_RAW,
14806 					 hash);
14807 	if (res)
14808 		return rte_flow_error_set(error, res,
14809 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14810 					  NULL,
14811 					  "hash could not be calculated");
14812 	return 0;
14813 }
14814 
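/*
 * A minimal usage sketch for the helper below (hypothetical; ipv4_spec and
 * udp_spec are assumed to be filled by the caller). Only the spec fields
 * handled in the switch below contribute to the hash:
 *
 *   const struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   uint8_t hash[2];
 *
 *   flow_hw_calc_encap_hash(dev, pattern,
 *                           RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT, hash, &error);
 */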
14815 static int
14816 flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
14817 			const struct rte_flow_item pattern[],
14818 			enum rte_flow_encap_hash_field dest_field,
14819 			uint8_t *hash,
14820 			struct rte_flow_error *error)
14821 {
14822 	struct mlx5_priv *priv = dev->data->dev_private;
14823 	struct mlx5dr_crc_encap_entropy_hash_fields data;
14824 	enum mlx5dr_crc_encap_entropy_hash_size res_size =
14825 			dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT ?
14826 				MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_16 :
14827 				MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_8;
14828 	int res;
14829 
14830 	memset(&data, 0, sizeof(struct mlx5dr_crc_encap_entropy_hash_fields));
14831 
14832 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
14833 		switch (pattern->type) {
14834 		case RTE_FLOW_ITEM_TYPE_IPV4:
14835 			data.dst.ipv4_addr =
14836 				((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.dst_addr;
14837 			data.src.ipv4_addr =
14838 				((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.src_addr;
14839 			break;
14840 		case RTE_FLOW_ITEM_TYPE_IPV6:
14841 			memcpy(data.dst.ipv6_addr,
14842 			       &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
14843 			       sizeof(data.dst.ipv6_addr));
14844 			memcpy(data.src.ipv6_addr,
14845 			       &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
14846 			       sizeof(data.src.ipv6_addr));
14847 			break;
14848 		case RTE_FLOW_ITEM_TYPE_UDP:
14849 			data.next_protocol = IPPROTO_UDP;
14850 			data.dst_port =
14851 				((const struct rte_flow_item_udp *)(pattern->spec))->hdr.dst_port;
14852 			data.src_port =
14853 				((const struct rte_flow_item_udp *)(pattern->spec))->hdr.src_port;
14854 			break;
14855 		case RTE_FLOW_ITEM_TYPE_TCP:
14856 			data.next_protocol = IPPROTO_TCP;
14857 			data.dst_port =
14858 				((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.dst_port;
14859 			data.src_port =
14860 				((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.src_port;
14861 			break;
14862 		case RTE_FLOW_ITEM_TYPE_ICMP:
14863 			data.next_protocol = IPPROTO_ICMP;
14864 			break;
14865 		case RTE_FLOW_ITEM_TYPE_ICMP6:
14866 			data.next_protocol = IPPROTO_ICMPV6;
14867 			break;
14868 		default:
14869 			break;
14870 		}
14871 	}
14872 	res = mlx5dr_crc_encap_entropy_hash_calc(priv->dr_ctx, &data, hash, res_size);
14873 	if (res)
14874 		return rte_flow_error_set(error, res,
14875 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14876 					  NULL, "error while calculating encap hash");
14877 	return 0;
14878 }
14879 
14880 static int
14881 flow_hw_table_resize_multi_pattern_actions(struct rte_eth_dev *dev,
14882 					   struct rte_flow_template_table *table,
14883 					   uint32_t nb_flows,
14884 					   struct rte_flow_error *error)
14885 {
14886 	struct mlx5_multi_pattern_segment *segment = table->mpctx.segments;
14887 	uint32_t bulk_size;
14888 	int i, ret;
14889 
14890 	/**
14891 	 * A segment always allocates Modify Header Argument Objects in
14892 	 * powers of 2.
14893 	 * On resize, the PMD adds only the minimal required number of argument
14894 	 * objects. For example, if the table size was 10, 16 argument objects
14895 	 * were allocated, so resizing to 15 will not add new objects.
14896 	 */
14897 	for (i = 1;
14898 	     i < MLX5_MAX_TABLE_RESIZE_NUM && segment->capacity;
14899 	     i++, segment++) {
14900 		/* keep the devtools/checkpatches.sh happy */
14901 	}
14902 	if (i == MLX5_MAX_TABLE_RESIZE_NUM)
14903 		return rte_flow_error_set(error, EINVAL,
14904 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14905 					  table, "too many resizes");
14906 	if (segment->head_index - 1 >= nb_flows)
14907 		return 0;
14908 	bulk_size = rte_align32pow2(nb_flows - segment->head_index + 1);
14909 	ret = mlx5_tbl_multi_pattern_process(dev, table, segment,
14910 					     rte_log2_u32(bulk_size),
14911 					     error);
14912 	if (ret)
14913 		return rte_flow_error_set(error, EINVAL,
14914 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14915 					  table, "failed to allocate additional multi-pattern resources");
14916 	return i;
14917 }
14918 
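/*
 * Resize sketch, as implemented below: a new matcher sized for the requested
 * nb_flows is created in the unused matcher_info[] slot, the currently active
 * matcher is pointed at it with mlx5dr_matcher_resize_set_target(), and
 * matcher_selector is flipped so that new flows are inserted into the new
 * matcher. Pre-existing flows are moved later via flow_hw_update_resized(),
 * and the retired matcher is released in flow_hw_table_resize_complete().
 */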
14919 static int
14920 flow_hw_table_resize(struct rte_eth_dev *dev,
14921 		     struct rte_flow_template_table *table,
14922 		     uint32_t nb_flows,
14923 		     struct rte_flow_error *error)
14924 {
14925 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
14926 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
14927 	struct mlx5dr_matcher_attr matcher_attr = table->matcher_attr;
14928 	struct mlx5dr_action_jump_to_matcher_attr jump_attr = {
14929 		.type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX,
14930 		.matcher = NULL,
14931 	};
14932 	struct mlx5_multi_pattern_segment *segment = NULL;
14933 	struct mlx5dr_matcher *matcher = NULL;
14934 	struct mlx5dr_action *jump = NULL;
14935 	struct mlx5_priv *priv = dev->data->dev_private;
14936 	uint32_t i, selector = table->matcher_selector;
14937 	uint32_t other_selector = (selector + 1) & 1;
14938 	int ret;
14939 
14940 	if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
14941 		return rte_flow_error_set(error, EINVAL,
14942 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14943 					  table, "no resizable attribute");
14944 	if (table->matcher_info[other_selector].matcher)
14945 		return rte_flow_error_set(error, EINVAL,
14946 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14947 					  table, "last table resize was not completed");
14948 	if (nb_flows <= table->cfg.attr.nb_flows)
14949 		return rte_flow_error_set(error, EINVAL,
14950 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14951 					  table, "shrinking table is not supported");
14952 	ret = mlx5_ipool_resize(table->flow, nb_flows, error);
14953 	if (ret)
14954 		return ret;
14955 	/*
14956 	 * A resizable matcher doesn't support rule update. In this case, the ipool
14957 	 * for the resource is not created and there is no need to resize it.
14958 	 */
14959 	MLX5_ASSERT(!table->resource);
14960 	if (mlx5_is_multi_pattern_active(&table->mpctx)) {
14961 		ret = flow_hw_table_resize_multi_pattern_actions(dev, table, nb_flows, error);
14962 		if (ret < 0)
14963 			return ret;
14964 		if (ret > 0)
14965 			segment = table->mpctx.segments + ret;
14966 	}
14967 	for (i = 0; i < table->nb_item_templates; i++)
14968 		mt[i] = table->its[i]->mt;
14969 	for (i = 0; i < table->nb_action_templates; i++)
14970 		at[i] = table->ats[i].action_template->tmpl;
14971 	nb_flows = rte_align32pow2(nb_flows);
14972 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
14973 	matcher = mlx5dr_matcher_create(table->grp->tbl, mt,
14974 					table->nb_item_templates, at,
14975 					table->nb_action_templates,
14976 					&matcher_attr);
14977 	if (!matcher) {
14978 		ret = rte_flow_error_set(error, rte_errno,
14979 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14980 					 table, "failed to create new matcher");
14981 		goto error;
14982 	}
14983 	if (matcher_attr.isolated) {
14984 		jump_attr.matcher = matcher;
14985 		jump = mlx5dr_action_create_jump_to_matcher(priv->dr_ctx, &jump_attr,
14986 			mlx5_hw_act_flag[!!table->cfg.attr.flow_attr.group][table->type]);
14987 		if (!jump) {
14988 			ret = rte_flow_error_set(error, rte_errno,
14989 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14990 						table, "failed to create jump to matcher action");
14991 			goto error;
14992 		}
14993 	}
14994 	rte_rwlock_write_lock(&table->matcher_replace_rwlk);
14995 	ret = mlx5dr_matcher_resize_set_target
14996 			(table->matcher_info[selector].matcher, matcher);
14997 	if (ret) {
14998 		rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
14999 		ret = rte_flow_error_set(error, rte_errno,
15000 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15001 					 table, "failed to initiate matcher swap");
15002 		goto error;
15003 	}
15004 	table->cfg.attr.nb_flows = nb_flows;
15005 	table->matcher_info[other_selector].matcher = matcher;
15006 	table->matcher_info[other_selector].jump = jump;
15007 	table->matcher_selector = other_selector;
15008 	rte_atomic_store_explicit(&table->matcher_info[other_selector].refcnt,
15009 				  0, rte_memory_order_relaxed);
15010 	rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
15011 	return 0;
15012 error:
15013 	if (segment)
15014 		mlx5_destroy_multi_pattern_segment(segment);
15015 	if (jump)
15016 		mlx5dr_action_destroy(jump);
15017 	if (matcher) {
15018 		ret = mlx5dr_matcher_destroy(matcher);
15019 		return rte_flow_error_set(error, rte_errno,
15020 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15021 					  table, "failed to destroy new matcher");
15022 	}
15023 	return ret;
15024 }
15025 
15026 static int
15027 flow_hw_table_resize_complete(__rte_unused struct rte_eth_dev *dev,
15028 			      struct rte_flow_template_table *table,
15029 			      struct rte_flow_error *error)
15030 {
15031 	int ret;
15032 	uint32_t selector = table->matcher_selector;
15033 	uint32_t other_selector = (selector + 1) & 1;
15034 	struct mlx5_matcher_info *matcher_info = &table->matcher_info[other_selector];
15035 	uint32_t matcher_refcnt;
15036 
15037 	if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
15038 		return rte_flow_error_set(error, EINVAL,
15039 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15040 					  table, "no resizable attribute");
15041 	matcher_refcnt = rte_atomic_load_explicit(&matcher_info->refcnt,
15042 						  rte_memory_order_relaxed);
15043 	if (!matcher_info->matcher || matcher_refcnt)
15044 		return rte_flow_error_set(error, EBUSY,
15045 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15046 					  table, "cannot complete table resize");
15047 	if (matcher_info->jump)
15048 		mlx5dr_action_destroy(matcher_info->jump);
15049 	ret = mlx5dr_matcher_destroy(matcher_info->matcher);
15050 	if (ret)
15051 		return rte_flow_error_set(error, rte_errno,
15052 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15053 					  table, "failed to destroy retired matcher");
15054 	matcher_info->matcher = NULL;
15055 	return 0;
15056 }
15057 
15058 static int
15059 flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,
15060 		       const struct rte_flow_op_attr *attr,
15061 		       struct rte_flow *flow, void *user_data,
15062 		       struct rte_flow_error *error)
15063 {
15064 	int ret;
15065 	struct mlx5_priv *priv = dev->data->dev_private;
15066 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
15067 	struct rte_flow_template_table *table = hw_flow->table;
15068 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
15069 	uint32_t table_selector = table->matcher_selector;
15070 	uint32_t rule_selector = aux->matcher_selector;
15071 	uint32_t other_selector;
15072 	struct mlx5dr_matcher *other_matcher;
15073 	struct mlx5dr_rule_attr rule_attr = {
15074 		.queue_id = queue,
15075 		.burst = attr->postpone,
15076 	};
15077 
15078 	MLX5_ASSERT(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
15079 	/**
15080 	 * mlx5dr_matcher_resize_rule_move() accepts the original table matcher -
15081 	 * the one that was used BEFORE the table resize.
15082 	 * Since this function is called AFTER the table resize,
15083 	 * `table->matcher_selector` always points to the new matcher and
15084 	 * `aux->matcher_selector` points to the matcher used to create the flow.
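	 * For example, assuming the table started on selector 0: after one
	 * resize `table->matcher_selector` == 1 while a flow created before the
	 * resize keeps `aux->matcher_selector` == 0, so `other_selector`
	 * resolves to 0 and the rule is moved out of matcher_info[0].matcher.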
15085 	 */
15086 	other_selector = rule_selector == table_selector ?
15087 			 (rule_selector + 1) & 1 : rule_selector;
15088 	other_matcher = table->matcher_info[other_selector].matcher;
15089 	if (!other_matcher)
15090 		return rte_flow_error_set(error, EINVAL,
15091 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
15092 					  "no active table resize");
15093 	hw_flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE;
15094 	hw_flow->user_data = user_data;
15095 	rule_attr.user_data = hw_flow;
15096 	if (rule_selector == table_selector) {
15097 		struct rte_ring *ring = !attr->postpone ?
15098 					priv->hw_q[queue].flow_transfer_completed :
15099 					priv->hw_q[queue].flow_transfer_pending;
15100 		rte_ring_enqueue(ring, hw_flow);
15101 		flow_hw_q_inc_flow_ops(priv, queue);
15102 		return 0;
15103 	}
15104 	ret = mlx5dr_matcher_resize_rule_move(other_matcher,
15105 					      (struct mlx5dr_rule *)hw_flow->rule,
15106 					      &rule_attr);
15107 	if (ret) {
15108 		return rte_flow_error_set(error, rte_errno,
15109 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
15110 					  "flow transfer failed");
15111 	}
15112 	flow_hw_q_inc_flow_ops(priv, queue);
15113 	return 0;
15114 }
15115 
15116 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
15117 	.list_create = flow_hw_list_create,
15118 	.list_destroy = flow_hw_list_destroy,
15119 	.validate = flow_dv_validate,
15120 	.info_get = flow_hw_info_get,
15121 	.configure = flow_hw_configure,
15122 	.pattern_validate = flow_hw_pattern_validate,
15123 	.pattern_template_create = flow_hw_pattern_template_create,
15124 	.pattern_template_destroy = flow_hw_pattern_template_destroy,
15125 	.actions_validate = flow_hw_actions_validate,
15126 	.actions_template_create = flow_hw_actions_template_create,
15127 	.actions_template_destroy = flow_hw_actions_template_destroy,
15128 	.template_table_create = flow_hw_template_table_create,
15129 	.template_table_destroy = flow_hw_table_destroy,
15130 	.table_resize = flow_hw_table_resize,
15131 	.group_set_miss_actions = flow_hw_group_set_miss_actions,
15132 	.async_flow_create = flow_hw_async_flow_create,
15133 	.async_flow_create_by_index = flow_hw_async_flow_create_by_index,
15134 	.async_flow_update = flow_hw_async_flow_update,
15135 	.async_flow_destroy = flow_hw_async_flow_destroy,
15136 	.flow_update_resized = flow_hw_update_resized,
15137 	.table_resize_complete = flow_hw_table_resize_complete,
15138 	.pull = flow_hw_pull,
15139 	.push = flow_hw_push,
15140 	.async_action_create = flow_hw_action_handle_create,
15141 	.async_action_destroy = flow_hw_action_handle_destroy,
15142 	.async_action_update = flow_hw_action_handle_update,
15143 	.async_action_query_update = flow_hw_async_action_handle_query_update,
15144 	.async_action_query = flow_hw_action_handle_query,
15145 	.action_validate = flow_hw_action_validate,
15146 	.action_create = flow_hw_action_create,
15147 	.action_destroy = flow_hw_action_destroy,
15148 	.action_update = flow_hw_action_update,
15149 	.action_query = flow_hw_action_query,
15150 	.action_query_update = flow_hw_action_query_update,
15151 	.action_list_handle_create = flow_hw_action_list_handle_create,
15152 	.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
15153 	.action_list_handle_query_update =
15154 		flow_hw_action_list_handle_query_update,
15155 	.async_action_list_handle_create =
15156 		flow_hw_async_action_list_handle_create,
15157 	.async_action_list_handle_destroy =
15158 		flow_hw_async_action_list_handle_destroy,
15159 	.async_action_list_handle_query_update =
15160 		flow_hw_async_action_list_handle_query_update,
15161 	.query = flow_hw_query,
15162 	.get_aged_flows = flow_hw_get_aged_flows,
15163 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
15164 	.item_create = flow_dv_item_create,
15165 	.item_release = flow_dv_item_release,
15166 	.flow_calc_table_hash = flow_hw_calc_table_hash,
15167 	.flow_calc_encap_hash = flow_hw_calc_encap_hash,
15168 };
15169 
15170 /**
15171  * Creates a control flow using flow template API on @p proxy_dev device,
15172  * on behalf of @p owner_dev device.
15173  *
15174  * This function uses locks internally to synchronize access to the
15175  * flow queue.
15176  *
15177  * The created flow is stored in a private list associated with the @p proxy_dev device.
15178  *
15179  * @param owner_dev
15180  *   Pointer to Ethernet device on behalf of which flow is created.
15181  * @param proxy_dev
15182  *   Pointer to Ethernet device on which flow is created.
15183  * @param table
15184  *   Pointer to flow table.
15185  * @param items
15186  *   Pointer to flow rule items.
15187  * @param item_template_idx
15188  *   Index of an item template associated with @p table.
15189  * @param actions
15190  *   Pointer to flow rule actions.
15191  * @param action_template_idx
15192  *   Index of an action template associated with @p table.
15193  * @param info
15194  *   Additional info about control flow rule.
15195  * @param external
15196  *   If true, the created flow is stored on the external control flow rules list.
15197  *
15198  * @return
15199  *   0 on success, negative errno value otherwise and rte_errno set.
15200  *   0 on success, negative errno value otherwise and rte_errno set.
 */
15201 static __rte_unused int
15202 flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
15203 			 struct rte_eth_dev *proxy_dev,
15204 			 struct rte_flow_template_table *table,
15205 			 struct rte_flow_item items[],
15206 			 uint8_t item_template_idx,
15207 			 struct rte_flow_action actions[],
15208 			 uint8_t action_template_idx,
15209 			 struct mlx5_ctrl_flow_info *info,
15210 			 bool external)
15211 {
15212 	struct mlx5_priv *priv = proxy_dev->data->dev_private;
15213 	uint32_t queue = CTRL_QUEUE_ID(priv);
15214 	struct rte_flow_op_attr op_attr = {
15215 		.postpone = 0,
15216 	};
15217 	struct rte_flow *flow = NULL;
15218 	struct mlx5_ctrl_flow_entry *entry = NULL;
15219 	int ret;
15220 
15221 	rte_spinlock_lock(&priv->hw_ctrl_lock);
15222 	entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
15223 			    0, SOCKET_ID_ANY);
15224 	if (!entry) {
15225 		DRV_LOG(ERR, "port %u not enough memory to create control flows",
15226 			proxy_dev->data->port_id);
15227 		rte_errno = ENOMEM;
15228 		ret = -rte_errno;
15229 		goto error;
15230 	}
15231 	flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
15232 					 items, item_template_idx,
15233 					 actions, action_template_idx,
15234 					 NULL, NULL);
15235 	if (!flow) {
15236 		DRV_LOG(ERR, "port %u failed to enqueue create control"
15237 			" flow operation", proxy_dev->data->port_id);
15238 		ret = -rte_errno;
15239 		goto error;
15240 	}
15241 	ret = __flow_hw_pull_comp(proxy_dev, queue, NULL);
15242 	if (ret) {
15243 		DRV_LOG(ERR, "port %u failed to insert control flow",
15244 			proxy_dev->data->port_id);
15245 		rte_errno = EINVAL;
15246 		ret = -rte_errno;
15247 		goto error;
15248 	}
15249 	entry->owner_dev = owner_dev;
15250 	entry->flow = flow;
15251 	if (info)
15252 		entry->info = *info;
15253 	else
15254 		entry->info.type = MLX5_CTRL_FLOW_TYPE_GENERAL;
15255 	if (external)
15256 		LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
15257 	else
15258 		LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
15259 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15260 	return 0;
15261 error:
15262 	if (entry)
15263 		mlx5_free(entry);
15264 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15265 	return ret;
15266 }
15267 
15268 /**
15269  * Destroys a control flow @p flow using flow template API on @p dev device.
15270  *
15271  * This function uses locks internally to synchronize access to the
15272  * flow queue.
15273  *
15274  * If the @p flow is stored on any private list/pool, then caller must free up
15275  * If the @p flow is stored on any private list/pool, then the caller must free up
15276  *
15277  * @param dev
15278  *   Pointer to Ethernet device.
15279  * @param flow
15280  *   Pointer to flow rule.
15281  *
15282  * @return
15283  *   0 on success, non-zero value otherwise.
15284  */
15285 static int
15286 flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
15287 {
15288 	struct mlx5_priv *priv = dev->data->dev_private;
15289 	uint32_t queue = CTRL_QUEUE_ID(priv);
15290 	struct rte_flow_op_attr op_attr = {
15291 		.postpone = 0,
15292 	};
15293 	int ret;
15294 
15295 	rte_spinlock_lock(&priv->hw_ctrl_lock);
15296 	ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
15297 	if (ret) {
15298 		DRV_LOG(ERR, "port %u failed to enqueue destroy control"
15299 			" flow operation", dev->data->port_id);
15300 		goto exit;
15301 	}
15302 	ret = __flow_hw_pull_comp(dev, queue, NULL);
15303 	if (ret) {
15304 		DRV_LOG(ERR, "port %u failed to destroy control flow",
15305 			dev->data->port_id);
15306 		rte_errno = EINVAL;
15307 		ret = -rte_errno;
15308 		goto exit;
15309 	}
15310 exit:
15311 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15312 	return ret;
15313 }
15314 
15315 /**
15316  * Destroys control flows created on behalf of @p owner device on @p dev device.
15317  *
15318  * @param dev
15319  *   Pointer to Ethernet device on which control flows were created.
15320  * @param owner
15321  *   Pointer to Ethernet device owning control flows.
15322  *
15323  * @return
15324  *   0 on success, otherwise negative error code is returned and
15325  *   rte_errno is set.
15326  */
15327 static int
15328 flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
15329 {
15330 	struct mlx5_priv *priv = dev->data->dev_private;
15331 	struct mlx5_ctrl_flow_entry *cf;
15332 	struct mlx5_ctrl_flow_entry *cf_next;
15333 	int ret;
15334 
15335 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
15336 	while (cf != NULL) {
15337 		cf_next = LIST_NEXT(cf, next);
15338 		if (cf->owner_dev == owner) {
15339 			ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15340 			if (ret) {
15341 				rte_errno = ret;
15342 				return -ret;
15343 			}
15344 			LIST_REMOVE(cf, next);
15345 			mlx5_free(cf);
15346 		}
15347 		cf = cf_next;
15348 	}
15349 	return 0;
15350 }
15351 
15352 /**
15353  * Destroys control flows created for @p owner_dev device.
15354  *
15355  * @param owner_dev
15356  *   Pointer to Ethernet device owning control flows.
15357  *
15358  * @return
15359  *   0 on success, otherwise negative error code is returned and
15360  *   rte_errno is set.
15361  */
15362 int
15363 mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
15364 {
15365 	struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
15366 	struct rte_eth_dev *proxy_dev;
15367 	uint16_t owner_port_id = owner_dev->data->port_id;
15368 	uint16_t proxy_port_id = owner_dev->data->port_id;
15369 	int ret;
15370 
15371 	/* Flush all flows created by this port for itself. */
15372 	ret = flow_hw_flush_ctrl_flows_owned_by(owner_dev, owner_dev);
15373 	if (ret)
15374 		return ret;
15375 	/* Flush all flows created for this port on proxy port. */
15376 	if (owner_priv->sh->config.dv_esw_en) {
15377 		ret = rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL);
15378 		if (ret == -ENODEV) {
15379 			DRV_LOG(DEBUG, "Unable to find transfer proxy port for port %u. It was "
15380 				       "probably closed. Control flows were cleared.",
15381 				       owner_port_id);
15382 			rte_errno = 0;
15383 			return 0;
15384 		} else if (ret) {
15385 			DRV_LOG(ERR, "Unable to find proxy port for port %u (ret = %d)",
15386 				owner_port_id, ret);
15387 			return ret;
15388 		}
15389 		proxy_dev = &rte_eth_devices[proxy_port_id];
15390 	} else {
15391 		proxy_dev = owner_dev;
15392 	}
15393 	return flow_hw_flush_ctrl_flows_owned_by(proxy_dev, owner_dev);
15394 }
15395 
15396 /**
15397  * Destroys all control flows created on @p dev device.
15398  *
15399  * @param dev
15400  *   Pointer to Ethernet device.
15401  *
15402  * @return
15403  *   0 on success, otherwise negative error code is returned and
15404  *   rte_errno is set.
15405  */
15406 static int
15407 flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
15408 {
15409 	struct mlx5_priv *priv = dev->data->dev_private;
15410 	struct mlx5_ctrl_flow_entry *cf;
15411 	struct mlx5_ctrl_flow_entry *cf_next;
15412 	int ret;
15413 
15414 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
15415 	while (cf != NULL) {
15416 		cf_next = LIST_NEXT(cf, next);
15417 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15418 		if (ret) {
15419 			rte_errno = ret;
15420 			return -ret;
15421 		}
15422 		LIST_REMOVE(cf, next);
15423 		mlx5_free(cf);
15424 		cf = cf_next;
15425 	}
15426 	cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
15427 	while (cf != NULL) {
15428 		cf_next = LIST_NEXT(cf, next);
15429 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15430 		if (ret) {
15431 			rte_errno = ret;
15432 			return -ret;
15433 		}
15434 		LIST_REMOVE(cf, next);
15435 		mlx5_free(cf);
15436 		cf = cf_next;
15437 	}
15438 	return 0;
15439 }
15440 
15441 int
15442 mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
15443 {
15444 	uint16_t port_id = dev->data->port_id;
15445 	struct rte_flow_item_ethdev esw_mgr_spec = {
15446 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
15447 	};
15448 	struct rte_flow_item_ethdev esw_mgr_mask = {
15449 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
15450 	};
15451 	struct rte_flow_item_tag reg_c0_spec = {
15452 		.index = (uint8_t)REG_C_0,
15453 		.data = flow_hw_esw_mgr_regc_marker(dev),
15454 	};
15455 	struct rte_flow_item_tag reg_c0_mask = {
15456 		.index = 0xff,
15457 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
15458 	};
15459 	struct mlx5_rte_flow_item_sq sq_spec = {
15460 		.queue = sqn,
15461 	};
15462 	struct rte_flow_action_ethdev port = {
15463 		.port_id = port_id,
15464 	};
15465 	struct rte_flow_item items[3] = { { 0 } };
15466 	struct rte_flow_action actions[3] = { { 0 } };
15467 	struct mlx5_ctrl_flow_info flow_info = {
15468 		.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
15469 		.esw_mgr_sq = sqn,
15470 	};
15471 	struct rte_eth_dev *proxy_dev;
15472 	struct mlx5_priv *proxy_priv;
15473 	uint16_t proxy_port_id = dev->data->port_id;
15474 	int ret;
15475 
15476 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15477 	if (ret) {
15478 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15479 			     "port must be present to create default SQ miss flows.",
15480 			     port_id);
15481 		return ret;
15482 	}
15483 	proxy_dev = &rte_eth_devices[proxy_port_id];
15484 	proxy_priv = proxy_dev->data->dev_private;
15485 	if (!proxy_priv->dr_ctx) {
15486 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
15487 			       "for HWS to create default SQ miss flows. Default flows will "
15488 			       "not be created.",
15489 			       proxy_port_id, port_id);
15490 		return 0;
15491 	}
15492 	if (!proxy_priv->hw_ctrl_fdb ||
15493 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
15494 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
15495 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
15496 			     "default flow tables were not created.",
15497 			     proxy_port_id, port_id);
15498 		rte_errno = ENOMEM;
15499 		return -rte_errno;
15500 	}
15501 	/*
15502 	 * Create a root SQ miss flow rule - match E-Switch Manager and SQ,
15503 	 * and jump to group 1.
15504 	 */
15505 	items[0] = (struct rte_flow_item){
15506 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
15507 		.spec = &esw_mgr_spec,
15508 		.mask = &esw_mgr_mask,
15509 	};
15510 	items[1] = (struct rte_flow_item){
15511 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15512 		.spec = &sq_spec,
15513 	};
15514 	items[2] = (struct rte_flow_item){
15515 		.type = RTE_FLOW_ITEM_TYPE_END,
15516 	};
15517 	actions[0] = (struct rte_flow_action){
15518 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
15519 	};
15520 	actions[1] = (struct rte_flow_action){
15521 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
15522 	};
15523 	actions[2] = (struct rte_flow_action) {
15524 		.type = RTE_FLOW_ACTION_TYPE_END,
15525 	};
15526 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
15527 				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl,
15528 				       items, 0, actions, 0, &flow_info, external);
15529 	if (ret) {
15530 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
15531 			port_id, sqn, ret);
15532 		return ret;
15533 	}
15534 	/*
15535 	 * Create a non-root SQ miss flow rule - match REG_C_0 marker and SQ,
15536 	 * and forward to port.
15537 	 */
15538 	items[0] = (struct rte_flow_item){
15539 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
15540 		.spec = &reg_c0_spec,
15541 		.mask = &reg_c0_mask,
15542 	};
15543 	items[1] = (struct rte_flow_item){
15544 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15545 		.spec = &sq_spec,
15546 	};
15547 	items[2] = (struct rte_flow_item){
15548 		.type = RTE_FLOW_ITEM_TYPE_END,
15549 	};
15550 	actions[0] = (struct rte_flow_action){
15551 		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
15552 		.conf = &port,
15553 	};
15554 	actions[1] = (struct rte_flow_action){
15555 		.type = RTE_FLOW_ACTION_TYPE_END,
15556 	};
15557 	flow_info.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS;
15558 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
15559 				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl,
15560 				       items, 0, actions, 0, &flow_info, external);
15561 	if (ret) {
15562 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
15563 			port_id, sqn, ret);
15564 		return ret;
15565 	}
15566 	return 0;
15567 }
15568 
15569 static bool
15570 flow_hw_is_matching_sq_miss_flow(struct mlx5_ctrl_flow_entry *cf,
15571 				 struct rte_eth_dev *dev,
15572 				 uint32_t sqn)
15573 {
15574 	if (cf->owner_dev != dev)
15575 		return false;
15576 	if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
15577 		return true;
15578 	if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
15579 		return true;
15580 	return false;
15581 }
15582 
15583 int
15584 mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
15585 {
15586 	uint16_t port_id = dev->data->port_id;
15587 	uint16_t proxy_port_id = dev->data->port_id;
15588 	struct rte_eth_dev *proxy_dev;
15589 	struct mlx5_priv *proxy_priv;
15590 	struct mlx5_ctrl_flow_entry *cf;
15591 	struct mlx5_ctrl_flow_entry *cf_next;
15592 	int ret;
15593 
15594 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15595 	if (ret) {
15596 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15597 			     "port must be present for default SQ miss flow rules to exist.",
15598 			     port_id);
15599 		return ret;
15600 	}
15601 	proxy_dev = &rte_eth_devices[proxy_port_id];
15602 	proxy_priv = proxy_dev->data->dev_private;
15603 	/* FDB default flow rules must be enabled. */
15604 	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
15605 	if (!proxy_priv->dr_ctx)
15606 		return 0;
15607 	if (!proxy_priv->hw_ctrl_fdb ||
15608 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
15609 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl)
15610 		return 0;
15611 	cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
15612 	while (cf != NULL) {
15613 		cf_next = LIST_NEXT(cf, next);
15614 		if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) {
15615 			claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
15616 			LIST_REMOVE(cf, next);
15617 			mlx5_free(cf);
15618 		}
15619 		cf = cf_next;
15620 	}
15621 	return 0;
15622 }
15623 
15624 int
15625 mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
15626 {
15627 	uint16_t port_id = dev->data->port_id;
15628 	struct rte_flow_item_ethdev port_spec = {
15629 		.port_id = port_id,
15630 	};
15631 	struct rte_flow_item items[] = {
15632 		{
15633 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
15634 			.spec = &port_spec,
15635 		},
15636 		{
15637 			.type = RTE_FLOW_ITEM_TYPE_END,
15638 		},
15639 	};
15640 	struct rte_flow_action_jump jump = {
15641 		.group = 1,
15642 	};
15643 	struct rte_flow_action actions[] = {
15644 		{
15645 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
15646 			.conf = &jump,
15647 		},
15648 		{
15649 			.type = RTE_FLOW_ACTION_TYPE_END,
15650 		}
15651 	};
15652 	struct mlx5_ctrl_flow_info flow_info = {
15653 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP,
15654 	};
15655 	struct rte_eth_dev *proxy_dev;
15656 	struct mlx5_priv *proxy_priv;
15657 	uint16_t proxy_port_id = dev->data->port_id;
15658 	int ret;
15659 
15660 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15661 	if (ret) {
15662 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15663 			     "port must be present to create default FDB jump rule.",
15664 			     port_id);
15665 		return ret;
15666 	}
15667 	proxy_dev = &rte_eth_devices[proxy_port_id];
15668 	proxy_priv = proxy_dev->data->dev_private;
15669 	/* FDB default flow rules must be enabled. */
15670 	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
15671 	if (!proxy_priv->dr_ctx) {
15672 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
15673 			       "for HWS to create default FDB jump rule. Default rule will "
15674 			       "not be created.",
15675 			       proxy_port_id, port_id);
15676 		return 0;
15677 	}
15678 	if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) {
15679 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
15680 			     "default flow tables were not created.",
15681 			     proxy_port_id, port_id);
15682 		rte_errno = EINVAL;
15683 		return -rte_errno;
15684 	}
15685 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
15686 					proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl,
15687 					items, 0, actions, 0, &flow_info, false);
15688 }
15689 
15690 int
15691 mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
15692 {
15693 	struct mlx5_priv *priv = dev->data->dev_private;
15694 	struct rte_flow_item_eth promisc = {
15695 		.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
15696 		.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
15697 		.hdr.ether_type = 0,
15698 	};
15699 	struct rte_flow_item eth_all[] = {
15700 		[0] = {
15701 			.type = RTE_FLOW_ITEM_TYPE_ETH,
15702 			.spec = &promisc,
15703 			.mask = &promisc,
15704 		},
15705 		[1] = {
15706 			.type = RTE_FLOW_ITEM_TYPE_END,
15707 		},
15708 	};
15709 	struct rte_flow_action_modify_field mreg_action = {
15710 		.operation = RTE_FLOW_MODIFY_SET,
15711 		.dst = {
15712 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
15713 			.tag_index = REG_C_1,
15714 		},
15715 		.src = {
15716 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
15717 			.tag_index = REG_A,
15718 		},
15719 		.width = 32,
15720 	};
15721 	struct rte_flow_action copy_reg_action[] = {
15722 		[0] = {
15723 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
15724 			.conf = &mreg_action,
15725 		},
15726 		[1] = {
15727 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
15728 		},
15729 		[2] = {
15730 			.type = RTE_FLOW_ACTION_TYPE_END,
15731 		},
15732 	};
15733 	struct mlx5_ctrl_flow_info flow_info = {
15734 		.type = MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
15735 	};
15736 
15737 	MLX5_ASSERT(priv->master);
15738 	if (!priv->dr_ctx ||
15739 	    !priv->hw_ctrl_fdb ||
15740 	    !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
15741 		return 0;
15742 	return flow_hw_create_ctrl_flow(dev, dev,
15743 					priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl,
15744 					eth_all, 0, copy_reg_action, 0, &flow_info, false);
15745 }
15746 
15747 int
15748 mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
15749 {
15750 	struct mlx5_priv *priv = dev->data->dev_private;
15751 	struct mlx5_rte_flow_item_sq sq_spec = {
15752 		.queue = sqn,
15753 	};
15754 	struct rte_flow_item items[] = {
15755 		{
15756 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15757 			.spec = &sq_spec,
15758 		},
15759 		{
15760 			.type = RTE_FLOW_ITEM_TYPE_END,
15761 		},
15762 	};
15763 	/*
15764 	 * Allocate actions array suitable for all cases - extended metadata enabled or not.
15765 	 * With extended metadata there will be an additional MODIFY_FIELD action before JUMP.
15766 	 */
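	/*
	 * Resulting action layouts (sketch; the concrete field arguments are
	 * provided by the table's actions template, the types below are only
	 * placeholders matched against it):
	 * - representor matching only:
	 *     [ MODIFY_FIELD, JUMP, END, END ]
	 * - with META32_HWS extended metadata:
	 *     [ MODIFY_FIELD, MODIFY_FIELD, JUMP, END ]
	 */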
15767 	struct rte_flow_action actions[] = {
15768 		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD },
15769 		{ .type = RTE_FLOW_ACTION_TYPE_JUMP },
15770 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15771 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15772 	};
15773 	struct mlx5_ctrl_flow_info flow_info = {
15774 		.type = MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH,
15775 		.tx_repr_sq = sqn,
15776 	};
15777 
15778 	/* It is assumed that caller checked for representor matching. */
15779 	MLX5_ASSERT(priv->sh->config.repr_matching);
15780 	if (!priv->dr_ctx) {
15781 		DRV_LOG(DEBUG, "Port %u must be configured for HWS, before creating "
15782 			       "default egress flow rules. Omitting creation.",
15783 			       dev->data->port_id);
15784 		return 0;
15785 	}
15786 	if (!priv->hw_tx_repr_tagging_tbl) {
15787 		DRV_LOG(ERR, "Port %u is configured for HWS, but table for default "
15788 			     "egress flow rules does not exist.",
15789 			     dev->data->port_id);
15790 		rte_errno = EINVAL;
15791 		return -rte_errno;
15792 	}
15793 	/*
15794 	 * If extended metadata mode is enabled, then an additional MODIFY_FIELD action must be
15795 	 * placed before terminating JUMP action.
15796 	 */
15797 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
15798 		actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
15799 		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
15800 	}
15801 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
15802 					items, 0, actions, 0, &flow_info, external);
15803 }
15804 
15805 int
15806 mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
15807 {
15808 	struct mlx5_priv *priv = dev->data->dev_private;
15809 	struct rte_flow_item_eth lacp_item = {
15810 		.hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
15811 	};
15812 	struct rte_flow_item eth_lacp[] = {
15813 		[0] = {
15814 			.type = RTE_FLOW_ITEM_TYPE_ETH,
15815 			.spec = &lacp_item,
15816 			.mask = &lacp_item,
15817 		},
15818 		[1] = {
15819 			.type = RTE_FLOW_ITEM_TYPE_END,
15820 		},
15821 	};
15822 	struct rte_flow_action miss_action[] = {
15823 		[0] = {
15824 			.type = (enum rte_flow_action_type)
15825 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
15826 		},
15827 		[1] = {
15828 			.type = RTE_FLOW_ACTION_TYPE_END,
15829 		},
15830 	};
15831 	struct mlx5_ctrl_flow_info flow_info = {
15832 		.type = MLX5_CTRL_FLOW_TYPE_LACP_RX,
15833 	};
15834 
15835 	if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl)
15836 		return 0;
15837 	return flow_hw_create_ctrl_flow(dev, dev,
15838 					priv->hw_ctrl_fdb->hw_lacp_rx_tbl,
15839 					eth_lacp, 0, miss_action, 0, &flow_info, false);
15840 }
15841 
15842 static uint32_t
15843 __calc_pattern_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
15844 {
15845 	switch (eth_pattern_type) {
15846 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
15847 		return MLX5_CTRL_PROMISCUOUS;
15848 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
15849 		return MLX5_CTRL_ALL_MULTICAST;
15850 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
15851 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15852 		return MLX5_CTRL_BROADCAST;
15853 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
15854 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15855 		return MLX5_CTRL_IPV4_MULTICAST;
15856 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
15857 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15858 		return MLX5_CTRL_IPV6_MULTICAST;
15859 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
15860 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
15861 		return MLX5_CTRL_DMAC;
15862 	default:
15863 		/* Should not reach here. */
15864 		MLX5_ASSERT(false);
15865 		return 0;
15866 	}
15867 }
15868 
15869 static uint32_t
15870 __calc_vlan_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
15871 {
15872 	switch (eth_pattern_type) {
15873 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15874 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15875 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15876 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
15877 		return MLX5_CTRL_VLAN_FILTER;
15878 	default:
15879 		return 0;
15880 	}
15881 }
15882 
15883 static bool
15884 eth_pattern_type_is_requested(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
15885 			      uint32_t flags)
15886 {
15887 	uint32_t pattern_flags = __calc_pattern_flags(eth_pattern_type);
15888 	uint32_t vlan_flags = __calc_vlan_flags(eth_pattern_type);
15889 	bool pattern_requested = !!(pattern_flags & flags);
15890 	bool consider_vlan = vlan_flags || (MLX5_CTRL_VLAN_FILTER & flags);
15891 	bool vlan_requested = !!(vlan_flags & flags);
15892 
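	/*
	 * In effect: when MLX5_CTRL_VLAN_FILTER is requested, only the *_VLAN
	 * variants of the requested pattern types are created; otherwise only
	 * the non-VLAN variants are created.
	 */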
15893 	if (consider_vlan)
15894 		return pattern_requested && vlan_requested;
15895 	else
15896 		return pattern_requested;
15897 }
15898 
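/*
 * Check whether the given RSS expansion type can be used with the current
 * port configuration, i.e. whether every RSS hash type used by the
 * corresponding precreated actions template is enabled in priv->rss_conf.rss_hf.
 */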
15899 static bool
15900 rss_type_is_requested(struct mlx5_priv *priv,
15901 		      const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15902 {
15903 	struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[rss_type];
15904 	unsigned int i;
15905 
15906 	for (i = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
15907 		if (at->actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
15908 			const struct rte_flow_action_rss *rss = at->actions[i].conf;
15909 			uint64_t rss_types = rss->types;
15910 
15911 			if ((rss_types & priv->rss_conf.rss_hf) != rss_types)
15912 				return false;
15913 		}
15914 	}
15915 	return true;
15916 }
15917 
15918 static const struct rte_flow_item_eth *
15919 __get_eth_spec(const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern)
15920 {
15921 	switch (pattern) {
15922 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
15923 		return &ctrl_rx_eth_promisc_spec;
15924 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
15925 		return &ctrl_rx_eth_mcast_spec;
15926 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
15927 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15928 		return &ctrl_rx_eth_bcast_spec;
15929 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
15930 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15931 		return &ctrl_rx_eth_ipv4_mcast_spec;
15932 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
15933 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15934 		return &ctrl_rx_eth_ipv6_mcast_spec;
15935 	default:
15936 		/* This case should not be reached. */
15937 		MLX5_ASSERT(false);
15938 		return NULL;
15939 	}
15940 }
15941 
15942 static int
15943 __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
15944 			    struct rte_flow_template_table *tbl,
15945 			    const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15946 			    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15947 {
15948 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
15949 	struct rte_flow_item items[5];
15950 	struct rte_flow_action actions[] = {
15951 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
15952 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15953 	};
15954 	struct mlx5_ctrl_flow_info flow_info = {
15955 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
15956 	};
15957 
15958 	if (!eth_spec)
15959 		return -EINVAL;
15960 	memset(items, 0, sizeof(items));
15961 	items[0] = (struct rte_flow_item){
15962 		.type = RTE_FLOW_ITEM_TYPE_ETH,
15963 		.spec = eth_spec,
15964 	};
15965 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
15966 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
15967 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
15968 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
15969 	/* Without VLAN filtering, only a single flow rule must be created. */
15970 	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
15971 }
15972 
15973 static int
15974 __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
15975 				 struct rte_flow_template_table *tbl,
15976 				 const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15977 				 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15978 {
15979 	struct mlx5_priv *priv = dev->data->dev_private;
15980 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
15981 	struct rte_flow_item items[5];
15982 	struct rte_flow_action actions[] = {
15983 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
15984 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15985 	};
15986 	struct mlx5_ctrl_flow_info flow_info = {
15987 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
15988 	};
15989 	unsigned int i;
15990 
15991 	if (!eth_spec)
15992 		return -EINVAL;
15993 	memset(items, 0, sizeof(items));
15994 	items[0] = (struct rte_flow_item){
15995 		.type = RTE_FLOW_ITEM_TYPE_ETH,
15996 		.spec = eth_spec,
15997 	};
15998 	/* VLAN item is in the pattern; its spec is filled per VLAN ID in the loop below. */
15999 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
16000 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16001 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16002 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16003 	/* With VLAN filtering enabled, create a separate flow rule for each registered VLAN ID. */
16004 	for (i = 0; i < priv->vlan_filter_n; ++i) {
16005 		uint16_t vlan = priv->vlan_filter[i];
16006 		struct rte_flow_item_vlan vlan_spec = {
16007 			.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
16008 		};
16009 
16010 		items[1].spec = &vlan_spec;
16011 		if (flow_hw_create_ctrl_flow(dev, dev,
16012 					     tbl, items, 0, actions, 0, &flow_info, false))
16013 			return -rte_errno;
16014 	}
16015 	return 0;
16016 }
16017 
16018 static int
16019 __flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
16020 				    struct rte_flow_template_table *tbl,
16021 				    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
16022 				    const struct rte_ether_addr *addr)
16023 {
16024 	struct rte_flow_item_eth eth_spec = {
16025 		.hdr.dst_addr = *addr,
16026 	};
16027 	struct rte_flow_item items[5];
16028 	struct rte_flow_action actions[] = {
16029 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
16030 		{ .type = RTE_FLOW_ACTION_TYPE_END },
16031 	};
16032 	struct mlx5_ctrl_flow_info flow_info = {
16033 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
16034 		.uc = {
16035 			.dmac = *addr,
16036 		},
16037 	};
16038 
16039 	memset(items, 0, sizeof(items));
16040 	items[0] = (struct rte_flow_item){
16041 		.type = RTE_FLOW_ITEM_TYPE_ETH,
16042 		.spec = &eth_spec,
16043 	};
16044 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
16045 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16046 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16047 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16048 
16049 	if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
16050 		return -rte_errno;
16051 
16052 	return 0;
16053 }
16054 
16055 static int
16056 __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
16057 			     struct rte_flow_template_table *tbl,
16058 			     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16059 {
16060 	unsigned int i;
16061 	int ret;
16062 
16063 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
16064 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
16065 
16066 		if (rte_is_zero_ether_addr(mac))
16067 			continue;
16068 
16069 		ret = __flow_hw_ctrl_flows_unicast_create(dev, tbl, rss_type, mac);
16070 		if (ret < 0)
16071 			return ret;
16072 	}
16073 	return 0;
16074 }
16075 
16076 static int
16077 __flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
16078 					 struct rte_flow_template_table *tbl,
16079 					 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
16080 					 const struct rte_ether_addr *addr,
16081 					 const uint16_t vid)
16082 {
16083 	struct rte_flow_item_eth eth_spec = {
16084 		.hdr.dst_addr = *addr,
16085 	};
16086 	struct rte_flow_item_vlan vlan_spec = {
16087 		.hdr.vlan_tci = rte_cpu_to_be_16(vid),
16088 	};
16089 	struct rte_flow_item items[5];
16090 	struct rte_flow_action actions[] = {
16091 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
16092 		{ .type = RTE_FLOW_ACTION_TYPE_END },
16093 	};
16094 	struct mlx5_ctrl_flow_info flow_info = {
16095 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
16096 		.uc = {
16097 			.dmac = *addr,
16098 			.vlan = vid,
16099 		},
16100 	};
16101 
16102 	memset(items, 0, sizeof(items));
16103 	items[0] = (struct rte_flow_item){
16104 		.type = RTE_FLOW_ITEM_TYPE_ETH,
16105 		.spec = &eth_spec,
16106 	};
16107 	items[1] = (struct rte_flow_item){
16108 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
16109 		.spec = &vlan_spec,
16110 	};
16111 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16112 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16113 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16114 
16115 	if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
16116 		return -rte_errno;
16117 
16118 	return 0;
16119 }
16120 
16121 static int
16122 __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
16123 				  struct rte_flow_template_table *tbl,
16124 				  const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16125 {
16126 	struct mlx5_priv *priv = dev->data->dev_private;
16127 	unsigned int i;
16128 	unsigned int j;
16129 
16130 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
16131 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
16132 
16133 		if (rte_is_zero_ether_addr(mac))
16134 			continue;
16135 
16136 		for (j = 0; j < priv->vlan_filter_n; ++j) {
16137 			uint16_t vlan = priv->vlan_filter[j];
16138 			int ret;
16139 
16140 			ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tbl, rss_type,
16141 								       mac, vlan);
16142 			if (ret < 0)
16143 				return ret;
16144 		}
16145 	}
16146 	return 0;
16147 }
16148 
16149 static int
16150 __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
16151 		     struct rte_flow_template_table *tbl,
16152 		     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
16153 		     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16154 {
16155 	switch (pattern_type) {
16156 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
16157 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
16158 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
16159 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
16160 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
16161 		return __flow_hw_ctrl_flows_single(dev, tbl, pattern_type, rss_type);
16162 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
16163 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
16164 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
16165 		return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
16166 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
16167 		return __flow_hw_ctrl_flows_unicast(dev, tbl, rss_type);
16168 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
16169 		return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, rss_type);
16170 	default:
16171 		/* Should not reach here. */
16172 		MLX5_ASSERT(false);
16173 		rte_errno = EINVAL;
16174 		return -EINVAL;
16175 	}
16176 }
16177 
16178 
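/*
 * Create all Rx control flow rules requested through @p flags.
 * For each requested Ethernet pattern type and each RSS expansion type,
 * the RSS actions template and the template table are created on demand,
 * and the control rules are then inserted (one rule per DMAC and/or VLAN
 * where applicable).
 */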
16179 int
16180 mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
16181 {
16182 	struct mlx5_priv *priv = dev->data->dev_private;
16183 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
16184 	unsigned int i;
16185 	int j;
16186 	int ret = 0;
16187 
16188 	RTE_SET_USED(priv);
16189 	RTE_SET_USED(flags);
16190 	if (!priv->dr_ctx) {
16191 		DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
16192 			       "HWS needs to be configured beforehand.",
16193 			       dev->data->port_id);
16194 		return 0;
16195 	}
16196 	if (!priv->hw_ctrl_rx) {
16197 		DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
16198 			dev->data->port_id);
16199 		rte_errno = EINVAL;
16200 		return -rte_errno;
16201 	}
16202 	hw_ctrl_rx = priv->hw_ctrl_rx;
16203 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
16204 		const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
16205 
16206 		if (!eth_pattern_type_is_requested(eth_pattern_type, flags))
16207 			continue;
16208 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
16209 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
16210 			struct rte_flow_actions_template *at;
16211 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
16212 			const struct mlx5_flow_template_table_cfg cfg = {
16213 				.attr = tmpls->attr,
16214 				.external = 0,
16215 			};
16216 
16217 			if (!hw_ctrl_rx->rss[rss_type]) {
16218 				at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
16219 				if (!at)
16220 					return -rte_errno;
16221 				hw_ctrl_rx->rss[rss_type] = at;
16222 			} else {
16223 				at = hw_ctrl_rx->rss[rss_type];
16224 			}
16225 			if (!rss_type_is_requested(priv, rss_type))
16226 				continue;
16227 			if (!tmpls->tbl) {
16228 				tmpls->tbl = flow_hw_table_create(dev, &cfg,
16229 								  &tmpls->pt, 1, &at, 1, NULL);
16230 				if (!tmpls->tbl) {
16231 					DRV_LOG(ERR, "port %u Failed to create template table "
16232 						     "for control flow rules. Unable to create "
16233 						     "control flow rules.",
16234 						     dev->data->port_id);
16235 					return -rte_errno;
16236 				}
16237 			}
16238 
16239 			ret = __flow_hw_ctrl_flows(dev, tmpls->tbl, eth_pattern_type, rss_type);
16240 			if (ret) {
16241 				DRV_LOG(ERR, "port %u Failed to create control flow rule.",
16242 					dev->data->port_id);
16243 				return ret;
16244 			}
16245 		}
16246 	}
16247 	return 0;
16248 }
16249 
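/*
 * Create control flow rules for a single unicast DMAC (optionally with a
 * VLAN ID) across all RSS expansion types, e.g. when a MAC address or VLAN
 * filter is added after the initial control flow rules were created.
 */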
16250 static int
16251 mlx5_flow_hw_ctrl_flow_single(struct rte_eth_dev *dev,
16252 			      const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
16253 			      const struct rte_ether_addr *addr,
16254 			      const uint16_t vlan)
16255 {
16256 	struct mlx5_priv *priv = dev->data->dev_private;
16257 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
16258 	unsigned int j;
16259 	int ret = 0;
16260 
16261 	if (!priv->dr_ctx) {
16262 		DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
16263 			       "HWS needs to be configured beforehand.",
16264 			       dev->data->port_id);
16265 		return 0;
16266 	}
16267 	if (!priv->hw_ctrl_rx) {
16268 		DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
16269 			dev->data->port_id);
16270 		rte_errno = EINVAL;
16271 		return -rte_errno;
16272 	}
16273 	hw_ctrl_rx = priv->hw_ctrl_rx;
16274 
16275 	/* TODO: refactor this part - it duplicates the table/template setup loop in mlx5_flow_hw_ctrl_flows(). */
16276 	for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
16277 		const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
16278 		const unsigned int pti = eth_pattern_type;
16279 		struct rte_flow_actions_template *at;
16280 		struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[pti][j];
16281 		const struct mlx5_flow_template_table_cfg cfg = {
16282 			.attr = tmpls->attr,
16283 			.external = 0,
16284 		};
16285 
16286 		if (!hw_ctrl_rx->rss[rss_type]) {
16287 			at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
16288 			if (!at)
16289 				return -rte_errno;
16290 			hw_ctrl_rx->rss[rss_type] = at;
16291 		} else {
16292 			at = hw_ctrl_rx->rss[rss_type];
16293 		}
16294 		if (!rss_type_is_requested(priv, rss_type))
16295 			continue;
16296 		if (!tmpls->tbl) {
16297 			tmpls->tbl = flow_hw_table_create(dev, &cfg,
16298 							  &tmpls->pt, 1, &at, 1, NULL);
16299 			if (!tmpls->tbl) {
16300 				DRV_LOG(ERR, "port %u Failed to create template table "
16301 					     "for control flow rules. Unable to create "
16302 					     "control flow rules.",
16303 					     dev->data->port_id);
16304 				return -rte_errno;
16305 			}
16306 		}
16307 
16308 		MLX5_ASSERT(eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC ||
16309 			    eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN);
16310 
16311 		if (eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC)
16312 			ret = __flow_hw_ctrl_flows_unicast_create(dev, tmpls->tbl, rss_type, addr);
16313 		else
16314 			ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tmpls->tbl, rss_type,
16315 								       addr, vlan);
16316 		if (ret) {
16317 			DRV_LOG(ERR, "port %u Failed to create unicast control flow rule.",
16318 				dev->data->port_id);
16319 			return ret;
16320 		}
16321 	}
16322 
16323 	return 0;
16324 }
16325 
16326 int
16327 mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
16328 			    const struct rte_ether_addr *addr)
16329 {
16330 	return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
16331 					     addr, 0);
16332 }
16333 
16334 int
16335 mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
16336 				    const struct rte_ether_addr *addr)
16337 {
16338 	struct mlx5_priv *priv = dev->data->dev_private;
16339 	struct mlx5_ctrl_flow_entry *entry;
16340 	struct mlx5_ctrl_flow_entry *tmp;
16341 	int ret;
16342 
16343 	/*
16344 	 * HWS does not have automatic RSS flow expansion,
16345 	 * so each variant of the control flow rule is a separate entry in the list.
16346 	 * In that case, the whole list must be traversed.
16347 	 */
16348 	entry = LIST_FIRST(&priv->hw_ctrl_flows);
16349 	while (entry != NULL) {
16350 		tmp = LIST_NEXT(entry, next);
16351 
16352 		if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
16353 		    !rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
16354 			entry = tmp;
16355 			continue;
16356 		}
16357 
16358 		ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
16359 		LIST_REMOVE(entry, next);
16360 		mlx5_free(entry);
16361 		if (ret)
16362 			return ret;
16363 
16364 		entry = tmp;
16365 	}
16366 	return 0;
16367 }
16368 
16369 int
16370 mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
16371 				 const struct rte_ether_addr *addr,
16372 				 const uint16_t vlan)
16373 {
16374 	return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
16375 					     addr, vlan);
16376 }
16377 
16378 int
16379 mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
16380 					 const struct rte_ether_addr *addr,
16381 					 const uint16_t vlan)
16382 {
16383 	struct mlx5_priv *priv = dev->data->dev_private;
16384 	struct mlx5_ctrl_flow_entry *entry;
16385 	struct mlx5_ctrl_flow_entry *tmp;
16386 	int ret;
16387 
16388 	/*
16389 	 * HWS does not have automatic RSS flow expansion,
16390 	 * so each variant of the control flow rule is a separate entry in the list.
16391 	 * In that case, the whole list must be traversed.
16392 	 */
16393 	entry = LIST_FIRST(&priv->hw_ctrl_flows);
16394 	while (entry != NULL) {
16395 		tmp = LIST_NEXT(entry, next);
16396 
16397 		if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
16398 		    !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
16399 		    vlan != entry->info.uc.vlan) {
16400 			entry = tmp;
16401 			continue;
16402 		}
16403 
16404 		ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
16405 		LIST_REMOVE(entry, next);
16406 		mlx5_free(entry);
16407 		if (ret)
16408 			return ret;
16409 
16410 		entry = tmp;
16411 	}
16412 	return 0;
16413 }
16414 
16415 static __rte_always_inline uint32_t
16416 mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
16417 {
16418 	uint32_t tbl_type;
16419 
16420 	if (domain->transfer)
16421 		tbl_type = MLX5DR_ACTION_FLAG_HWS_FDB;
16422 	else if (domain->egress)
16423 		tbl_type = MLX5DR_ACTION_FLAG_HWS_TX;
16424 	else if (domain->ingress)
16425 		tbl_type = MLX5DR_ACTION_FLAG_HWS_RX;
16426 	else
16427 		tbl_type = UINT32_MAX;
16428 	return tbl_type;
16429 }
16430 
16431 static struct mlx5_hw_encap_decap_action *
16432 __mlx5_reformat_create(struct rte_eth_dev *dev,
16433 		       const struct rte_flow_action_raw_encap *encap_conf,
16434 		       const struct rte_flow_indir_action_conf *domain,
16435 		       enum mlx5dr_action_type type)
16436 {
16437 	struct mlx5_priv *priv = dev->data->dev_private;
16438 	struct mlx5_hw_encap_decap_action *handle;
16439 	struct mlx5dr_action_reformat_header hdr;
16440 	uint32_t flags;
16441 
16442 	flags = mlx5_reformat_domain_to_tbl_type(domain);
16443 	if (flags == UINT32_MAX) {
16444 		DRV_LOG(ERR, "Reformat: invalid indirect action configuration");
16445 		return NULL;
16446 	}
16447 	flags |= (uint32_t)MLX5DR_ACTION_FLAG_SHARED;
16448 	/* Allocate new list entry. */
16449 	handle = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*handle), 0, SOCKET_ID_ANY);
16450 	if (!handle) {
16451 		DRV_LOG(ERR, "Reformat: failed to allocate reformat entry");
16452 		return NULL;
16453 	}
16454 	handle->action_type = type;
16455 	hdr.sz = encap_conf ? encap_conf->size : 0;
16456 	hdr.data = encap_conf ? encap_conf->data : NULL;
16457 	handle->action = mlx5dr_action_create_reformat(priv->dr_ctx,
16458 					type, 1, &hdr, 0, flags);
16459 	if (!handle->action) {
16460 		DRV_LOG(ERR, "Reformat: failed to create reformat action");
16461 		mlx5_free(handle);
16462 		return NULL;
16463 	}
16464 	return handle;
16465 }
16466 
16467 /**
16468  * Create mlx5 reformat action.
16469  *
16470  * @param[in] dev
16471  *   Pointer to rte_eth_dev structure.
16472  * @param[in] conf
16473  *   Pointer to the indirect action parameters.
16474  * @param[in] encap_action
16475  *   Pointer to the raw_encap action configuration.
16476  * @param[in] decap_action
16477  *   Pointer to the raw_decap action configuration.
16478  * @param[out] error
16479  *   Pointer to error structure.
16480  *
16481  * @return
16482  *   A valid shared action handle in case of success, NULL otherwise and
16483  *   rte_errno is set.
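 *
 * A minimal usage sketch (hypothetical header buffer and size; an encap-only
 * action yields an L2-to-tunnel-L2 reformat, and the size must stay within
 * [MLX5_ENCAPSULATION_DECISION_SIZE, MLX5_ENCAP_MAX_LEN]):
 *
 * @code{.c}
 * static uint8_t tunnel_hdr[64]; // outer headers prepared by the caller (placeholder size)
 * struct rte_flow_action_raw_encap encap = {
 *     .data = tunnel_hdr,
 *     .size = sizeof(tunnel_hdr),
 * };
 * struct rte_flow_action encap_action = {
 *     .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
 *     .conf = &encap,
 * };
 * struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * struct rte_flow_error error;
 * struct mlx5_hw_encap_decap_action *handle =
 *     mlx5_reformat_action_create(dev, &conf, &encap_action, NULL, &error);
 * @endcode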
16484  */
16485 struct mlx5_hw_encap_decap_action*
16486 mlx5_reformat_action_create(struct rte_eth_dev *dev,
16487 			    const struct rte_flow_indir_action_conf *conf,
16488 			    const struct rte_flow_action *encap_action,
16489 			    const struct rte_flow_action *decap_action,
16490 			    struct rte_flow_error *error)
16491 {
16492 	struct mlx5_priv *priv = dev->data->dev_private;
16493 	struct mlx5_hw_encap_decap_action *handle;
16494 	const struct rte_flow_action_raw_encap *encap = NULL;
16495 	const struct rte_flow_action_raw_decap *decap = NULL;
16496 	enum mlx5dr_action_type type = MLX5DR_ACTION_TYP_LAST;
16497 
16498 	MLX5_ASSERT(!encap_action || encap_action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP);
16499 	MLX5_ASSERT(!decap_action || decap_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP);
16500 	if (priv->sh->config.dv_flow_en != 2) {
16501 		rte_flow_error_set(error, ENOTSUP,
16502 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16503 				   "Reformat: supported only with HW Steering flow engine");
16504 		return NULL;
16505 	}
16506 	if (!conf || (conf->transfer + conf->egress + conf->ingress != 1)) {
16507 		rte_flow_error_set(error, EINVAL,
16508 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16509 				   "Reformat: domain should be specified");
16510 		return NULL;
16511 	}
16512 	if ((encap_action && !encap_action->conf) || (decap_action && !decap_action->conf)) {
16513 		rte_flow_error_set(error, EINVAL,
16514 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16515 				   "Reformat: missing action configuration");
16516 		return NULL;
16517 	}
16518 	if (encap_action && !decap_action) {
16519 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
16520 		if (!encap->size || encap->size > MLX5_ENCAP_MAX_LEN ||
16521 		    encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16522 			rte_flow_error_set(error, EINVAL,
16523 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16524 					   "Reformat: Invalid encap length");
16525 			return NULL;
16526 		}
16527 		type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
16528 	} else if (decap_action && !encap_action) {
16529 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
16530 		if (!decap->size || decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16531 			rte_flow_error_set(error, EINVAL,
16532 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16533 					   "Reformat: Invalid decap length");
16534 			return NULL;
16535 		}
16536 		type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
16537 	} else if (encap_action && decap_action) {
16538 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
16539 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
16540 		if (decap->size < MLX5_ENCAPSULATION_DECISION_SIZE &&
16541 		    encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
16542 		    encap->size <= MLX5_ENCAP_MAX_LEN) {
16543 			type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
16544 		} else if (decap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
16545 			   encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16546 			type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
16547 		} else {
16548 			rte_flow_error_set(error, EINVAL,
16549 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16550 					   "Reformat: Invalid decap & encap length");
16551 			return NULL;
16552 		}
16553 	} else if (!encap_action && !decap_action) {
16554 		rte_flow_error_set(error, EINVAL,
16555 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16556 				   "Reformat: Invalid decap & encap configurations");
16557 		return NULL;
16558 	}
16559 	if (!priv->dr_ctx) {
16560 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
16561 				   encap_action, "Reformat: HWS not supported");
16562 		return NULL;
16563 	}
16564 	handle = __mlx5_reformat_create(dev, encap, conf, type);
16565 	if (!handle) {
16566 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16567 				   "Reformat: failed to create indirect action");
16568 		return NULL;
16569 	}
16570 	return handle;
16571 }
16572 
16573 /**
16574  * Destroy the indirect reformat action.
16575  * Release action related resources on the NIC and the memory.
16576  * Lock free, (mutex should be acquired by caller).
16577  *
16578  * @param[in] dev
16579  *   Pointer to the Ethernet device structure.
16580  * @param[in] handle
16581  *   The indirect action list handle to be removed.
16582  * @param[out] error
16583  *   Perform verbose error reporting if not NULL. Initialized in case of
16584  *   error only.
16585  *
16586  * @return
16587  *   0 on success, otherwise negative errno value.
16588  */
16589 int
16590 mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
16591 			     struct rte_flow_action_list_handle *handle,
16592 			     struct rte_flow_error *error)
16593 {
16594 	struct mlx5_priv *priv = dev->data->dev_private;
16595 	struct mlx5_hw_encap_decap_action *action;
16596 
16597 	action = (struct mlx5_hw_encap_decap_action *)handle;
16598 	if (!priv->dr_ctx || !action)
16599 		return rte_flow_error_set(error, ENOTSUP,
16600 					  RTE_FLOW_ERROR_TYPE_ACTION, handle,
16601 					  "Reformat: invalid action handle");
16602 	mlx5dr_action_destroy(action->action);
16603 	mlx5_free(handle);
16604 	return 0;
16605 }
16606 
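/*
 * Check whether a pattern template item has any non-zero byte in its mask,
 * i.e. whether the corresponding item in a flow rule must carry a spec value.
 */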
16607 static bool
16608 flow_hw_is_item_masked(const struct rte_flow_item *item)
16609 {
16610 	const uint8_t *byte;
16611 	int size;
16612 	int i;
16613 
16614 	if (item->mask == NULL)
16615 		return false;
16616 
16617 	switch ((int)item->type) {
16618 	case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
16619 		size = sizeof(struct rte_flow_item_tag);
16620 		break;
16621 	case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
16622 		size = sizeof(struct mlx5_rte_flow_item_sq);
16623 		break;
16624 	default:
16625 		size = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_MASK, NULL, 0, item, NULL);
16626 		/*
16627 		 * Pattern template items are passed to this function.
16628 		 * These items were already validated, so error is not expected.
16629 		 * Also, if mask is NULL, then spec size is bigger than 0 always.
16630 		 * Also, since the mask is not NULL at this point, the returned size is always greater than 0.
16631 		MLX5_ASSERT(size > 0);
16632 	}
16633 
16634 	byte = (const uint8_t *)item->mask;
16635 	for (i = 0; i < size; ++i)
16636 		if (byte[i])
16637 			return true;
16638 
16639 	return false;
16640 }
16641 
16642 static int
16643 flow_hw_validate_rule_pattern(struct rte_eth_dev *dev,
16644 			      const struct rte_flow_template_table *table,
16645 			      const uint8_t pattern_template_idx,
16646 			      const struct rte_flow_item items[],
16647 			      struct rte_flow_error *error)
16648 {
16649 	const struct rte_flow_pattern_template *pt;
16650 	const struct rte_flow_item *pt_item;
16651 
16652 	if (pattern_template_idx >= table->nb_item_templates)
16653 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16654 					  "Pattern template index out of range");
16655 
16656 	pt = table->its[pattern_template_idx];
16657 	pt_item = pt->items;
16658 
16659 	/* If any item was prepended, skip it. */
16660 	if (pt->implicit_port || pt->implicit_tag)
16661 		pt_item++;
16662 
16663 	for (; pt_item->type != RTE_FLOW_ITEM_TYPE_END; pt_item++, items++) {
16664 		if (pt_item->type != items->type)
16665 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
16666 						  items, "Item type does not match the template");
16667 
16668 		/*
16669 		 * Assumptions:
16670 		 * - Currently mlx5dr layer contains info on which fields in masks are supported.
16671 		 * - This info is not exposed to PMD directly.
16672 		 * - Because of that, it is assumed that since pattern template is correct,
16673 		 *   then, items' masks in pattern template have nonzero values only in
16674 		 *   supported fields.
16675 		 *   This is known, because a temporary mlx5dr matcher is created during pattern
16676 		 *   template creation to validate the template.
16677 		 * - As a result, it is safe to look for nonzero bytes in mask to determine if
16678 		 *   item spec is needed in a flow rule.
16679 		 */
16680 		if (!flow_hw_is_item_masked(pt_item))
16681 			continue;
16682 
16683 		if (items->spec == NULL)
16684 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
16685 						  items, "Item spec is required");
16686 
16687 		switch (items->type) {
16688 		const struct rte_flow_item_ethdev *ethdev;
16689 		const struct rte_flow_item_tx_queue *tx_queue;
16690 		struct mlx5_txq_ctrl *txq;
16691 
16692 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
16693 			ethdev = items->spec;
16694 			if (flow_hw_validate_target_port_id(dev, ethdev->port_id)) {
16695 				return rte_flow_error_set(error, EINVAL,
16696 							  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, items,
16697 							  "Invalid port");
16698 			}
16699 			break;
16700 		case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
16701 			tx_queue = items->spec;
16702 			if (mlx5_is_external_txq(dev, tx_queue->tx_queue))
16703 				continue;
16704 			txq = mlx5_txq_get(dev, tx_queue->tx_queue);
16705 			if (!txq)
16706 				return rte_flow_error_set(error, EINVAL,
16707 							  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, items,
16708 							  "Invalid Tx queue");
16709 			mlx5_txq_release(dev, tx_queue->tx_queue);
			break;
16710 		default:
16711 			break;
16712 		}
16713 	}
16714 
16715 	return 0;
16716 }
16717 
16718 static bool
16719 flow_hw_valid_indirect_action_type(const struct rte_flow_action *user_action,
16720 				   const enum rte_flow_action_type expected_type)
16721 {
16722 	uint32_t user_indirect_type = MLX5_INDIRECT_ACTION_TYPE_GET(user_action->conf);
16723 	uint32_t expected_indirect_type;
16724 
16725 	switch ((int)expected_type) {
16726 	case RTE_FLOW_ACTION_TYPE_RSS:
16727 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
16728 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_RSS;
16729 		break;
16730 	case RTE_FLOW_ACTION_TYPE_COUNT:
16731 	case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
16732 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_COUNT;
16733 		break;
16734 	case RTE_FLOW_ACTION_TYPE_AGE:
16735 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_AGE;
16736 		break;
16737 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
16738 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
16739 		break;
16740 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
16741 	case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
16742 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
16743 		break;
16744 	case RTE_FLOW_ACTION_TYPE_QUOTA:
16745 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_QUOTA;
16746 		break;
16747 	default:
16748 		return false;
16749 	}
16750 
16751 	return user_indirect_type == expected_indirect_type;
16752 }
16753 
16754 static int
16755 flow_hw_validate_rule_actions(struct rte_eth_dev *dev,
16756 			      const struct rte_flow_template_table *table,
16757 			      const uint8_t actions_template_idx,
16758 			      const struct rte_flow_action actions[],
16759 			      struct rte_flow_error *error)
16760 {
16761 	const struct rte_flow_actions_template *at;
16762 	const struct mlx5_hw_actions *hw_acts;
16763 	const struct mlx5_action_construct_data *act_data;
16764 	unsigned int idx;
16765 
16766 	if (actions_template_idx >= table->nb_action_templates)
16767 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16768 					  "Actions template index out of range");
16769 
16770 	at = table->ats[actions_template_idx].action_template;
16771 	hw_acts = &table->ats[actions_template_idx].acts;
16772 
16773 	for (idx = 0; actions[idx].type != RTE_FLOW_ACTION_TYPE_END; ++idx) {
16774 		const struct rte_flow_action *user_action = &actions[idx];
16775 		const struct rte_flow_action *tmpl_action = &at->orig_actions[idx];
16776 
16777 		if (user_action->type != tmpl_action->type)
16778 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
16779 						  user_action,
16780 						  "Action type does not match type specified in "
16781 						  "actions template");
16782 	}
16783 
16784 	/*
16785 	 * Only go through unmasked actions and check if configuration is provided.
16786 	 * Configuration of masked actions is ignored.
16787 	 */
16788 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
16789 		const struct rte_flow_action *user_action;
16790 
16791 		user_action = &actions[act_data->action_src];
16792 
16793 		/* Skip actions which do not require conf. */
16794 		switch ((int)act_data->type) {
16795 		case RTE_FLOW_ACTION_TYPE_COUNT:
16796 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
16797 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
16798 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
16799 			continue;
16800 		default:
16801 			break;
16802 		}
16803 
16804 		if (user_action->conf == NULL)
16805 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
16806 						  user_action,
16807 						  "Action requires configuration");
16808 
16809 		switch ((int)user_action->type) {
16810 		enum rte_flow_action_type expected_type;
16811 		const struct rte_flow_action_ethdev *ethdev;
16812 		const struct rte_flow_action_modify_field *mf;
16813 
16814 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
16815 			expected_type = act_data->indirect.expected_type;
16816 			if (!flow_hw_valid_indirect_action_type(user_action, expected_type))
16817 				return rte_flow_error_set(error, EINVAL,
16818 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16819 							  user_action,
16820 							  "Indirect action type does not match "
16821 							  "the type specified in the mask");
16822 			break;
16823 		case RTE_FLOW_ACTION_TYPE_QUEUE:
16824 			if (mlx5_flow_validate_target_queue(dev, user_action, error))
16825 				return -rte_errno;
16826 			break;
16827 		case RTE_FLOW_ACTION_TYPE_RSS:
16828 			if (mlx5_validate_action_rss(dev, user_action, error))
16829 				return -rte_errno;
16830 			break;
16831 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
16832 			/* TODO: Compare other fields if needed. */
16833 			mf = user_action->conf;
16834 			if (mf->operation != act_data->modify_header.action.operation ||
16835 			    mf->src.field != act_data->modify_header.action.src.field ||
16836 			    mf->dst.field != act_data->modify_header.action.dst.field ||
16837 			    mf->width != act_data->modify_header.action.width)
16838 				return rte_flow_error_set(error, EINVAL,
16839 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16840 							  user_action,
16841 							  "Modify field configuration does not "
16842 							  "match configuration from actions "
16843 							  "template");
16844 			break;
16845 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
16846 			ethdev = user_action->conf;
16847 			if (flow_hw_validate_target_port_id(dev, ethdev->port_id)) {
16848 				return rte_flow_error_set(error, EINVAL,
16849 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16850 							  user_action, "Invalid port");
16851 			}
16852 			break;
16853 		default:
16854 			break;
16855 		}
16856 	}
16857 
16858 	return 0;
16859 }
16860 
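/*
 * Common validation for all async flow operations: for application-created
 * (external) tables, the queue index must be below the number of queues
 * configured through rte_flow_configure().
 */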
16861 static int
16862 flow_hw_async_op_validate(struct rte_eth_dev *dev,
16863 			  const uint32_t queue,
16864 			  const struct rte_flow_template_table *table,
16865 			  struct rte_flow_error *error)
16866 {
16867 	struct mlx5_priv *priv = dev->data->dev_private;
16868 
16869 	MLX5_ASSERT(table != NULL);
16870 
16871 	if (table->cfg.external && queue >= priv->hw_attr->nb_queue)
16872 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16873 					  "Incorrect queue");
16874 
16875 	return 0;
16876 }
16877 
16878 /**
16879  * Validate user input for rte_flow_async_create() implementation.
16880  *
16881  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16882  *
16883  * @param[in] dev
16884  *   Pointer to the rte_eth_dev structure.
16885  * @param[in] queue
16886  *   The queue to create the flow.
16887  * @param[in] table
16888  *   Pointer to template table.
 * @param[in] insertion_type
 *   Insertion type expected by the calling API, checked against the table configuration.
16889  * @param[in] rule_index
16890  *   Rule index in the table, used with index-based insertion.
16891  * @param[in] items
16892  *   Items with flow spec value.
16893  * @param[in] pattern_template_index
16894  *   The item pattern flow follows from the table.
16895  * @param[in] actions
16896  *   Action with flow spec value.
16897  * @param[in] action_template_index
16898  *   The action pattern flow follows from the table.
16899  * @param[out] error
16900  *   Pointer to error structure.
16901  *
16902  * @return
16903  *    0 if user input is valid.
16904  *    Negative errno otherwise, rte_errno and error struct are populated.
16905  */
16906 static int
16907 flow_hw_async_create_validate(struct rte_eth_dev *dev,
16908 			      const uint32_t queue,
16909 			      const struct rte_flow_template_table *table,
16910 			      enum rte_flow_table_insertion_type insertion_type,
16911 			      uint32_t rule_index,
16912 			      const struct rte_flow_item items[],
16913 			      const uint8_t pattern_template_index,
16914 			      const struct rte_flow_action actions[],
16915 			      const uint8_t action_template_index,
16916 			      struct rte_flow_error *error)
16917 {
16918 	if (flow_hw_async_op_validate(dev, queue, table, error))
16919 		return -rte_errno;
16920 
16921 	if (insertion_type != table->cfg.attr.insertion_type)
16922 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16923 					  NULL, "Flow rule insertion type mismatch with table configuration");
16924 
16925 	if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
16926 		if (rule_index >= table->cfg.attr.nb_flows)
16927 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16928 						  NULL, "Flow rule index exceeds table size");
16929 
16930 	if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_INDEX)
16931 		if (flow_hw_validate_rule_pattern(dev, table, pattern_template_index, items, error))
16932 			return -rte_errno;
16933 
16934 	if (flow_hw_validate_rule_actions(dev, table, action_template_index, actions, error))
16935 		return -rte_errno;
16936 
16937 	return 0;
16938 }
16939 
16940 /**
16941  * Validate user input for rte_flow_async_update() implementation.
16942  *
16943  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16944  *
16945  * @param[in] dev
16946  *   Pointer to the rte_eth_dev structure.
16947  * @param[in] queue
16948  *   The queue to create the flow.
16949  * @param[in] flow
16950  *   Flow rule to be updated.
16951  * @param[in] actions
16952  *   Action with flow spec value.
16953  * @param[in] action_template_index
16954  *   The action pattern flow follows from the table.
16955  * @param[out] error
16956  *   Pointer to error structure.
16957  *
16958  * @return
16959  *    0 if user input is valid.
16960  *    Negative errno otherwise, rte_errno and error struct are set.
16961  */
16962 static int
16963 flow_hw_async_update_validate(struct rte_eth_dev *dev,
16964 			      const uint32_t queue,
16965 			      const struct rte_flow_hw *flow,
16966 			      const struct rte_flow_action actions[],
16967 			      const uint8_t action_template_index,
16968 			      struct rte_flow_error *error)
16969 {
16970 	if (flow_hw_async_op_validate(dev, queue, flow->table, error))
16971 		return -rte_errno;
16972 
16973 	if (flow_hw_validate_rule_actions(dev, flow->table, action_template_index, actions, error))
16974 		return -rte_errno;
16975 
16976 	return 0;
16977 }
16978 
16979 /**
16980  * Validate user input for rte_flow_async_destroy() implementation.
16981  *
16982  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16983  *
16984  * @param[in] dev
16985  *   Pointer to the rte_eth_dev structure.
16986  * @param[in] queue
16987  *   The queue to create the flow.
16988  * @param[in] flow
16989  *   Flow rule to be destroyed.
16990  * @param[out] error
16991  *   Pointer to error structure.
16992  *
16993  * @return
16994  *    0 if user input is valid.
16995  *    Negative errno otherwise, rte_errno and error struct are set.
16996  */
16997 static int
16998 flow_hw_async_destroy_validate(struct rte_eth_dev *dev,
16999 			       const uint32_t queue,
17000 			       const struct rte_flow_hw *flow,
17001 			       struct rte_flow_error *error)
17002 {
17003 	if (flow_hw_async_op_validate(dev, queue, flow->table, error))
17004 		return -rte_errno;
17005 
17006 	return 0;
17007 }
17008 
17009 static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops = {
17010 	.async_create = flow_hw_async_flow_create,
17011 	.async_create_by_index = flow_hw_async_flow_create_by_index,
17012 	.async_create_by_index_with_pattern = flow_hw_async_flow_create_by_index_with_pattern,
17013 	.async_actions_update = flow_hw_async_flow_update,
17014 	.async_destroy = flow_hw_async_flow_destroy,
17015 	.push = flow_hw_push,
17016 	.pull = flow_hw_pull,
17017 	.async_action_handle_create = flow_hw_action_handle_create,
17018 	.async_action_handle_destroy = flow_hw_action_handle_destroy,
17019 	.async_action_handle_update = flow_hw_action_handle_update,
17020 	.async_action_handle_query = flow_hw_action_handle_query,
17021 	.async_action_handle_query_update = flow_hw_async_action_handle_query_update,
17022 	.async_action_list_handle_create = flow_hw_async_action_list_handle_create,
17023 	.async_action_list_handle_destroy = flow_hw_async_action_list_handle_destroy,
17024 	.async_action_list_handle_query_update =
17025 		flow_hw_async_action_list_handle_query_update,
17026 };
17027 
17028 #endif
17029