xref: /dpdk/drivers/net/mlx5/mlx5_flow_hw.c (revision 37dda90ee15b7098bc48356868a87d34f727eecc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include <rte_flow.h>
6 #include <rte_flow_driver.h>
7 #include <rte_stdatomic.h>
8 
9 #include <mlx5_malloc.h>
10 
11 #include "mlx5.h"
12 #include "mlx5_common.h"
13 #include "mlx5_defs.h"
14 #include "mlx5_flow.h"
15 #include "mlx5_flow_os.h"
16 #include "mlx5_rx.h"
17 
18 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
19 #include "mlx5_hws_cnt.h"
20 
21 /** Fast path async flow API functions. */
22 static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops;
23 
24 /* The maximum number of actions supported in a flow. */
25 #define MLX5_HW_MAX_ACTS 16
26 
27 /*
28  * The default ipool size threshold that determines which per_core_cache
29  * value to set.
30  */
31 #define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
32 /* The default min local cache size. */
33 #define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
34 
35 /* Default push burst threshold. */
36 #define BURST_THR 32u
37 
38 /* Default queue to flush the flows. */
39 #define MLX5_DEFAULT_FLUSH_QUEUE 0
40 
41 /* Maximum number of rules in control flow tables. */
42 #define MLX5_HW_CTRL_FLOW_NB_RULES (4096)
43 
44 /* Lowest flow group usable by an application if group translation is done. */
45 #define MLX5_HW_LOWEST_USABLE_GROUP (1)
46 
47 /* Maximum group index usable by user applications for transfer flows. */
48 #define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)
49 
50 /* Maximum group index usable by user applications for egress flows. */
51 #define MLX5_HW_MAX_EGRESS_GROUP (UINT32_MAX - 1)
52 
53 /* Lowest priority for HW root table. */
54 #define MLX5_HW_LOWEST_PRIO_ROOT 15
55 
56 /* Lowest priority for HW non-root table. */
57 #define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)
58 
59 /* Priorities for Rx control flow rules. */
60 #define MLX5_HW_CTRL_RX_PRIO_L2 (MLX5_HW_LOWEST_PRIO_ROOT)
61 #define MLX5_HW_CTRL_RX_PRIO_L3 (MLX5_HW_LOWEST_PRIO_ROOT - 1)
62 #define MLX5_HW_CTRL_RX_PRIO_L4 (MLX5_HW_LOWEST_PRIO_ROOT - 2)
63 
64 #define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
65 #define MLX5_HW_VLAN_PUSH_VID_IDX 1
66 #define MLX5_HW_VLAN_PUSH_PCP_IDX 2
67 
68 #define MLX5_MIRROR_MAX_CLONES_NUM 3
69 #define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
70 
71 #define MLX5_HW_PORT_IS_PROXY(priv) \
72 	(!!((priv)->sh->esw_mode && (priv)->master))
73 
74 
75 struct mlx5_indlst_legacy {
76 	struct mlx5_indirect_list indirect;
77 	struct rte_flow_action_handle *handle;
78 	enum rte_flow_action_type legacy_type;
79 };
80 
81 #define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
82 (((const struct encap_type *)(ptr))->definition)
83 
84 /**
85  * Returns the size of a struct with the following layout:
86  *
87  * @code{.c}
88  * struct rte_flow_hw {
89  *     // rte_flow_hw fields
90  *     uint8_t rule[mlx5dr_rule_get_handle_size()];
91  * };
92  * @endcode
93  *
94  * Such a struct is used as the basic container for a HW Steering flow rule.
95  */
96 static size_t
97 mlx5_flow_hw_entry_size(void)
98 {
99 	return sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
100 }
101 
102 /**
103  * Returns the size of an "auxed" rte_flow_hw structure, which is assumed to be laid out as follows:
104  *
105  * @code{.c}
106  * struct {
107  *     struct rte_flow_hw {
108  *         // rte_flow_hw fields
109  *         uint8_t rule[mlx5dr_rule_get_handle_size()];
110  *     } flow;
111  *     struct rte_flow_hw_aux aux;
112  * };
113  * @endcode
114  *
115  * Such a struct is used whenever rte_flow_hw_aux cannot be allocated separately from the rte_flow_hw,
116  * e.g., when the table is resizable.
117  */
118 static size_t
119 mlx5_flow_hw_auxed_entry_size(void)
120 {
121 	size_t rule_size = mlx5dr_rule_get_handle_size();
122 
123 	return sizeof(struct rte_flow_hw) + rule_size + sizeof(struct rte_flow_hw_aux);
124 }
125 
126 /**
127  * Returns a valid pointer to the rte_flow_hw_aux associated with the given rte_flow_hw,
128  * depending on the template table configuration.
129  */
130 static __rte_always_inline struct rte_flow_hw_aux *
131 mlx5_flow_hw_aux(uint16_t port_id, struct rte_flow_hw *flow)
132 {
133 	struct rte_flow_template_table *table = flow->table;
134 
135 	if (rte_flow_template_table_resizable(port_id, &table->cfg.attr)) {
136 		size_t offset = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
137 
138 		return RTE_PTR_ADD(flow, offset);
139 	} else {
140 		return ((flow->nt_rule) ? flow->nt2hws->flow_aux : &table->flow_aux[flow->idx - 1]);
141 	}
142 }
143 
144 static __rte_always_inline void
145 mlx5_flow_hw_aux_set_age_idx(struct rte_flow_hw *flow,
146 			     struct rte_flow_hw_aux *aux,
147 			     uint32_t age_idx)
148 {
149 	/*
150 	 * Only when creating a flow rule, the type will be set explicitly.
151 	 * Otherwise, it should be none in the rule update case.
152 	 */
153 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
154 		aux->upd.age_idx = age_idx;
155 	else
156 		aux->orig.age_idx = age_idx;
157 }
158 
159 static __rte_always_inline uint32_t
160 mlx5_flow_hw_aux_get_age_idx(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
161 {
162 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
163 		return aux->upd.age_idx;
164 	else
165 		return aux->orig.age_idx;
166 }
167 
168 static __rte_always_inline void
169 mlx5_flow_hw_aux_set_mtr_id(struct rte_flow_hw *flow,
170 			    struct rte_flow_hw_aux *aux,
171 			    uint32_t mtr_id)
172 {
173 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
174 		aux->upd.mtr_id = mtr_id;
175 	else
176 		aux->orig.mtr_id = mtr_id;
177 }
178 
179 static __rte_always_inline uint32_t
180 mlx5_flow_hw_aux_get_mtr_id(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
181 {
182 	if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
183 		return aux->upd.mtr_id;
184 	else
185 		return aux->orig.mtr_id;
186 }
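
/*
 * Illustrative usage sketch of the aux helpers above. It is not part of the
 * driver logic; "port_id", "flow" and "age_idx" are hypothetical local
 * variables used only for the example:
 *
 * @code{.c}
 * struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(port_id, flow);
 *
 * mlx5_flow_hw_aux_set_age_idx(flow, aux, age_idx);
 * // Reads aux->upd on rule update operations, aux->orig otherwise.
 * uint32_t idx = mlx5_flow_hw_aux_get_age_idx(flow, aux);
 * @endcode
 */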
187 
188 static __rte_always_inline struct mlx5_hw_q_job *
189 flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
190 			const struct rte_flow_action_handle *handle,
191 			void *user_data, void *query_data,
192 			enum mlx5_hw_job_type type,
193 			enum mlx5_hw_indirect_type indirect_type,
194 			struct rte_flow_error *error);
195 static void
196 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow,
197 			  struct rte_flow_error *error);
198 
199 static int
200 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
201 			       struct rte_flow_template_table *tbl,
202 			       struct mlx5_multi_pattern_segment *segment,
203 			       uint32_t bulk_size,
204 			       struct rte_flow_error *error);
205 static void
206 mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment);
207 
208 static __rte_always_inline enum mlx5_indirect_list_type
209 flow_hw_inlist_type_get(const struct rte_flow_action *actions);
210 
211 static int
212 flow_hw_allocate_actions(struct rte_eth_dev *dev,
213 			 uint64_t action_flags,
214 			 struct rte_flow_error *error);
215 
216 bool
217 mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error)
218 {
219 	const struct mlx5_priv *priv = dev->data->dev_private;
220 
221 	if (!priv->dr_ctx) {
222 		rte_flow_error_set(error, EINVAL,
223 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
224 				   "non-template flow engine was not configured");
225 		return false;
226 	}
227 	return true;
228 }
229 
235 static __rte_always_inline int
236 mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
237 {
238 	switch (type) {
239 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
240 		return 0;
241 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
242 		return 1;
243 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
244 		return 2;
245 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
246 		return 3;
247 	default:
248 		break;
249 	}
250 	return -1;
251 }
252 
253 /* Include only supported reformat actions for the BWC non-template API. */
254 static __rte_always_inline int
255 mlx5_bwc_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
256 {
257 	switch (type) {
258 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
259 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
260 	case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
261 	case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
262 		return mlx5_multi_pattern_reformat_to_index(type);
263 	default:
264 		break;
265 	}
266 	return -1;
267 }
268 
269 static __rte_always_inline enum mlx5dr_action_type
270 mlx5_multi_pattern_reformat_index_to_type(uint32_t ix)
271 {
272 	switch (ix) {
273 	case 0:
274 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
275 	case 1:
276 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
277 	case 2:
278 		return MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
279 	case 3:
280 		return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
281 	default:
282 		break;
283 	}
284 	return MLX5DR_ACTION_TYP_MAX;
285 }
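
/*
 * The two helpers above form an inverse mapping between multi-pattern
 * reformat action types and their table slots. A minimal sanity sketch
 * (illustrative only):
 *
 * @code{.c}
 * enum mlx5dr_action_type t = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
 * int ix = mlx5_multi_pattern_reformat_to_index(t); // 3
 *
 * MLX5_ASSERT(mlx5_multi_pattern_reformat_index_to_type(ix) == t);
 * @endcode
 */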
286 
287 static inline enum mlx5dr_table_type
288 get_mlx5dr_table_type(const struct rte_flow_attr *attr)
289 {
290 	enum mlx5dr_table_type type;
291 
292 	if (attr->transfer)
293 		type = MLX5DR_TABLE_TYPE_FDB;
294 	else if (attr->egress)
295 		type = MLX5DR_TABLE_TYPE_NIC_TX;
296 	else
297 		type = MLX5DR_TABLE_TYPE_NIC_RX;
298 	return type;
299 }
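
/*
 * For example (illustrative only), an attribute with .transfer set selects
 * MLX5DR_TABLE_TYPE_FDB even if .egress is also set, since the transfer
 * attribute takes precedence in the checks above.
 */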
300 
301 /* Non-template default queue size used for the internal control queue. */
302 #define MLX5_NT_DEFAULT_QUEUE_SIZE 32
303 
304 struct mlx5_mirror_clone {
305 	enum rte_flow_action_type type;
306 	void *action_ctx;
307 };
308 
309 struct mlx5_mirror {
310 	struct mlx5_indirect_list indirect;
311 	uint32_t clones_num;
312 	struct mlx5dr_action *mirror_action;
313 	struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
314 };
315 
316 static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
317 static int flow_hw_translate_group(struct rte_eth_dev *dev,
318 				   const struct mlx5_flow_template_table_cfg *cfg,
319 				   uint32_t group,
320 				   uint32_t *table_group,
321 				   struct rte_flow_error *error);
322 static __rte_always_inline int
323 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
324 			       struct mlx5_modification_cmd *mhdr_cmd,
325 			       struct mlx5_action_construct_data *act_data,
326 			       const struct mlx5_hw_actions *hw_acts,
327 			       const struct rte_flow_action *action);
328 static void
329 flow_hw_construct_quota(struct mlx5_priv *priv,
330 			struct mlx5dr_rule_action *rule_act, uint32_t qid);
331 
332 static int
333 mlx5_flow_ct_init(struct rte_eth_dev *dev,
334 		  uint32_t nb_conn_tracks,
335 		  uint16_t nb_queue);
336 
337 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
338 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
339 
340 static int flow_hw_async_create_validate(struct rte_eth_dev *dev,
341 					 const uint32_t queue,
342 					 const struct rte_flow_template_table *table,
343 					 enum rte_flow_table_insertion_type insertion_type,
344 					 const uint32_t rule_index,
345 					 const struct rte_flow_item items[],
346 					 const uint8_t pattern_template_index,
347 					 const struct rte_flow_action actions[],
348 					 const uint8_t action_template_index,
349 					 struct rte_flow_error *error);
350 static int flow_hw_async_update_validate(struct rte_eth_dev *dev,
351 					 const uint32_t queue,
352 					 const struct rte_flow_hw *flow,
353 					 const struct rte_flow_action actions[],
354 					 const uint8_t action_template_index,
355 					 struct rte_flow_error *error);
356 static int flow_hw_async_destroy_validate(struct rte_eth_dev *dev,
357 					  const uint32_t queue,
358 					  const struct rte_flow_hw *flow,
359 					  struct rte_flow_error *error);
360 
361 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
362 
363 /* DR action flags per table type. */
364 static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
365 				[MLX5DR_TABLE_TYPE_MAX] = {
366 	{
367 		MLX5DR_ACTION_FLAG_ROOT_RX,
368 		MLX5DR_ACTION_FLAG_ROOT_TX,
369 		MLX5DR_ACTION_FLAG_ROOT_FDB,
370 	},
371 	{
372 		MLX5DR_ACTION_FLAG_HWS_RX,
373 		MLX5DR_ACTION_FLAG_HWS_TX,
374 		MLX5DR_ACTION_FLAG_HWS_FDB,
375 	},
376 };
377 
378 /* Ethernet item spec for promiscuous mode. */
379 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_spec = {
380 	.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
381 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
382 	.hdr.ether_type = 0,
383 };
384 /* Ethernet item mask for promiscuous mode. */
385 static const struct rte_flow_item_eth ctrl_rx_eth_promisc_mask = {
386 	.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
387 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
388 	.hdr.ether_type = 0,
389 };
390 
391 /* Ethernet item spec for all multicast mode. */
392 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_spec = {
393 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
394 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
395 	.hdr.ether_type = 0,
396 };
397 /* Ethernet item mask for all multicast mode. */
398 static const struct rte_flow_item_eth ctrl_rx_eth_mcast_mask = {
399 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
400 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
401 	.hdr.ether_type = 0,
402 };
403 
404 /* Ethernet item spec for IPv4 multicast traffic. */
405 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_spec = {
406 	.hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 },
407 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
408 	.hdr.ether_type = 0,
409 };
410 /* Ethernet item mask for IPv4 multicast traffic. */
411 static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_mask = {
412 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 },
413 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
414 	.hdr.ether_type = 0,
415 };
416 
417 /* Ethernet item spec for IPv6 multicast traffic. */
418 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_spec = {
419 	.hdr.dst_addr.addr_bytes = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 },
420 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
421 	.hdr.ether_type = 0,
422 };
423 /* Ethernet item mask for IPv6 multicast traffic. */
424 static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_mask = {
425 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
426 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
427 	.hdr.ether_type = 0,
428 };
429 
430 /* Ethernet item mask for unicast traffic. */
431 static const struct rte_flow_item_eth ctrl_rx_eth_dmac_mask = {
432 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
433 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
434 	.hdr.ether_type = 0,
435 };
436 
437 /* Ethernet item spec for broadcast. */
438 static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
439 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
440 	.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
441 	.hdr.ether_type = 0,
442 };
443 
444 static inline uint32_t
445 flow_hw_q_pending(struct mlx5_priv *priv, uint32_t queue)
446 {
447 	struct mlx5_hw_q *q = &priv->hw_q[queue];
448 
449 	MLX5_ASSERT(q->size >= q->job_idx);
450 	return (q->size - q->job_idx) + q->ongoing_flow_ops;
451 }
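
/*
 * A worked example of the arithmetic above (hypothetical values): with
 * q->size == 64, q->job_idx == 60 and q->ongoing_flow_ops == 3, the number
 * of pending operations reported for the queue is (64 - 60) + 3 = 7.
 */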
452 
453 static inline void
454 flow_hw_q_inc_flow_ops(struct mlx5_priv *priv, uint32_t queue)
455 {
456 	struct mlx5_hw_q *q = &priv->hw_q[queue];
457 
458 	q->ongoing_flow_ops++;
459 }
460 
461 static inline void
462 flow_hw_q_dec_flow_ops(struct mlx5_priv *priv, uint32_t queue)
463 {
464 	struct mlx5_hw_q *q = &priv->hw_q[queue];
465 
466 	q->ongoing_flow_ops--;
467 }
468 
469 static inline enum mlx5dr_matcher_insert_mode
470 flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)
471 {
472 	if (insert_type == RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
473 		return MLX5DR_MATCHER_INSERT_BY_HASH;
474 	else
475 		return MLX5DR_MATCHER_INSERT_BY_INDEX;
476 }
477 
478 static inline enum mlx5dr_matcher_distribute_mode
479 flow_hw_matcher_distribute_mode_get(enum rte_flow_table_hash_func hash_func)
480 {
481 	if (hash_func == RTE_FLOW_TABLE_HASH_FUNC_LINEAR)
482 		return MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR;
483 	else
484 		return MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
485 }
486 
487 /**
488  * Set the hash fields according to the @p rss_desc information.
489  *
490  * @param[in] rss_desc
491  *   Pointer to the mlx5_flow_rss_desc.
492  * @param[out] hash_fields
493  *   Pointer to the RSS hash fields.
494  */
495 static void
496 flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
497 		       uint64_t *hash_fields)
498 {
499 	uint64_t fields = 0;
500 	int rss_inner = 0;
501 	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
502 
503 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
504 	if (rss_desc->level >= 2)
505 		rss_inner = 1;
506 #endif
507 	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
508 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
509 			fields |= IBV_RX_HASH_SRC_IPV4;
510 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
511 			fields |= IBV_RX_HASH_DST_IPV4;
512 		else
513 			fields |= MLX5_IPV4_IBV_RX_HASH;
514 	} else if (rss_types & MLX5_IPV6_LAYER_TYPES) {
515 		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
516 			fields |= IBV_RX_HASH_SRC_IPV6;
517 		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
518 			fields |= IBV_RX_HASH_DST_IPV6;
519 		else
520 			fields |= MLX5_IPV6_IBV_RX_HASH;
521 	}
522 	if (rss_types & RTE_ETH_RSS_UDP) {
523 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
524 			fields |= IBV_RX_HASH_SRC_PORT_UDP;
525 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
526 			fields |= IBV_RX_HASH_DST_PORT_UDP;
527 		else
528 			fields |= MLX5_UDP_IBV_RX_HASH;
529 	} else if (rss_types & RTE_ETH_RSS_TCP) {
530 		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
531 			fields |= IBV_RX_HASH_SRC_PORT_TCP;
532 		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
533 			fields |= IBV_RX_HASH_DST_PORT_TCP;
534 		else
535 			fields |= MLX5_TCP_IBV_RX_HASH;
536 	}
537 	if (rss_types & RTE_ETH_RSS_ESP)
538 		fields |= IBV_RX_HASH_IPSEC_SPI;
539 	if (rss_inner)
540 		fields |= IBV_RX_HASH_INNER;
541 	*hash_fields |= fields;
542 }
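
/*
 * A minimal sketch of the mapping implemented above. It is illustrative only;
 * "desc" and "hash_fields" are hypothetical local variables:
 *
 * @code{.c}
 * struct mlx5_flow_rss_desc desc = {
 *     .types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
 *     .level = 1,
 * };
 * uint64_t hash_fields = 0;
 *
 * flow_hw_hashfields_set(&desc, &hash_fields);
 * // hash_fields == MLX5_IPV4_IBV_RX_HASH | IBV_RX_HASH_SRC_PORT_UDP
 * @endcode
 */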
543 
544 /**
545  * Generate the matching pattern item flags.
546  *
547  * @param[in] items
548  *   Pointer to the list of items.
549  *
550  * @return
551  *   Matching item flags. The RSS hash field function
552  *   silently ignores unsupported flags.
553  */
554 static uint64_t
555 flow_hw_matching_item_flags_get(const struct rte_flow_item items[])
556 {
557 	uint64_t item_flags = 0;
558 	uint64_t last_item = 0;
559 
560 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
561 		enum rte_flow_item_flex_tunnel_mode tunnel_mode = FLEX_TUNNEL_MODE_SINGLE;
562 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
563 		int item_type = items->type;
564 
565 		switch (item_type) {
566 		case RTE_FLOW_ITEM_TYPE_IPV4:
567 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
568 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
569 			break;
570 		case RTE_FLOW_ITEM_TYPE_IPV6:
571 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
572 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
573 			break;
574 		case RTE_FLOW_ITEM_TYPE_TCP:
575 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
576 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
577 			break;
578 		case RTE_FLOW_ITEM_TYPE_UDP:
579 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
580 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
581 			break;
582 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
583 			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
584 					     MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
585 			break;
586 		case RTE_FLOW_ITEM_TYPE_GRE:
587 			last_item = MLX5_FLOW_LAYER_GRE;
588 			break;
589 		case RTE_FLOW_ITEM_TYPE_NVGRE:
590 			last_item = MLX5_FLOW_LAYER_GRE;
591 			break;
592 		case RTE_FLOW_ITEM_TYPE_VXLAN:
593 			last_item = MLX5_FLOW_LAYER_VXLAN;
594 			break;
595 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
596 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
597 			break;
598 		case RTE_FLOW_ITEM_TYPE_GENEVE:
599 			last_item = MLX5_FLOW_LAYER_GENEVE;
600 			break;
601 		case RTE_FLOW_ITEM_TYPE_MPLS:
602 			last_item = MLX5_FLOW_LAYER_MPLS;
603 			break;
604 		case RTE_FLOW_ITEM_TYPE_GTP:
605 			last_item = MLX5_FLOW_LAYER_GTP;
606 			break;
607 		case RTE_FLOW_ITEM_TYPE_COMPARE:
608 			last_item = MLX5_FLOW_ITEM_COMPARE;
609 			break;
610 		case RTE_FLOW_ITEM_TYPE_FLEX:
611 			mlx5_flex_get_tunnel_mode(items, &tunnel_mode);
612 			last_item = tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
613 					MLX5_FLOW_ITEM_FLEX_TUNNEL :
614 					tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
615 						MLX5_FLOW_ITEM_OUTER_FLEX;
616 			break;
617 		default:
618 			break;
619 		}
620 		item_flags |= last_item;
621 	}
622 	return item_flags;
623 }
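
/*
 * For example (illustrative only), a pattern of
 * eth / ipv4 / udp / vxlan / eth / ipv4 / end yields
 * MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP |
 * MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_INNER_L3_IPV4; the items after
 * the VXLAN header are classified as inner because VXLAN sets a tunnel
 * layer flag.
 */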
624 
625 static uint64_t
626 flow_hw_action_flags_get(const struct rte_flow_action actions[],
627 			 const struct rte_flow_action **qrss,
628 			 const struct rte_flow_action **mark,
629 			 int *encap_idx,
630 			 int *act_cnt,
631 			 struct rte_flow_error *error)
632 {
633 	uint64_t action_flags = 0;
634 	const struct rte_flow_action *action;
635 	const struct rte_flow_action_raw_encap *raw_encap;
636 	int raw_decap_idx = -1;
637 	int action_idx;
638 
639 	*encap_idx = -1;
640 	action_idx = 0;
641 	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
642 		int type = (int)action->type;
643 		switch (type) {
644 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
645 			switch (MLX5_INDIRECT_ACTION_TYPE_GET(action->conf)) {
646 			case MLX5_INDIRECT_ACTION_TYPE_RSS:
647 				goto rss;
648 			case MLX5_INDIRECT_ACTION_TYPE_AGE:
649 				goto age;
650 			case MLX5_INDIRECT_ACTION_TYPE_COUNT:
651 				goto count;
652 			case MLX5_INDIRECT_ACTION_TYPE_CT:
653 				goto ct;
654 			case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
655 				goto meter;
656 			default:
657 				goto error;
658 			}
659 			break;
660 		case RTE_FLOW_ACTION_TYPE_DROP:
661 			action_flags |= MLX5_FLOW_ACTION_DROP;
662 			break;
663 		case RTE_FLOW_ACTION_TYPE_FLAG:
664 			action_flags |= MLX5_FLOW_ACTION_FLAG;
665 			break;
666 		case RTE_FLOW_ACTION_TYPE_MARK:
667 			action_flags |= MLX5_FLOW_ACTION_MARK;
668 			*mark = action;
669 			break;
670 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
671 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
672 			break;
673 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
674 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
675 			break;
676 		case RTE_FLOW_ACTION_TYPE_JUMP:
677 			action_flags |= MLX5_FLOW_ACTION_JUMP;
678 			break;
679 		case RTE_FLOW_ACTION_TYPE_QUEUE:
680 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
681 			*qrss = action;
682 			break;
683 		case RTE_FLOW_ACTION_TYPE_RSS:
684 rss:
685 			action_flags |= MLX5_FLOW_ACTION_RSS;
686 			*qrss = action;
687 			break;
688 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
689 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
690 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
691 			*encap_idx = action_idx;
692 			break;
693 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
694 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
695 			raw_encap = action->conf;
696 			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
697 				*encap_idx = raw_decap_idx != -1 ?
698 					     raw_decap_idx : action_idx;
699 			break;
700 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
701 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
702 			action_flags |= MLX5_FLOW_ACTION_DECAP;
703 			break;
704 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
705 			action_flags |= MLX5_FLOW_ACTION_DECAP;
706 			raw_decap_idx = action_idx;
707 			break;
708 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
709 			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
710 			break;
711 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
712 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
713 			break;
714 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
715 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
716 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
717 			break;
718 		case RTE_FLOW_ACTION_TYPE_AGE:
719 age:
720 			action_flags |= MLX5_FLOW_ACTION_AGE;
721 			break;
722 		case RTE_FLOW_ACTION_TYPE_COUNT:
723 count:
724 			action_flags |= MLX5_FLOW_ACTION_COUNT;
725 			break;
726 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
727 ct:
728 			action_flags |= MLX5_FLOW_ACTION_CT;
729 			break;
730 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
731 meter:
732 			action_flags |= MLX5_FLOW_ACTION_METER;
733 			break;
734 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
735 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
736 			break;
737 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
738 			action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX;
739 			break;
740 		case RTE_FLOW_ACTION_TYPE_VOID:
741 		case RTE_FLOW_ACTION_TYPE_END:
742 			break;
743 		default:
744 			goto error;
745 		}
746 		action_idx++;
747 	}
748 	if (*encap_idx == -1)
749 		*encap_idx = action_idx;
750 	action_idx++; /* The END action. */
751 	*act_cnt = action_idx;
752 	return action_flags;
753 error:
754 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
755 			   action, "invalid flow action");
756 	return 0;
757 }
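
/*
 * For example (illustrative only), an action list of mark / rss / end makes
 * flow_hw_action_flags_get() return MLX5_FLOW_ACTION_MARK |
 * MLX5_FLOW_ACTION_RSS, point *mark and *qrss at the respective actions,
 * set *encap_idx to 2 (no encapsulation action, so it defaults to the
 * number of actions excluding END) and set *act_cnt to 3 (END included).
 */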
758 
759 /**
760  * Register destination table DR jump action.
761  *
762  * @param[in] dev
763  *   Pointer to the rte_eth_dev structure.
764  * @param[in] cfg
765  *   Pointer to the template table configuration.
766  * @param[in] dest_group
767  *   The destination group ID.
768  * @param[out] error
769  *   Pointer to error structure.
770  *
771  * @return
772  *    Jump action on success, NULL otherwise and rte_errno is set.
773  */
774 static struct mlx5_hw_jump_action *
775 flow_hw_jump_action_register(struct rte_eth_dev *dev,
776 			     const struct mlx5_flow_template_table_cfg *cfg,
777 			     uint32_t dest_group,
778 			     struct rte_flow_error *error)
779 {
780 	struct mlx5_priv *priv = dev->data->dev_private;
781 	struct rte_flow_attr jattr = cfg->attr.flow_attr;
782 	struct mlx5_flow_group *grp;
783 	struct mlx5_flow_cb_ctx ctx = {
784 		.dev = dev,
785 		.error = error,
786 		.data = &jattr,
787 	};
788 	struct mlx5_list_entry *ge;
789 	uint32_t target_group;
790 
791 	target_group = dest_group;
792 	if (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))
793 		return NULL;
794 	jattr.group = target_group;
795 	ge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);
796 	if (!ge)
797 		return NULL;
798 	grp = container_of(ge, struct mlx5_flow_group, entry);
799 	return &grp->jump;
800 }
801 
802 /**
803  * Release jump action.
804  *
805  * @param[in] dev
806  *   Pointer to the rte_eth_dev structure.
807  * @param[in] jump
808  *   Pointer to the jump action.
809  */
811 static void
812 flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
813 {
814 	struct mlx5_priv *priv = dev->data->dev_private;
815 	struct mlx5_flow_group *grp;
816 
817 	grp = container_of(jump, struct mlx5_flow_group, jump);
818 	mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
819 }
820 
821 /**
822  * Register queue/RSS action.
823  *
824  * @param[in] dev
825  *   Pointer to the rte_eth_dev structure.
826  * @param[in] hws_flags
827  *   DR action flags.
828  * @param[in] action
829  *   rte flow action.
830  *
831  * @return
832  *    TIR action (hrxq) on success, NULL otherwise and rte_errno is set.
837  */
838 static inline struct mlx5_hrxq*
839 flow_hw_tir_action_register(struct rte_eth_dev *dev,
840 			    uint32_t hws_flags,
841 			    const struct rte_flow_action *action)
842 {
843 	struct mlx5_flow_rss_desc rss_desc = {
844 		.hws_flags = hws_flags,
845 	};
846 	struct mlx5_hrxq *hrxq;
847 
848 	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
849 		const struct rte_flow_action_queue *queue = action->conf;
850 
851 		rss_desc.const_q = &queue->index;
852 		rss_desc.queue_num = 1;
853 	} else {
854 		const struct rte_flow_action_rss *rss = action->conf;
855 
856 		rss_desc.queue_num = rss->queue_num;
857 		rss_desc.const_q = rss->queue;
858 		memcpy(rss_desc.key,
859 		       !rss->key ? rss_hash_default_key : rss->key,
860 		       MLX5_RSS_HASH_KEY_LEN);
861 		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
862 		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
863 		rss_desc.symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
864 		flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
865 		flow_dv_action_rss_l34_hash_adjust(rss->types,
866 						   &rss_desc.hash_fields);
867 		if (rss->level > 1) {
868 			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
869 			rss_desc.tunnel = 1;
870 		}
871 	}
872 	hrxq = mlx5_hrxq_get(dev, &rss_desc);
873 	return hrxq;
874 }
875 
876 static __rte_always_inline int
877 flow_hw_ct_compile(struct rte_eth_dev *dev,
878 		   uint32_t queue, uint32_t idx,
879 		   struct mlx5dr_rule_action *rule_act)
880 {
881 	struct mlx5_priv *priv = dev->data->dev_private;
882 	struct mlx5_aso_ct_action *ct;
883 
884 	ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
885 	if (!ct || (!priv->shared_host && mlx5_aso_ct_available(priv->sh, queue, ct)))
886 		return -1;
887 	rule_act->action = priv->hws_ctpool->dr_action;
888 	rule_act->aso_ct.offset = ct->offset;
889 	rule_act->aso_ct.direction = ct->is_original ?
890 		MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
891 		MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
892 	return 0;
893 }
894 
895 static void
896 flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
897 {
898 	if (encap_decap->action && !encap_decap->multi_pattern)
899 		mlx5dr_action_destroy(encap_decap->action);
900 }
901 
902 static void
903 flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
904 {
905 	if (mhdr->action && !mhdr->multi_pattern)
906 		mlx5dr_action_destroy(mhdr->action);
907 }
908 
909 /**
910  * Destroy DR actions created by action template.
911  *
912  * DR actions are created during the table creation's action translation
913  * and need to be destroyed when the table is destroyed.
914  *
915  * @param[in] dev
916  *   Pointer to the rte_eth_dev structure.
917  * @param[in] acts
918  *   Pointer to the template HW steering DR actions.
919  */
920 static void
921 __flow_hw_actions_release(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
922 {
923 	struct mlx5_priv *priv = dev->data->dev_private;
924 
925 	if (acts->mark)
926 		if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
927 				rte_memory_order_relaxed) - 1))
928 			flow_hw_rxq_flag_set(dev, false);
929 
930 	if (acts->jump) {
931 		struct mlx5_flow_group *grp;
932 
933 		grp = container_of
934 			(acts->jump, struct mlx5_flow_group, jump);
935 		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
936 		acts->jump = NULL;
937 	}
938 	if (acts->tir) {
939 		mlx5_hrxq_release(dev, acts->tir->idx);
940 		acts->tir = NULL;
941 	}
942 	if (acts->encap_decap) {
943 		flow_hw_template_destroy_reformat_action(acts->encap_decap);
944 		mlx5_free(acts->encap_decap);
945 		acts->encap_decap = NULL;
946 	}
947 	if (acts->push_remove) {
948 		if (acts->push_remove->action)
949 			mlx5dr_action_destroy(acts->push_remove->action);
950 		mlx5_free(acts->push_remove);
951 		acts->push_remove = NULL;
952 	}
953 	if (acts->mhdr) {
954 		flow_hw_template_destroy_mhdr_action(acts->mhdr);
955 		mlx5_free(acts->mhdr);
956 		acts->mhdr = NULL;
957 	}
958 	if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
959 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
960 		acts->cnt_id = 0;
961 	}
962 	if (acts->mtr_id) {
963 		mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
964 		acts->mtr_id = 0;
965 	}
966 }
967 
968 /**
969  * Destroy DR actions created by action template.
970  *
971  * DR actions are created during the table creation's action translation
972  * and need to be destroyed when the table is destroyed.
973  *
974  * @param[in] dev
975  *   Pointer to the rte_eth_dev structure.
976  * @param[in] acts
977  *   Pointer to the template HW steering DR actions.
978  */
979 static void
980 __flow_hw_action_template_destroy(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
981 {
982 	struct mlx5_priv *priv = dev->data->dev_private;
983 	struct mlx5_action_construct_data *data;
984 
985 	while (!LIST_EMPTY(&acts->act_list)) {
986 		data = LIST_FIRST(&acts->act_list);
987 		LIST_REMOVE(data, next);
988 		mlx5_ipool_free(priv->acts_ipool, data->idx);
989 	}
990 
991 	__flow_hw_actions_release(dev, acts);
992 }
993 
994 /**
995  * Allocate a dynamic action construct data entry.
996  *
997  * @param[in] priv
998  *   Pointer to the port private data structure.
999  * @param[in] type
1000  *   Action type.
1001  * @param[in] action_src
1002  *   Offset of source rte flow action.
1003  * @param[in] action_dst
1004  *   Offset of destination DR action.
1005  *
1006  * @return
1007  *    Pointer to the allocated action construct data on success,
1008  *    NULL otherwise and rte_errno is set.
1010  */
1011 static __rte_always_inline struct mlx5_action_construct_data *
1012 __flow_hw_act_data_alloc(struct mlx5_priv *priv,
1013 			 enum rte_flow_action_type type,
1014 			 uint16_t action_src,
1015 			 uint16_t action_dst)
1016 {
1017 	struct mlx5_action_construct_data *act_data;
1018 	uint32_t idx = 0;
1019 
1020 	act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
1021 	if (!act_data)
1022 		return NULL;
1023 	act_data->idx = idx;
1024 	act_data->type = type;
1025 	act_data->action_src = action_src;
1026 	act_data->action_dst = action_dst;
1027 	return act_data;
1028 }
1029 
1030 /**
1031  * Append dynamic action to the dynamic action list.
1032  *
1033  * @param[in] priv
1034  *   Pointer to the port private data structure.
1035  * @param[in] acts
1036  *   Pointer to the template HW steering DR actions.
1037  * @param[in] type
1038  *   Action type.
1039  * @param[in] action_src
1040  *   Offset of source rte flow action.
1041  * @param[in] action_dst
1042  *   Offset of destination DR action.
1043  *
1044  * @return
1045  *    0 on success, negative value otherwise and rte_errno is set.
1046  */
1047 static __rte_always_inline int
1048 __flow_hw_act_data_general_append(struct mlx5_priv *priv,
1049 				  struct mlx5_hw_actions *acts,
1050 				  enum rte_flow_action_type type,
1051 				  uint16_t action_src,
1052 				  uint16_t action_dst)
1053 {
1054 	struct mlx5_action_construct_data *act_data;
1055 
1056 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1057 	if (!act_data)
1058 		return -1;
1059 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1060 	return 0;
1061 }
1062 
1063 static __rte_always_inline int
1064 __flow_hw_act_data_indirect_append(struct mlx5_priv *priv,
1065 				   struct mlx5_hw_actions *acts,
1066 				   enum rte_flow_action_type type,
1067 				   enum rte_flow_action_type mask_type,
1068 				   uint16_t action_src,
1069 				   uint16_t action_dst)
1070 {
1071 	struct mlx5_action_construct_data *act_data;
1072 
1073 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1074 	if (!act_data)
1075 		return -1;
1076 	act_data->indirect.expected_type = mask_type;
1077 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1078 	return 0;
1079 }
1080 
1081 static __rte_always_inline int
1082 flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
1083 				      struct mlx5_hw_actions *acts,
1084 				      enum rte_flow_action_type type,
1085 				      uint16_t action_src, uint16_t action_dst,
1086 				      indirect_list_callback_t cb)
1087 {
1088 	struct mlx5_action_construct_data *act_data;
1089 
1090 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1091 	if (!act_data)
1092 		return -1;
1093 	act_data->indirect_list_cb = cb;
1094 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1095 	return 0;
1096 }

1097 /**
1098  * Append dynamic encap action to the dynamic action list.
1099  *
1100  * @param[in] priv
1101  *   Pointer to the port private data structure.
1102  * @param[in] acts
1103  *   Pointer to the template HW steering DR actions.
1104  * @param[in] type
1105  *   Action type.
1106  * @param[in] action_src
1107  *   Offset of source rte flow action.
1108  * @param[in] action_dst
1109  *   Offset of destination DR action.
1110  * @param[in] len
1111  *   Length of the data to be updated.
1112  *
1113  * @return
1114  *    0 on success, negative value otherwise and rte_errno is set.
1115  */
1116 static __rte_always_inline int
1117 __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
1118 				struct mlx5_hw_actions *acts,
1119 				enum rte_flow_action_type type,
1120 				uint16_t action_src,
1121 				uint16_t action_dst,
1122 				uint16_t len)
1123 {
1124 	struct mlx5_action_construct_data *act_data;
1125 
1126 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1127 	if (!act_data)
1128 		return -1;
1129 	act_data->encap.len = len;
1130 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1131 	return 0;
1132 }
1133 
1134 /**
1135  * Append dynamic push action to the dynamic action list.
1136  *
1137  * @param[in] dev
1138  *   Pointer to the port.
1139  * @param[in] acts
1140  *   Pointer to the template HW steering DR actions.
1141  * @param[in] type
1142  *   Action type.
1143  * @param[in] action_src
1144  *   Offset of source rte flow action.
1145  * @param[in] action_dst
1146  *   Offset of destination DR action.
1147  * @param[in] len
1148  *   Length of the data to be updated.
1149  *
1150  * @return
1151  *    Data pointer on success, NULL otherwise and rte_errno is set.
1152  */
1153 static __rte_always_inline void *
1154 __flow_hw_act_data_push_append(struct rte_eth_dev *dev,
1155 			       struct mlx5_hw_actions *acts,
1156 			       enum rte_flow_action_type type,
1157 			       uint16_t action_src,
1158 			       uint16_t action_dst,
1159 			       uint16_t len)
1160 {
1161 	struct mlx5_action_construct_data *act_data;
1162 	struct mlx5_priv *priv = dev->data->dev_private;
1163 
1164 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1165 	if (!act_data)
1166 		return NULL;
1167 	act_data->ipv6_ext.len = len;
1168 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1169 	return act_data;
1170 }
1171 
1172 static __rte_always_inline int
1173 __flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
1174 				     struct mlx5_hw_actions *acts,
1175 				     enum rte_flow_action_type type,
1176 				     uint16_t action_src,
1177 				     uint16_t action_dst,
1178 				     const struct rte_flow_action_modify_field *mf,
1179 				     uint16_t mhdr_cmds_off,
1180 				     uint16_t mhdr_cmds_end,
1181 				     bool shared,
1182 				     struct field_modify_info *field,
1183 				     struct field_modify_info *dcopy,
1184 				     uint32_t *mask)
1185 {
1186 	struct mlx5_action_construct_data *act_data;
1187 
1188 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1189 	if (!act_data)
1190 		return -1;
1191 	act_data->modify_header.action = *mf;
1192 	act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
1193 	act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
1194 	act_data->modify_header.shared = shared;
1195 	rte_memcpy(act_data->modify_header.field, field,
1196 		   sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
1197 	rte_memcpy(act_data->modify_header.dcopy, dcopy,
1198 		   sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
1199 	rte_memcpy(act_data->modify_header.mask, mask,
1200 		   sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
1201 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1202 	return 0;
1203 }
1204 
1205 /**
1206  * Append shared RSS action to the dynamic action list.
1207  *
1208  * @param[in] priv
1209  *   Pointer to the port private data structure.
1210  * @param[in] acts
1211  *   Pointer to the template HW steering DR actions.
1212  * @param[in] type
1213  *   Action type.
1214  * @param[in] action_src
1215  *   Offset of source rte flow action.
1216  * @param[in] action_dst
1217  *   Offset of destination DR action.
1218  * @param[in] idx
1219  *   Shared RSS index.
1220  * @param[in] rss
1221  *   Pointer to the shared RSS info.
1222  *
1223  * @return
1224  *    0 on success, negative value otherwise and rte_errno is set.
1225  */
1226 static __rte_always_inline int
1227 __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
1228 				     struct mlx5_hw_actions *acts,
1229 				     enum rte_flow_action_type type,
1230 				     uint16_t action_src,
1231 				     uint16_t action_dst,
1232 				     uint32_t idx,
1233 				     struct mlx5_shared_action_rss *rss)
1234 {
1235 	struct mlx5_action_construct_data *act_data;
1236 
1237 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1238 	if (!act_data)
1239 		return -1;
1240 	act_data->shared_rss.level = rss->origin.level;
1241 	act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
1242 				     rss->origin.types;
1243 	act_data->shared_rss.idx = idx;
1244 	act_data->shared_rss.symmetric_hash_function =
1245 		MLX5_RSS_IS_SYMM(rss->origin.func);
1246 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1247 	return 0;
1248 }
1249 
1250 /**
1251  * Append shared counter action to the dynamic action list.
1252  *
1253  * @param[in] priv
1254  *   Pointer to the port private data structure.
1255  * @param[in] acts
1256  *   Pointer to the template HW steering DR actions.
1257  * @param[in] type
1258  *   Action type.
1259  * @param[in] action_src
1260  *   Offset of source rte flow action.
1261  * @param[in] action_dst
1262  *   Offset of destination DR action.
1263  * @param[in] cnt_id
1264  *   Shared counter id.
1265  *
1266  * @return
1267  *    0 on success, negative value otherwise and rte_errno is set.
1268  */
1269 static __rte_always_inline int
1270 __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
1271 				     struct mlx5_hw_actions *acts,
1272 				     enum rte_flow_action_type type,
1273 				     uint16_t action_src,
1274 				     uint16_t action_dst,
1275 				     cnt_id_t cnt_id)
1276 {
1277 	struct mlx5_action_construct_data *act_data;
1278 
1279 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1280 	if (!act_data)
1281 		return -1;
1282 	act_data->type = type;
1283 	act_data->shared_counter.id = cnt_id;
1284 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1285 	return 0;
1286 }
1287 
1288 /**
1289  * Append shared meter_mark action to the dynamic action list.
1290  *
1291  * @param[in] priv
1292  *   Pointer to the port private data structure.
1293  * @param[in] acts
1294  *   Pointer to the template HW steering DR actions.
1295  * @param[in] type
1296  *   Action type.
1297  * @param[in] action_src
1298  *   Offset of source rte flow action.
1299  * @param[in] action_dst
1300  *   Offset of destination DR action.
1301  * @param[in] mtr_id
1302  *   Shared meter id.
1303  *
1304  * @return
1305  *    0 on success, negative value otherwise and rte_errno is set.
1306  */
1307 static __rte_always_inline int
1308 __flow_hw_act_data_shared_mtr_append(struct mlx5_priv *priv,
1309 				     struct mlx5_hw_actions *acts,
1310 				     enum rte_flow_action_type type,
1311 				     uint16_t action_src,
1312 				     uint16_t action_dst,
1313 				     cnt_id_t mtr_id)
1314 {
	struct mlx5_action_construct_data *act_data;
1315 
1316 	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1317 	if (!act_data)
1318 		return -1;
1319 	act_data->type = type;
1320 	act_data->shared_meter.id = mtr_id;
1321 	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1322 	return 0;
1323 }
1324 
1325 /**
1326  * Translate shared indirect action.
1327  *
1328  * @param[in] dev
1329  *   Pointer to the rte_eth_dev data structure.
1330  * @param[in] action
1331  *   Pointer to the shared indirect rte_flow action.
1332  * @param[in] acts
1333  *   Pointer to the template HW steering DR actions.
1334  * @param[in] action_src
1335  *   Offset of source rte flow action.
1336  * @param[in] action_dst
1337  *   Offset of destination DR action.
1338  *
1339  * @return
1340  *    0 on success, negative value otherwise and rte_errno is set.
1341  */
1342 static __rte_always_inline int
1343 flow_hw_shared_action_translate(struct rte_eth_dev *dev,
1344 				const struct rte_flow_action *action,
1345 				struct mlx5_hw_actions *acts,
1346 				uint16_t action_src,
1347 				uint16_t action_dst)
1348 {
1349 	struct mlx5_priv *priv = dev->data->dev_private;
1350 	struct mlx5_shared_action_rss *shared_rss;
1351 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
1352 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1353 	uint32_t idx = act_idx &
1354 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1355 
1356 	switch (type) {
1357 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
1358 		shared_rss = mlx5_ipool_get
1359 		  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
1360 		if (!shared_rss || __flow_hw_act_data_shared_rss_append
1361 		    (priv, acts,
1362 		    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
1363 		    action_src, action_dst, idx, shared_rss)) {
1364 			DRV_LOG(WARNING, "Indirect RSS action index %d translate failed", act_idx);
1365 			return -1;
1366 		}
1367 		break;
1368 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
1369 		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
1370 			(enum rte_flow_action_type)
1371 			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
1372 			action_src, action_dst, act_idx)) {
1373 			DRV_LOG(WARNING, "Indirect count action translate failed");
1374 			return -1;
1375 		}
1376 		break;
1377 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
1378 		/* Not supported, prevent by validate function. */
1379 		/* Not supported, prevented by the validate function. */
1380 		break;
1381 	case MLX5_INDIRECT_ACTION_TYPE_CT:
1382 		if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
1383 				       idx, &acts->rule_acts[action_dst])) {
1384 			DRV_LOG(WARNING, "Indirect CT action translate failed");
1385 			return -1;
1386 		}
1387 		break;
1388 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1389 		if (__flow_hw_act_data_shared_mtr_append(priv, acts,
1390 			(enum rte_flow_action_type)
1391 			MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
1392 			action_src, action_dst, idx)) {
1393 			DRV_LOG(WARNING, "Indirect meter mark action translate failed");
1394 			return -1;
1395 		}
1396 		break;
1397 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
1398 		flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
1399 		break;
1400 	default:
1401 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
1402 		break;
1403 	}
1404 	return 0;
1405 }
1406 
1407 static __rte_always_inline bool
1408 flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
1409 				      const struct rte_flow_action *mask)
1410 {
1411 	const struct rte_flow_action_modify_field *v = action->conf;
1412 	const struct rte_flow_action_modify_field *m = mask->conf;
1413 
1414 	if (v->src.field == RTE_FLOW_FIELD_VALUE) {
1415 		uint32_t j;
1416 
1417 		for (j = 0; j < RTE_DIM(m->src.value); ++j) {
1418 			/*
1419 			 * Immediate value is considered to be masked
1420 			 * (and thus shared by all flow rules), if mask
1421 			 * is non-zero. Partial mask over immediate value
1422 			 * is not allowed.
1423 			 */
1424 			if (m->src.value[j])
1425 				return true;
1426 		}
1427 		return false;
1428 	}
1429 	if (v->src.field == RTE_FLOW_FIELD_POINTER)
1430 		return m->src.pvalue != NULL;
1431 	/*
1432 	 * Source field types other than VALUE and
1433 	 * POINTER are always shared.
1434 	 */
1435 	return true;
1436 }
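
/*
 * A short note on the sharing rule above (illustrative only): for a
 * MODIFY_FIELD action whose source is an immediate RTE_FLOW_FIELD_VALUE,
 * an all-zero mask value means the immediate is provided per flow rule and
 * the action is not shared, while any non-zero mask value marks the
 * immediate as fixed in the template and therefore shared by all rules.
 */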
1437 
1438 static __rte_always_inline bool
1439 flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
1440 			  const struct mlx5_modification_cmd *cmd)
1441 {
1442 	struct mlx5_modification_cmd last_cmd = { { 0 } };
1443 	struct mlx5_modification_cmd new_cmd = { { 0 } };
1444 	const uint32_t cmds_num = mhdr->mhdr_cmds_num;
1445 	unsigned int last_type;
1446 	bool should_insert = false;
1447 
1448 	if (cmds_num == 0)
1449 		return false;
1450 	last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
1451 	last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
1452 	last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
1453 	last_type = last_cmd.action_type;
1454 	new_cmd = *cmd;
1455 	new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
1456 	new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
1457 	switch (new_cmd.action_type) {
1458 	case MLX5_MODIFICATION_TYPE_SET:
1459 	case MLX5_MODIFICATION_TYPE_ADD:
1460 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1461 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1462 			should_insert = new_cmd.field == last_cmd.field;
1463 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1464 			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1465 			should_insert = new_cmd.field == last_cmd.dst_field;
1466 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1467 			should_insert = false;
1468 		else
1469 			MLX5_ASSERT(false); /* Other types are not supported. */
1470 		break;
1471 	case MLX5_MODIFICATION_TYPE_COPY:
1472 	case MLX5_MODIFICATION_TYPE_ADD_FIELD:
1473 		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1474 		    last_type == MLX5_MODIFICATION_TYPE_ADD)
1475 			should_insert = (new_cmd.field == last_cmd.field ||
1476 					 new_cmd.dst_field == last_cmd.field);
1477 		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1478 			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1479 			should_insert = (new_cmd.field == last_cmd.dst_field ||
1480 					 new_cmd.dst_field == last_cmd.dst_field);
1481 		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1482 			should_insert = false;
1483 		else
1484 			MLX5_ASSERT(false); /* Other types are not supported. */
1485 		break;
1486 	default:
1487 		/* Other action types should be rejected on AT validation. */
1488 		MLX5_ASSERT(false);
1489 		break;
1490 	}
1491 	return should_insert;
1492 }
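
/*
 * For example (illustrative only), two consecutive MLX5_MODIFICATION_TYPE_SET
 * commands targeting the same field make flow_hw_should_insert_nop() return
 * true: the commands collide on that field, so a NOP command has to be
 * inserted between them in the modify header command array.
 */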
1493 
1494 static __rte_always_inline int
1495 flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
1496 {
1497 	struct mlx5_modification_cmd *nop;
1498 	uint32_t num = mhdr->mhdr_cmds_num;
1499 
1500 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1501 		return -ENOMEM;
1502 	nop = mhdr->mhdr_cmds + num;
1503 	nop->data0 = 0;
1504 	nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
1505 	nop->data0 = rte_cpu_to_be_32(nop->data0);
1506 	nop->data1 = 0;
1507 	mhdr->mhdr_cmds_num = num + 1;
1508 	return 0;
1509 }
1510 
1511 static __rte_always_inline int
1512 flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
1513 			struct mlx5_modification_cmd *cmd)
1514 {
1515 	uint32_t num = mhdr->mhdr_cmds_num;
1516 
1517 	if (num + 1 >= MLX5_MHDR_MAX_CMD)
1518 		return -ENOMEM;
1519 	mhdr->mhdr_cmds[num] = *cmd;
1520 	mhdr->mhdr_cmds_num = num + 1;
1521 	return 0;
1522 }
1523 
1524 static __rte_always_inline int
1525 flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
1526 				   struct mlx5_flow_dv_modify_hdr_resource *resource)
1527 {
1528 	uint32_t idx;
1529 	int ret;
1530 
1531 	for (idx = 0; idx < resource->actions_num; ++idx) {
1532 		struct mlx5_modification_cmd *src = &resource->actions[idx];
1533 
1534 		if (flow_hw_should_insert_nop(mhdr, src)) {
1535 			ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1536 			if (ret)
1537 				return ret;
1538 		}
1539 		ret = flow_hw_mhdr_cmd_append(mhdr, src);
1540 		if (ret)
1541 			return ret;
1542 	}
1543 	return 0;
1544 }
1545 
1546 static __rte_always_inline void
1547 flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
1548 			  struct rte_flow_actions_template *at)
1549 {
1550 	memset(mhdr, 0, sizeof(*mhdr));
1551 	/* Modify header action without any commands is shared by default. */
1552 	mhdr->shared = true;
1553 	mhdr->pos = at->mhdr_off;
1554 }
1555 
1556 static __rte_always_inline int
1557 flow_hw_modify_field_compile(struct rte_eth_dev *dev,
1558 			     const struct rte_flow_attr *attr,
1559 			     const struct rte_flow_action *action, /* Current action from AT. */
1560 			     const struct rte_flow_action *action_mask, /* Current mask from AT. */
1561 			     struct mlx5_hw_actions *acts,
1562 			     struct mlx5_hw_modify_header_action *mhdr,
1563 			     uint16_t src_pos,
1564 			     struct rte_flow_error *error)
1565 {
1566 	struct mlx5_priv *priv = dev->data->dev_private;
1567 	const struct rte_flow_action_modify_field *conf = action->conf;
1568 	union {
1569 		struct mlx5_flow_dv_modify_hdr_resource resource;
1570 		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
1571 			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
1572 	} dummy;
1573 	struct mlx5_flow_dv_modify_hdr_resource *resource;
1574 	struct rte_flow_item item = {
1575 		.spec = NULL,
1576 		.mask = NULL
1577 	};
1578 	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1579 						{0, 0, MLX5_MODI_OUT_NONE} };
1580 	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1581 						{0, 0, MLX5_MODI_OUT_NONE} };
1582 	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
1583 	uint32_t type, value = 0;
1584 	uint16_t cmds_start, cmds_end;
1585 	bool shared;
1586 	int ret;
1587 
1588 	/*
1589 	 * The modify header action is shared if all previous modify_field actions
1590 	 * are shared and the currently compiled action is shared.
1591 	 */
1592 	shared = flow_hw_action_modify_field_is_shared(action, action_mask);
1593 	mhdr->shared &= shared;
1594 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1595 	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
1596 		type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
1597 								MLX5_MODIFICATION_TYPE_ADD;
1598 		/* For SET/ADD fill the destination field (field) first. */
1599 		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1600 						  conf->width, dev,
1601 						  attr, error);
1602 		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1603 				(void *)(uintptr_t)conf->src.pvalue :
1604 				(void *)(uintptr_t)&conf->src.value;
1605 		if (conf->dst.field == RTE_FLOW_FIELD_META ||
1606 		    conf->dst.field == RTE_FLOW_FIELD_TAG ||
1607 		    conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
1608 		    conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
1609 			uint8_t tag_index = flow_tag_index_get(&conf->dst);
1610 
1611 			value = *(const unaligned_uint32_t *)item.spec;
1612 			if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
1613 			    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1614 				value = rte_cpu_to_be_32(value << 16);
1615 			else
1616 				value = rte_cpu_to_be_32(value);
1617 			item.spec = &value;
1618 		} else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
1619 			   conf->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
1620 			/*
1621 			 * Both QFI and Geneve option type are passed as a uint8_t integer,
1622 			 * but they are accessed through the 2nd least significant byte of a
1623 			 * 32-bit field in the modify header command.
1624 			 */
1625 			value = *(const uint8_t *)item.spec;
1626 			value = rte_cpu_to_be_32(value << 8);
1627 			item.spec = &value;
1628 		} else if (conf->dst.field == RTE_FLOW_FIELD_VXLAN_LAST_RSVD) {
1629 			value = *(const uint8_t *)item.spec << 24;
1630 			value = rte_cpu_to_be_32(value);
1631 			item.spec = &value;
1632 		}
1633 	} else {
1634 		type = conf->operation == RTE_FLOW_MODIFY_SET ?
1635 		       MLX5_MODIFICATION_TYPE_COPY : MLX5_MODIFICATION_TYPE_ADD_FIELD;
1636 		/* For COPY fill the destination field (dcopy) without mask. */
1637 		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1638 						  conf->width, dev,
1639 						  attr, error);
1640 		/* Then construct the source field (field) with mask. */
1641 		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1642 						  conf->width, dev,
1643 						  attr, error);
1644 	}
1645 	item.mask = &mask;
1646 	memset(&dummy, 0, sizeof(dummy));
1647 	resource = &dummy.resource;
1648 	ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
1649 	if (ret)
1650 		return ret;
1651 	MLX5_ASSERT(resource->actions_num > 0);
1652 	/*
1653 	 * If the previous modify field action collides with this one, then insert a NOP command.
1654 	 * This NOP command will not be a part of the action's command range used to update
1655 	 * commands on rule creation.
1656 	 */
1657 	if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
1658 		ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1659 		if (ret)
1660 			return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1661 						  NULL, "too many modify field operations specified");
1662 	}
1663 	cmds_start = mhdr->mhdr_cmds_num;
1664 	ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
1665 	if (ret)
1666 		return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1667 					  NULL, "too many modify field operations specified");
1668 
1669 	cmds_end = mhdr->mhdr_cmds_num;
1670 	if (shared)
1671 		return 0;
1672 	ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
1673 						   src_pos, mhdr->pos, conf,
1674 						   cmds_start, cmds_end, shared,
1675 						   field, dcopy, mask);
1676 	if (ret)
1677 		return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1678 					  NULL, "not enough memory to store modify field metadata");
1679 	return 0;
1680 }
1681 
1682 static uint32_t
1683 flow_hw_count_nop_modify_field(struct mlx5_hw_modify_header_action *mhdr)
1684 {
1685 	uint32_t i;
1686 	uint32_t nops = 0;
1687 
1688 	for (i = 0; i < mhdr->mhdr_cmds_num; ++i) {
1689 		struct mlx5_modification_cmd cmd = mhdr->mhdr_cmds[i];
1690 
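		/*
		 * Commands are stored in big-endian; convert data0 to host order
		 * before reading the action_type bit-field.
		 */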
1691 		cmd.data0 = rte_be_to_cpu_32(cmd.data0);
1692 		if (cmd.action_type == MLX5_MODIFICATION_TYPE_NOP)
1693 			++nops;
1694 	}
1695 	return nops;
1696 }
1697 
1698 static int
1699 flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
1700 				       const struct mlx5_flow_template_table_cfg *cfg,
1701 				       struct mlx5_hw_modify_header_action *mhdr,
1702 				       struct rte_flow_error *error)
1703 {
1704 	struct mlx5_priv *priv = dev->data->dev_private;
1705 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
1706 
1707 	/*
1708 	 * Header modify pattern length limitation is only valid for HWS groups, i.e. groups > 0.
1709 	 * In group 0, MODIFY_FIELD actions are handled with header modify actions
1710 	 * managed by rdma-core.
1711 	 */
1712 	if (cfg->attr.flow_attr.group != 0 &&
1713 	    mhdr->mhdr_cmds_num > hca_attr->max_header_modify_pattern_length) {
1714 		uint32_t nops = flow_hw_count_nop_modify_field(mhdr);
1715 
1716 		DRV_LOG(ERR, "Too many modify header commands generated from "
1717 			     "MODIFY_FIELD actions. "
1718 			     "Generated HW commands = %u (amount of NOP commands = %u). "
1719 			     "Maximum supported = %u.",
1720 			     mhdr->mhdr_cmds_num, nops,
1721 			     hca_attr->max_header_modify_pattern_length);
1722 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1723 					  "Number of MODIFY_FIELD actions exceeds maximum "
1724 					  "supported limit of actions");
1725 	}
1726 	return 0;
1727 }
1728 
1729 static int
1730 flow_hw_represented_port_compile(struct rte_eth_dev *dev,
1731 				 const struct rte_flow_attr *attr,
1732 				 const struct rte_flow_action *action,
1733 				 const struct rte_flow_action *action_mask,
1734 				 struct mlx5_hw_actions *acts,
1735 				 uint16_t action_src, uint16_t action_dst,
1736 				 struct rte_flow_error *error)
1737 {
1738 	struct mlx5_priv *priv = dev->data->dev_private;
1739 	const struct rte_flow_action_ethdev *v = action->conf;
1740 	const struct rte_flow_action_ethdev *m = action_mask->conf;
1741 	int ret;
1742 
1743 	if (!attr->group)
1744 		return rte_flow_error_set(error, EINVAL,
1745 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1746 					  "represented_port action cannot"
1747 					  " be used on group 0");
1748 	if (!attr->transfer)
1749 		return rte_flow_error_set(error, EINVAL,
1750 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1751 					  NULL,
1752 					  "represented_port action requires"
1753 					  " transfer attribute");
1754 	if (attr->ingress || attr->egress)
1755 		return rte_flow_error_set(error, EINVAL,
1756 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1757 					  "represented_port action cannot"
1758 					  " be used with direction attributes");
1759 	if (!priv->master)
1760 		return rte_flow_error_set(error, EINVAL,
1761 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1762 					  "represented_port action must"
1763 					  " be used on proxy port");
1764 	if (m && !!m->port_id) {
1765 		struct mlx5_priv *port_priv;
1766 
1767 		if (!v)
1768 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1769 						  action, "port index was not provided");
1770 		port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
1771 		if (port_priv == NULL)
1772 			return rte_flow_error_set
1773 					(error, EINVAL,
1774 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1775 					 "port does not exist or unable to"
1776 					 " obtain E-Switch info for port");
1777 		MLX5_ASSERT(priv->hw_vport != NULL);
1778 		if (priv->hw_vport[v->port_id]) {
1779 			acts->rule_acts[action_dst].action =
1780 					priv->hw_vport[v->port_id];
1781 		} else {
1782 			return rte_flow_error_set
1783 					(error, EINVAL,
1784 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1785 					 "cannot use represented_port action"
1786 					 " with this port");
1787 		}
1788 	} else {
1789 		ret = __flow_hw_act_data_general_append
1790 				(priv, acts, action->type,
1791 				 action_src, action_dst);
1792 		if (ret)
1793 			return rte_flow_error_set
1794 					(error, ENOMEM,
1795 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1796 					 "not enough memory to store"
1797 					 " vport action");
1798 	}
1799 	return 0;
1800 }
1801 
1802 static __rte_always_inline int
1803 flow_hw_meter_compile(struct rte_eth_dev *dev,
1804 		      const struct mlx5_flow_template_table_cfg *cfg,
1805 		      uint16_t aso_mtr_pos,
1806 		      uint16_t jump_pos,
1807 		      const struct rte_flow_action *action,
1808 		      struct mlx5_hw_actions *acts,
1809 		      struct rte_flow_error *error)
1810 {
1811 	struct mlx5_priv *priv = dev->data->dev_private;
1812 	struct mlx5_aso_mtr *aso_mtr;
1813 	const struct rte_flow_action_meter *meter = action->conf;
1814 	uint32_t group = cfg->attr.flow_attr.group;
1815 
1816 	aso_mtr = mlx5_aso_meter_by_idx(priv, meter->mtr_id);
1817 	acts->rule_acts[aso_mtr_pos].action = priv->mtr_bulk.action;
1818 	acts->rule_acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1819 	acts->jump = flow_hw_jump_action_register
1820 		(dev, cfg, aso_mtr->fm.group, error);
1821 	if (!acts->jump)
1822 		return -ENOMEM;
1823 	acts->rule_acts[jump_pos].action = (!!group) ?
1824 				    acts->jump->hws_action :
1825 				    acts->jump->root_action;
1826 	if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
1827 		return -ENOMEM;
1828 	return 0;
1829 }
1830 
1831 static __rte_always_inline int
1832 flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t start_pos,
1833 		      struct mlx5_hw_actions *acts)
1834 {
1835 	struct mlx5_priv *priv = dev->data->dev_private;
1836 	uint32_t pos = start_pos;
1837 	cnt_id_t cnt_id;
1838 	int ret;
1839 
1840 	ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
1841 	if (ret != 0)
1842 		return ret;
1843 	ret = mlx5_hws_cnt_pool_get_action_offset
1844 				(priv->hws_cpool,
1845 				 cnt_id,
1846 				 &acts->rule_acts[pos].action,
1847 				 &acts->rule_acts[pos].counter.offset);
1848 	if (ret != 0)
1849 		return ret;
1850 	acts->cnt_id = cnt_id;
1851 	return 0;
1852 }
1853 
1854 static __rte_always_inline bool
1855 is_of_vlan_pcp_present(const struct rte_flow_action *actions)
1856 {
1857 	/*
1858 	 * Order of RTE VLAN push actions is
1859 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
1860 	 */
1861 	return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
1862 		RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
1863 }
1864 
1865 static __rte_always_inline bool
1866 is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
1867 {
1868 	/*
1869 	 * In masked push VLAN template all RTE push actions are masked.
1870 	 * In a masked push VLAN template, all RTE push actions are masked.
1871 	return mask && mask->ethertype != 0;
1872 }
1873 
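/**
 * Pack the OF_PUSH_VLAN / OF_SET_VLAN_VID [/ OF_SET_VLAN_PCP] template actions
 * into a single VLAN header word in network byte order.
 *
 * Illustrative example (values assumed, not taken from the driver): ethertype
 * 0x8100, PCP 3 and VID 0x123 give TCI = (3 << 13) | 0x123 = 0x6123, so the
 * packed header reads 81 00 61 23 in wire order on both little- and big-endian
 * hosts.
 */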
1874 static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
1875 {
1876 /*
1877  * OpenFlow Switch Specification defines 802.1q VID as 12+1 bits.
1878  */
1879 	rte_be32_t type, vid, pcp;
1880 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1881 	rte_be32_t vid_lo, vid_hi;
1882 #endif
1883 
1884 	type = ((const struct rte_flow_action_of_push_vlan *)
1885 		actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
1886 	vid = ((const struct rte_flow_action_of_set_vlan_vid *)
1887 		actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
1888 	pcp = is_of_vlan_pcp_present(actions) ?
1889 	      ((const struct rte_flow_action_of_set_vlan_pcp *)
1890 		      actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
1891 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1892 	vid_hi = vid & 0xff;
1893 	vid_lo = vid >> 8;
1894 	return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type;
1895 #else
1896 	return (type << 16) | (pcp << 13) | vid;
1897 #endif
1898 }
1899 
1900 static __rte_always_inline struct mlx5_aso_mtr *
1901 flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
1902 			 const struct rte_flow_action *action,
1903 			 struct mlx5_hw_q_job *job, bool push,
1904 			 struct rte_flow_error *error)
1905 {
1906 	struct mlx5_priv *priv = dev->data->dev_private;
1907 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1908 	const struct rte_flow_action_meter_mark *meter_mark = action->conf;
1909 	struct mlx5_aso_mtr *aso_mtr;
1910 	struct mlx5_flow_meter_info *fm;
1911 	uint32_t mtr_id = 0;
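	/*
	 * The indirect action handle encodes the action type in the bits at and
	 * above MLX5_INDIRECT_ACTION_TYPE_OFFSET; the allocated meter id is OR-ed
	 * into the lower bits (see the job->action assignment below).
	 */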
1912 	uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
1913 					MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1914 
1915 	if (priv->shared_host) {
1916 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1917 				   "Meter mark actions can only be created on the host port");
1918 		return NULL;
1919 	}
1920 	if (meter_mark->profile == NULL)
1921 		return NULL;
1922 	aso_mtr = mlx5_ipool_malloc(pool->idx_pool, &mtr_id);
1923 	if (!aso_mtr) {
1924 		rte_flow_error_set(error, ENOMEM,
1925 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1926 				   NULL,
1927 				   "failed to allocate aso meter entry");
1928 		if (mtr_id)
1929 			mlx5_ipool_free(pool->idx_pool, mtr_id);
1930 		return NULL;
1931 	}
1932 	/* Fill the flow meter parameters. */
1933 	aso_mtr->type = ASO_METER_INDIRECT;
1934 	fm = &aso_mtr->fm;
1935 	fm->meter_id = mtr_id;
1936 	fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
1937 	fm->is_enable = meter_mark->state;
1938 	fm->color_aware = meter_mark->color_mode;
1939 	aso_mtr->pool = pool;
1940 	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
1941 			  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
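	/* mlx5 ipool indices are 1-based; the ASO meter offset is zero-based. */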
1942 	aso_mtr->offset = mtr_id - 1;
1943 	aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;
1944 	job->action = (void *)(handle | mtr_id);
1945 	/* Update ASO flow meter by wqe. */
1946 	if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
1947 					 &priv->mtr_bulk, job, push)) {
1948 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1949 		return NULL;
1950 	}
1951 	/* Wait for ASO object completion. */
1952 	if (queue == MLX5_HW_INV_QUEUE &&
1953 	    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
1954 		mlx5_ipool_free(pool->idx_pool, mtr_id);
1955 		return NULL;
1956 	}
1957 	return aso_mtr;
1958 }
1959 
1960 static __rte_always_inline int
1961 flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
1962 			   uint16_t aso_mtr_pos,
1963 			   const struct rte_flow_action *action,
1964 			   struct mlx5dr_rule_action *acts,
1965 			   uint32_t *index,
1966 			   uint32_t queue,
1967 			   struct rte_flow_error *error)
1968 {
1969 	struct mlx5_priv *priv = dev->data->dev_private;
1970 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1971 	struct mlx5_aso_mtr *aso_mtr;
1972 	struct mlx5_hw_q_job *job =
1973 		flow_hw_action_job_init(priv, queue, NULL, NULL, NULL,
1974 					MLX5_HW_Q_JOB_TYPE_CREATE,
1975 					MLX5_HW_INDIRECT_TYPE_LEGACY, NULL);
1976 
1977 	if (!job)
1978 		return -1;
1979 	aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job,
1980 					   true, error);
1981 	if (!aso_mtr) {
1982 		flow_hw_job_put(priv, job, queue);
1983 		return -1;
1984 	}
1985 
1986 	/* Compile METER_MARK action */
1987 	acts[aso_mtr_pos].action = pool->action;
1988 	acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1989 	*index = aso_mtr->fm.meter_id;
1990 	return 0;
1991 }
1992 
1993 static int
1994 flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
1995 				  __rte_unused const struct mlx5_action_construct_data *act_data,
1996 				  const struct rte_flow_action *action,
1997 				  struct mlx5dr_rule_action *dr_rule)
1998 {
1999 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
2000 	const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
2001 
2002 	dr_rule->action = mirror->mirror_action;
2003 	return 0;
2004 }
2005 
2006 /**
2007  * HWS mirror is implemented as a FW island.
2008  * The action does not support indirect list flow configuration.
2009  * If the template handle was masked, use the handle's mirror action in flow rules.
2010  * Otherwise let the flow rule specify the mirror handle.
2011  */
2012 static int
2013 hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
2014 					 const struct rte_flow_action *action,
2015 					 const struct rte_flow_action *mask,
2016 					 struct mlx5_hw_actions *acts,
2017 					 uint16_t action_src, uint16_t action_dst)
2018 {
2019 	int ret = 0;
2020 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2021 
2022 	if (mask_conf && mask_conf->handle) {
2023 		/**
2024 		 * If mirror handle was masked, assign fixed DR5 mirror action.
2025 		 */
2026 		flow_hw_translate_indirect_mirror(dev, NULL, action,
2027 						  &acts->rule_acts[action_dst]);
2028 	} else {
2029 		struct mlx5_priv *priv = dev->data->dev_private;
2030 		ret = flow_hw_act_data_indirect_list_append
2031 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2032 			 action_src, action_dst,
2033 			 flow_hw_translate_indirect_mirror);
2034 	}
2035 	return ret;
2036 }
2037 
2038 static int
2039 flow_hw_reformat_action(__rte_unused struct rte_eth_dev *dev,
2040 			__rte_unused const struct mlx5_action_construct_data *data,
2041 			const struct rte_flow_action *action,
2042 			struct mlx5dr_rule_action *dr_rule)
2043 {
2044 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
2045 
2046 	dr_rule->action = ((struct mlx5_hw_encap_decap_action *)
2047 			   (indlst_conf->handle))->action;
2048 	if (!dr_rule->action)
2049 		return -EINVAL;
2050 	return 0;
2051 }
2052 
2053 /**
2054  * Template conf must not be masked. If handle is masked, use the one in template,
2055  * otherwise update per flow rule.
2056  */
2057 static int
2058 hws_table_tmpl_translate_indirect_reformat(struct rte_eth_dev *dev,
2059 					   const struct rte_flow_action *action,
2060 					   const struct rte_flow_action *mask,
2061 					   struct mlx5_hw_actions *acts,
2062 					   uint16_t action_src, uint16_t action_dst)
2063 {
2064 	int ret = -1;
2065 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2066 	struct mlx5_priv *priv = dev->data->dev_private;
2067 
2068 	if (mask_conf && mask_conf->handle && !mask_conf->conf)
2069 		/**
2070 		 * If handle was masked, assign fixed DR action.
2071 		 */
2072 		ret = flow_hw_reformat_action(dev, NULL, action,
2073 					      &acts->rule_acts[action_dst]);
2074 	else if (mask_conf && !mask_conf->handle && !mask_conf->conf)
2075 		ret = flow_hw_act_data_indirect_list_append
2076 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2077 			 action_src, action_dst, flow_hw_reformat_action);
2078 	return ret;
2079 }
2080 
2081 static int
2082 flow_dr_set_meter(struct mlx5_priv *priv,
2083 		  struct mlx5dr_rule_action *dr_rule,
2084 		  const struct rte_flow_action_indirect_list *action_conf)
2085 {
2086 	const struct mlx5_indlst_legacy *legacy_obj =
2087 		(typeof(legacy_obj))action_conf->handle;
2088 	struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
2089 	uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
2090 	uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
2091 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
2092 
2093 	if (!aso_mtr)
2094 		return -EINVAL;
2095 	dr_rule->action = mtr_pool->action;
2096 	dr_rule->aso_meter.offset = aso_mtr->offset;
2097 	return 0;
2098 }
2099 
2100 __rte_always_inline static void
2101 flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
2102 {
2103 	dr_rule->aso_meter.init_color =
2104 		(enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
2105 }
2106 
2107 static int
2108 flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
2109 				 const struct mlx5_action_construct_data *act_data,
2110 				 const struct rte_flow_action *action,
2111 				 struct mlx5dr_rule_action *dr_rule)
2112 {
2113 	int ret;
2114 	struct mlx5_priv *priv = dev->data->dev_private;
2115 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
2116 	const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
2117 		(typeof(flow_conf))action_conf->conf;
2118 
2119 	ret = flow_dr_set_meter(priv, dr_rule, action_conf);
2120 	if (ret)
2121 		return ret;
2122 	if (!act_data->shared_meter.conf_masked) {
2123 		if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
2124 			flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
2125 	}
2126 	return 0;
2127 }
2128 
2129 static int
2130 hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
2131 					const struct rte_flow_action *action,
2132 					const struct rte_flow_action *mask,
2133 					struct mlx5_hw_actions *acts,
2134 					uint16_t action_src, uint16_t action_dst)
2135 {
2136 	int ret;
2137 	struct mlx5_priv *priv = dev->data->dev_private;
2138 	const struct rte_flow_action_indirect_list *action_conf = action->conf;
2139 	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2140 	bool is_handle_masked = mask_conf && mask_conf->handle;
2141 	bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
2142 	struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
2143 
2144 	if (is_handle_masked) {
2145 		ret = flow_dr_set_meter(priv, dr_rule, action->conf);
2146 		if (ret)
2147 			return ret;
2148 	}
2149 	if (is_conf_masked) {
2150 		const struct
2151 			rte_flow_indirect_update_flow_meter_mark **flow_conf =
2152 			(typeof(flow_conf))action_conf->conf;
2153 		flow_dr_mtr_flow_color(dr_rule,
2154 				       flow_conf[0]->init_color);
2155 	}
2156 	if (!is_handle_masked || !is_conf_masked) {
2157 		struct mlx5_action_construct_data *act_data;
2158 
2159 		ret = flow_hw_act_data_indirect_list_append
2160 			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2161 			 action_src, action_dst, flow_hw_translate_indirect_meter);
2162 		if (ret)
2163 			return ret;
2164 		act_data = LIST_FIRST(&acts->act_list);
2165 		act_data->shared_meter.conf_masked = is_conf_masked;
2166 	}
2167 	return 0;
2168 }
2169 
2170 static int
2171 hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
2172 					 const struct rte_flow_action *action,
2173 					 const struct rte_flow_action *mask,
2174 					 struct mlx5_hw_actions *acts,
2175 					 uint16_t action_src, uint16_t action_dst)
2176 {
2177 	int ret;
2178 	const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
2179 	struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
2180 	uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
2181 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
2182 
2183 	switch (type) {
2184 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
2185 		ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
2186 							      acts, action_src,
2187 							      action_dst);
2188 		break;
2189 	default:
2190 		ret = -EINVAL;
2191 		break;
2192 	}
2193 	return ret;
2194 }
2195 
2196 /*
2197  * template .. indirect_list handle Ht conf Ct ..
2198  * mask     .. indirect_list handle Hm conf Cm ..
2199  *
2200  * The PMD requires Ht != 0 to resolve the handle type.
2201  * If Ht was masked (Hm != 0), the DR5 action will be set according to Ht and
2202  * will not change. Otherwise, the DR5 action is resolved during flow rule build.
2203  * If Ct was masked (Cm != 0), table template processing updates the base
2204  * indirect action configuration with Ct parameters.
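 *
 * A hedged illustration (variable names assumed) of a template/mask pair where
 * the handle is masked and the configuration is left per flow rule:
 *
 * @code{.c}
 * const struct rte_flow_action_indirect_list conf = {
 *     .handle = handle, // Ht != 0, resolves the handle type
 *     .conf = NULL,     // Ct == 0
 * };
 * const struct rte_flow_action_indirect_list mask = {
 *     .handle = handle, // Hm != 0, DR5 action fixed at template translation
 *     .conf = NULL,     // Cm == 0
 * };
 * @endcode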
2205  */
2206 static int
2207 table_template_translate_indirect_list(struct rte_eth_dev *dev,
2208 				       const struct rte_flow_action *action,
2209 				       const struct rte_flow_action *mask,
2210 				       struct mlx5_hw_actions *acts,
2211 				       uint16_t action_src, uint16_t action_dst)
2212 {
2213 	int ret = 0;
2214 	enum mlx5_indirect_list_type type;
2215 	const struct rte_flow_action_indirect_list *list_conf = action->conf;
2216 
2217 	if (!list_conf || !list_conf->handle)
2218 		return -EINVAL;
2219 	type = mlx5_get_indirect_list_type(list_conf->handle);
2220 	switch (type) {
2221 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
2222 		ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
2223 							       acts, action_src,
2224 							       action_dst);
2225 		break;
2226 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
2227 		ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
2228 							       acts, action_src,
2229 							       action_dst);
2230 		break;
2231 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
2232 		if (list_conf->conf)
2233 			return -EINVAL;
2234 		ret = hws_table_tmpl_translate_indirect_reformat(dev, action, mask,
2235 								 acts, action_src,
2236 								 action_dst);
2237 		break;
2238 	default:
2239 		return -EINVAL;
2240 	}
2241 	return ret;
2242 }
2243 
2244 static void
2245 mlx5_set_reformat_header(struct mlx5dr_action_reformat_header *hdr,
2246 			 uint8_t *encap_data,
2247 			 size_t data_size)
2248 {
2249 	hdr->sz = data_size;
2250 	hdr->data = encap_data;
2251 }
2252 
2253 static int
2254 mlx5_tbl_translate_reformat(struct mlx5_priv *priv,
2255 			    struct mlx5_hw_actions *acts,
2256 			    struct rte_flow_actions_template *at,
2257 			    const struct rte_flow_item *enc_item,
2258 			    const struct rte_flow_item *enc_item_m,
2259 			    uint8_t *encap_data, uint8_t *encap_data_m,
2260 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2261 			    size_t data_size, uint16_t reformat_src,
2262 			    enum mlx5dr_action_type refmt_type,
2263 			    struct rte_flow_error *error)
2264 {
2265 	int mp_reformat_ix = mlx5_multi_pattern_reformat_to_index(refmt_type);
2266 	struct mlx5dr_action_reformat_header hdr;
2267 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
2268 	bool shared_rfmt = false;
2269 	int ret;
2270 
2271 	MLX5_ASSERT(at->reformat_off != UINT16_MAX);
2272 	if (enc_item) {
2273 		MLX5_ASSERT(!encap_data);
2274 		ret = flow_dv_convert_encap_data(enc_item, buf, &data_size, error);
2275 		if (ret)
2276 			return ret;
2277 		encap_data = buf;
2278 		if (enc_item_m)
2279 			shared_rfmt = true;
2280 	} else if (encap_data && encap_data_m) {
2281 		shared_rfmt = true;
2282 	}
2283 	acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
2284 					sizeof(*acts->encap_decap) + data_size,
2285 					0, SOCKET_ID_ANY);
2286 	if (!acts->encap_decap)
2287 		return rte_flow_error_set(error, ENOMEM,
2288 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2289 					  NULL, "no memory for reformat context");
2290 	acts->encap_decap_pos = at->reformat_off;
2291 	acts->encap_decap->data_size = data_size;
2292 	acts->encap_decap->action_type = refmt_type;
2293 	if (shared_rfmt || mp_reformat_ix < 0) {
2294 		uint16_t reformat_ix = at->reformat_off;
2295 		/*
2296 		 * This copy is only needed in non template mode.
2297 		 * This copy is only needed in non-template mode,
2298 		 * in order to create the action later.
2299 		memcpy(acts->encap_decap->data, encap_data, data_size);
2300 		acts->rule_acts[reformat_ix].reformat.data = acts->encap_decap->data;
2301 		acts->rule_acts[reformat_ix].reformat.offset = 0;
2302 		acts->encap_decap->shared = true;
2303 	} else {
2304 		uint32_t ix;
2305 		typeof(mp_ctx->reformat[0]) *reformat = mp_ctx->reformat +
2306 							mp_reformat_ix;
2307 		mlx5_set_reformat_header(&hdr, encap_data, data_size);
2308 		ix = reformat->elements_num++;
2309 		reformat->reformat_hdr[ix] = hdr;
2310 		acts->rule_acts[at->reformat_off].reformat.hdr_idx = ix;
2311 		acts->encap_decap->multi_pattern = 1;
2312 		ret = __flow_hw_act_data_encap_append
2313 			(priv, acts, (at->actions + reformat_src)->type,
2314 			 reformat_src, at->reformat_off, data_size);
2315 		if (ret)
2316 			return -rte_errno;
2317 		mlx5_multi_pattern_activate(mp_ctx);
2318 	}
2319 	return 0;
2320 }
2321 
2322 static int
2323 mlx5_tbl_create_reformat_action(struct mlx5_priv *priv,
2324 				const struct rte_flow_template_table_attr *table_attr,
2325 				struct mlx5_hw_actions *acts,
2326 				struct rte_flow_actions_template *at,
2327 				uint8_t *encap_data,
2328 				size_t data_size,
2329 				enum mlx5dr_action_type refmt_type)
2330 {
2331 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2332 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
2333 	struct mlx5dr_action_reformat_header hdr;
2334 
2335 	mlx5_set_reformat_header(&hdr, encap_data, data_size);
2336 	uint16_t reformat_ix = at->reformat_off;
2337 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
2338 				MLX5DR_ACTION_FLAG_SHARED;
2339 
2340 	acts->encap_decap->action = mlx5dr_action_create_reformat(priv->dr_ctx, refmt_type,
2341 							   1, &hdr, 0, flags);
2342 	if (!acts->encap_decap->action)
2343 		return -rte_errno;
2344 	acts->rule_acts[reformat_ix].action = acts->encap_decap->action;
2345 	return 0;
2346 }
2347 
2348 static int
2349 mlx5_tbl_translate_modify_header(struct rte_eth_dev *dev,
2350 				 const struct mlx5_flow_template_table_cfg *cfg,
2351 				 struct mlx5_hw_actions *acts,
2352 				 struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2353 				 struct mlx5_hw_modify_header_action *mhdr,
2354 				 struct rte_flow_error *error)
2355 {
2356 	uint16_t mhdr_ix = mhdr->pos;
2357 	struct mlx5dr_action_mh_pattern pattern = {
2358 		.sz = sizeof(struct mlx5_modification_cmd) * mhdr->mhdr_cmds_num
2359 	};
2360 
2361 	if (flow_hw_validate_compiled_modify_field(dev, cfg, mhdr, error)) {
2362 		__flow_hw_action_template_destroy(dev, acts);
2363 		return -rte_errno;
2364 	}
2365 	acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
2366 				 0, SOCKET_ID_ANY);
2367 	if (!acts->mhdr)
2368 		return rte_flow_error_set(error, ENOMEM,
2369 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2370 					  NULL, "translate modify_header: no memory for modify header context");
2371 	rte_memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
2372 	if (!mhdr->shared) {
2373 		pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
2374 		typeof(mp_ctx->mh) *mh = &mp_ctx->mh;
2375 		uint32_t idx = mh->elements_num;
2376 		mh->pattern[mh->elements_num++] = pattern;
2377 		acts->mhdr->multi_pattern = 1;
2378 		acts->rule_acts[mhdr_ix].modify_header.pattern_idx = idx;
2379 		mlx5_multi_pattern_activate(mp_ctx);
2380 	}
2381 	return 0;
2382 }
2383 
2384 static int
2385 mlx5_tbl_ensure_shared_modify_header(struct rte_eth_dev *dev,
2386 				     const struct mlx5_flow_template_table_cfg *cfg,
2387 				     struct mlx5_hw_actions *acts,
2388 				     struct rte_flow_error *error)
2389 {
2390 	struct mlx5_priv *priv = dev->data->dev_private;
2391 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2392 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2393 	enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
2394 	struct mlx5dr_action_mh_pattern pattern = {
2395 		.sz = sizeof(struct mlx5_modification_cmd) * acts->mhdr->mhdr_cmds_num
2396 	};
2397 	uint16_t mhdr_ix = acts->mhdr->pos;
2398 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] | MLX5DR_ACTION_FLAG_SHARED;
2399 
2400 	pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
2401 	acts->mhdr->action = mlx5dr_action_create_modify_header(priv->dr_ctx, 1,
2402 								&pattern, 0, flags);
2403 	if (!acts->mhdr->action)
2404 		return rte_flow_error_set(error, rte_errno,
2405 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2406 					  "translate modify_header: failed to create DR action");
2407 	acts->rule_acts[mhdr_ix].action = acts->mhdr->action;
2408 	return 0;
2409 }
2410 
2411 static int
2412 mlx5_create_ipv6_ext_reformat(struct rte_eth_dev *dev,
2413 			      const struct mlx5_flow_template_table_cfg *cfg,
2414 			      struct mlx5_hw_actions *acts,
2415 			      struct rte_flow_actions_template *at,
2416 			      uint8_t *push_data, uint8_t *push_data_m,
2417 			      size_t push_size, uint16_t recom_src,
2418 			      enum mlx5dr_action_type recom_type)
2419 {
2420 	struct mlx5_priv *priv = dev->data->dev_private;
2421 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2422 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2423 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
2424 	struct mlx5_action_construct_data *act_data;
2425 	struct mlx5dr_action_reformat_header hdr = {0};
2426 	uint32_t flag, bulk = 0;
2427 
2428 	flag = mlx5_hw_act_flag[!!attr->group][type];
2429 	acts->push_remove = mlx5_malloc(MLX5_MEM_ZERO,
2430 					sizeof(*acts->push_remove) + push_size,
2431 					0, SOCKET_ID_ANY);
2432 	if (!acts->push_remove)
2433 		return -ENOMEM;
2434 
2435 	switch (recom_type) {
2436 	case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
2437 		if (!push_data || !push_size)
2438 			goto err1;
2439 		if (!push_data_m) {
2440 			bulk = rte_log2_u32(table_attr->nb_flows);
2441 		} else {
2442 			flag |= MLX5DR_ACTION_FLAG_SHARED;
2443 			acts->push_remove->shared = 1;
2444 		}
2445 		acts->push_remove->data_size = push_size;
2446 		memcpy(acts->push_remove->data, push_data, push_size);
2447 		hdr.data = push_data;
2448 		hdr.sz = push_size;
2449 		break;
2450 	case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
2451 		flag |= MLX5DR_ACTION_FLAG_SHARED;
2452 		acts->push_remove->shared = 1;
2453 		break;
2454 	default:
2455 		break;
2456 	}
2457 
2458 	acts->push_remove->action =
2459 		mlx5dr_action_create_reformat_ipv6_ext(priv->dr_ctx,
2460 				recom_type, &hdr, bulk, flag);
2461 	if (!acts->push_remove->action)
2462 		goto err1;
2463 	acts->rule_acts[at->recom_off].action = acts->push_remove->action;
2464 	acts->rule_acts[at->recom_off].ipv6_ext.header = acts->push_remove->data;
2465 	acts->rule_acts[at->recom_off].ipv6_ext.offset = 0;
2466 	acts->push_remove_pos = at->recom_off;
2467 	if (!acts->push_remove->shared) {
2468 		act_data = __flow_hw_act_data_push_append(dev, acts,
2469 				RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH,
2470 				recom_src, at->recom_off, push_size);
2471 		if (!act_data)
2472 			goto err;
2473 	}
2474 	return 0;
2475 err:
2476 	if (acts->push_remove->action)
2477 		mlx5dr_action_destroy(acts->push_remove->action);
2478 err1:
2479 	if (acts->push_remove) {
2480 		mlx5_free(acts->push_remove);
2481 		acts->push_remove = NULL;
2482 	}
2483 	return -EINVAL;
2484 }
2485 
2486 /**
2487  * Translate rte_flow actions to DR action.
2488  *
2489  * As the action template has already indicated the actions, translate
2490  * the rte_flow actions to DR actions if possible, so that in the flow
2491  * create stage we save the cycles spent organizing the actions.
2492  * Actions with limited information need to be added to a
2493  * list.
2494  *
2495  * @param[in] dev
2496  *   Pointer to the rte_eth_dev structure.
2497  * @param[in] cfg
2498  *   Pointer to the table configuration.
2499  * @param[in/out] acts
2500  *   Pointer to the template HW steering DR actions.
2501  * @param[in] at
2502  *   Action template.
2503  * @param[in] nt_mode
2504  *   Non template rule translate.
2505  * @param[out] error
2506  *   Pointer to error structure.
2507  *
2508  * @return
2509  *   0 on success, a negative errno otherwise and rte_errno is set.
2510  */
2511 static int
2512 __flow_hw_translate_actions_template(struct rte_eth_dev *dev,
2513 				     const struct mlx5_flow_template_table_cfg *cfg,
2514 				     struct mlx5_hw_actions *acts,
2515 				     struct rte_flow_actions_template *at,
2516 				     struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2517 				     bool nt_mode,
2518 				     struct rte_flow_error *error)
2519 {
2520 	struct mlx5_priv *priv = dev->data->dev_private;
2521 	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2522 	struct mlx5_hca_flex_attr *hca_attr = &priv->sh->cdev->config.hca_attr.flex;
2523 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
2524 	struct rte_flow_action *actions = at->actions;
2525 	struct rte_flow_action *masks = at->masks;
2526 	enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
2527 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
2528 	const struct rte_flow_action_raw_encap *raw_encap_data;
2529 	const struct rte_flow_action_ipv6_ext_push *ipv6_ext_data;
2530 	const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
2531 	uint16_t reformat_src = 0, recom_src = 0;
2532 	uint8_t *encap_data = NULL, *encap_data_m = NULL;
2533 	uint8_t *push_data = NULL, *push_data_m = NULL;
2534 	size_t data_size = 0, push_size = 0;
2535 	struct mlx5_hw_modify_header_action mhdr = { 0 };
2536 	bool actions_end = false;
2537 	uint32_t type;
2538 	bool reformat_used = false;
2539 	bool recom_used = false;
2540 	unsigned int of_vlan_offset;
2541 	uint16_t jump_pos;
2542 	uint32_t ct_idx;
2543 	int ret, err;
2544 	uint32_t target_grp = 0;
2545 	int table_type;
2546 
2547 	flow_hw_modify_field_init(&mhdr, at);
2548 	if (attr->transfer)
2549 		type = MLX5DR_TABLE_TYPE_FDB;
2550 	else if (attr->egress)
2551 		type = MLX5DR_TABLE_TYPE_NIC_TX;
2552 	else
2553 		type = MLX5DR_TABLE_TYPE_NIC_RX;
2554 	for (; !actions_end; actions++, masks++) {
2555 		uint64_t pos = actions - at->actions;
2556 		uint16_t src_pos = pos - at->src_off[pos];
2557 		uint16_t dr_pos = at->dr_off[pos];
2558 
2559 		switch ((int)actions->type) {
2560 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2561 			if (!attr->group) {
2562 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2563 				goto err;
2564 			}
2565 			ret = table_template_translate_indirect_list
2566 				(dev, actions, masks, acts, src_pos, dr_pos);
2567 			if (ret)
2568 				goto err;
2569 			break;
2570 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
2571 			if (!attr->group) {
2572 				DRV_LOG(ERR, "Indirect action is not supported in root table.");
2573 				goto err;
2574 			}
2575 			if (actions->conf && masks->conf) {
2576 				if (flow_hw_shared_action_translate
2577 				(dev, actions, acts, src_pos, dr_pos))
2578 					goto err;
2579 			} else if (__flow_hw_act_data_indirect_append
2580 					(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
2581 					 masks->type, src_pos, dr_pos)){
2582 				goto err;
2583 			}
2584 			break;
2585 		case RTE_FLOW_ACTION_TYPE_VOID:
2586 			break;
2587 		case RTE_FLOW_ACTION_TYPE_DROP:
2588 			acts->rule_acts[dr_pos].action =
2589 				priv->hw_drop[!!attr->group];
2590 			break;
2591 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
2592 			if (!attr->group) {
2593 				DRV_LOG(ERR, "Port representor is not supported in root table.");
2594 				goto err;
2595 			}
2596 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2597 			break;
2598 		case RTE_FLOW_ACTION_TYPE_FLAG:
2599 			acts->mark = true;
2600 			acts->rule_acts[dr_pos].tag.value =
2601 				mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
2602 			acts->rule_acts[dr_pos].action =
2603 				priv->hw_tag[!!attr->group];
2604 			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
2605 					rte_memory_order_relaxed);
2606 			flow_hw_rxq_flag_set(dev, true);
2607 			break;
2608 		case RTE_FLOW_ACTION_TYPE_MARK:
2609 			acts->mark = true;
2610 			if (masks->conf &&
2611 			    ((const struct rte_flow_action_mark *)
2612 			     masks->conf)->id)
2613 				acts->rule_acts[dr_pos].tag.value =
2614 					mlx5_flow_mark_set
2615 					(((const struct rte_flow_action_mark *)
2616 					(actions->conf))->id);
2617 			else if (__flow_hw_act_data_general_append(priv, acts,
2618 								   actions->type,
2619 								   src_pos, dr_pos))
2620 				goto err;
2621 			acts->rule_acts[dr_pos].action =
2622 				priv->hw_tag[!!attr->group];
2623 			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
2624 					rte_memory_order_relaxed);
2625 			flow_hw_rxq_flag_set(dev, true);
2626 			break;
2627 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2628 			acts->rule_acts[dr_pos].action =
2629 				priv->hw_push_vlan[type];
2630 			if (is_template_masked_push_vlan(masks->conf))
2631 				acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
2632 					vlan_hdr_to_be32(actions);
2633 			else if (__flow_hw_act_data_general_append
2634 					(priv, acts, actions->type,
2635 					 src_pos, dr_pos))
2636 				goto err;
2637 			of_vlan_offset = is_of_vlan_pcp_present(actions) ?
2638 					MLX5_HW_VLAN_PUSH_PCP_IDX :
2639 					MLX5_HW_VLAN_PUSH_VID_IDX;
2640 			actions += of_vlan_offset;
2641 			masks += of_vlan_offset;
2642 			break;
2643 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2644 			acts->rule_acts[dr_pos].action =
2645 				priv->hw_pop_vlan[type];
2646 			break;
2647 		case RTE_FLOW_ACTION_TYPE_JUMP:
2648 			if (masks->conf &&
2649 			    ((const struct rte_flow_action_jump *)
2650 			     masks->conf)->group) {
2651 				uint32_t jump_group =
2652 					((const struct rte_flow_action_jump *)
2653 					actions->conf)->group;
2654 				acts->jump = flow_hw_jump_action_register
2655 						(dev, cfg, jump_group, error);
2656 				if (!acts->jump)
2657 					goto err;
2658 				acts->rule_acts[dr_pos].action = (!!attr->group) ?
2659 								 acts->jump->hws_action :
2660 								 acts->jump->root_action;
2661 			} else if (__flow_hw_act_data_general_append
2662 					(priv, acts, actions->type,
2663 					 src_pos, dr_pos)){
2664 				goto err;
2665 			}
2666 			break;
2667 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2668 			if (masks->conf &&
2669 			    ((const struct rte_flow_action_queue *)
2670 			     masks->conf)->index) {
2671 				acts->tir = flow_hw_tir_action_register
2672 				(dev, mlx5_hw_act_flag[!!attr->group][type],
2673 				 actions);
2674 				if (!acts->tir)
2675 					goto err;
2676 				acts->rule_acts[dr_pos].action =
2677 					acts->tir->action;
2678 			} else if (__flow_hw_act_data_general_append
2679 					(priv, acts, actions->type,
2680 					 src_pos, dr_pos)) {
2681 				goto err;
2682 			}
2683 			break;
2684 		case RTE_FLOW_ACTION_TYPE_RSS:
2685 			if (actions->conf && masks->conf) {
2686 				acts->tir = flow_hw_tir_action_register
2687 				(dev, mlx5_hw_act_flag[!!attr->group][type],
2688 				 actions);
2689 				if (!acts->tir)
2690 					goto err;
2691 				acts->rule_acts[dr_pos].action =
2692 					acts->tir->action;
2693 			} else if (__flow_hw_act_data_general_append
2694 					(priv, acts, actions->type,
2695 					 src_pos, dr_pos)) {
2696 				goto err;
2697 			}
2698 			break;
2699 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2700 			MLX5_ASSERT(!reformat_used);
2701 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2702 							 actions->conf);
2703 			if (masks->conf)
2704 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2705 								   masks->conf);
2706 			reformat_used = true;
2707 			reformat_src = src_pos;
2708 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2709 			break;
2710 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2711 			MLX5_ASSERT(!reformat_used);
2712 			enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2713 							 actions->conf);
2714 			if (masks->conf)
2715 				enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2716 								   masks->conf);
2717 			reformat_used = true;
2718 			reformat_src = src_pos;
2719 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2720 			break;
2721 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2722 			raw_encap_data =
2723 				(const struct rte_flow_action_raw_encap *)
2724 				 masks->conf;
2725 			if (raw_encap_data)
2726 				encap_data_m = raw_encap_data->data;
2727 			raw_encap_data =
2728 				(const struct rte_flow_action_raw_encap *)
2729 				 actions->conf;
2730 			encap_data = raw_encap_data->data;
2731 			data_size = raw_encap_data->size;
2732 			if (reformat_used) {
2733 				refmt_type = data_size <
2734 				MLX5_ENCAPSULATION_DECISION_SIZE ?
2735 				MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
2736 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
2737 			} else {
2738 				reformat_used = true;
2739 				refmt_type =
2740 				MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2741 			}
2742 			reformat_src = src_pos;
2743 			break;
2744 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2745 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2746 			MLX5_ASSERT(!reformat_used);
2747 			reformat_used = true;
2748 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2749 			break;
2750 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2751 			reformat_used = true;
2752 			refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2753 			break;
2754 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
2755 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2756 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2757 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2758 				goto err;
2759 			}
2760 			MLX5_ASSERT(!recom_used && !recom_type);
2761 			recom_used = true;
2762 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
2763 			ipv6_ext_data =
2764 				(const struct rte_flow_action_ipv6_ext_push *)masks->conf;
2765 			if (ipv6_ext_data)
2766 				push_data_m = ipv6_ext_data->data;
2767 			ipv6_ext_data =
2768 				(const struct rte_flow_action_ipv6_ext_push *)actions->conf;
2769 			if (ipv6_ext_data) {
2770 				push_data = ipv6_ext_data->data;
2771 				push_size = ipv6_ext_data->size;
2772 			}
2773 			recom_src = src_pos;
2774 			break;
2775 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
2776 			if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2777 			    !priv->sh->srh_flex_parser.flex.mapnum) {
2778 				DRV_LOG(ERR, "SRv6 anchor is not supported.");
2779 				goto err;
2780 			}
2781 			recom_used = true;
2782 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
2783 			break;
2784 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
2785 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2786 						&target_grp, error);
2787 			if (ret)
2788 				return ret;
2789 			if (target_grp == 0) {
2790 				__flow_hw_action_template_destroy(dev, acts);
2791 				return rte_flow_error_set(error, ENOTSUP,
2792 						RTE_FLOW_ERROR_TYPE_ACTION,
2793 						NULL,
2794 						"Send to kernel action on root table is not supported in HW steering mode");
2795 			}
2796 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
2797 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
2798 				      MLX5DR_TABLE_TYPE_FDB);
2799 			acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
2800 			break;
2801 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
2802 			err = flow_hw_modify_field_compile(dev, attr, actions,
2803 							   masks, acts, &mhdr,
2804 							   src_pos, error);
2805 			if (err)
2806 				goto err;
2807 			break;
2808 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2809 			if (flow_hw_represented_port_compile
2810 					(dev, attr, actions,
2811 					 masks, acts, src_pos, dr_pos, error))
2812 				goto err;
2813 			break;
2814 		case RTE_FLOW_ACTION_TYPE_METER:
2815 			/*
2816 			 * The METER action is compiled into 2 DR actions - ASO_METER and FT.
2817 			 * The calculated DR offset is stored only for ASO_METER; FT is
2818 			 * assumed to be the next action.
2819 			 */
2820 			jump_pos = dr_pos + 1;
2821 			if (actions->conf && masks->conf &&
2822 			    ((const struct rte_flow_action_meter *)
2823 			     masks->conf)->mtr_id) {
2824 				err = flow_hw_meter_compile(dev, cfg,
2825 							    dr_pos, jump_pos, actions, acts, error);
2826 				if (err)
2827 					goto err;
2828 			} else if (__flow_hw_act_data_general_append(priv, acts,
2829 								     actions->type,
2830 								     src_pos,
2831 								     dr_pos))
2832 				goto err;
2833 			break;
2834 		case RTE_FLOW_ACTION_TYPE_AGE:
2835 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2836 						&target_grp, error);
2837 			if (ret)
2838 				return ret;
2839 			if (target_grp == 0) {
2840 				__flow_hw_action_template_destroy(dev, acts);
2841 				return rte_flow_error_set(error, ENOTSUP,
2842 						RTE_FLOW_ERROR_TYPE_ACTION,
2843 						NULL,
2844 						"Age action on root table is not supported in HW steering mode");
2845 			}
2846 			if (__flow_hw_act_data_general_append(priv, acts,
2847 							      actions->type,
2848 							      src_pos,
2849 							      dr_pos))
2850 				goto err;
2851 			break;
2852 		case RTE_FLOW_ACTION_TYPE_COUNT:
2853 			ret = flow_hw_translate_group(dev, cfg, attr->group,
2854 						&target_grp, error);
2855 			if (ret)
2856 				return ret;
2857 			if (target_grp == 0) {
2858 				__flow_hw_action_template_destroy(dev, acts);
2859 				return rte_flow_error_set(error, ENOTSUP,
2860 						RTE_FLOW_ERROR_TYPE_ACTION,
2861 						NULL,
2862 						"Counter action on root table is not supported in HW steering mode");
2863 			}
2864 			if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
2865 			    (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
2866 				/*
2867 				 * When both COUNT and AGE are requested, they are
2868 				 * saved as an AGE action which also creates the
2869 				 * counter.
2870 				 */
2871 				break;
2872 			if (masks->conf &&
2873 			    ((const struct rte_flow_action_count *)
2874 			     masks->conf)->id) {
2875 				err = flow_hw_cnt_compile(dev, dr_pos, acts);
2876 				if (err)
2877 					goto err;
2878 			} else if (__flow_hw_act_data_general_append
2879 					(priv, acts, actions->type,
2880 					 src_pos, dr_pos)) {
2881 				goto err;
2882 			}
2883 			break;
2884 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2885 			if (masks->conf) {
2886 				ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(actions->conf);
2887 				if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
2888 						       &acts->rule_acts[dr_pos]))
2889 					goto err;
2890 			} else if (__flow_hw_act_data_general_append
2891 					(priv, acts, actions->type,
2892 					 src_pos, dr_pos)) {
2893 				goto err;
2894 			}
2895 			break;
2896 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
2897 			if (actions->conf && masks->conf &&
2898 			    ((const struct rte_flow_action_meter_mark *)
2899 			     masks->conf)->profile) {
2900 				err = flow_hw_meter_mark_compile(dev,
2901 								 dr_pos, actions,
2902 								 acts->rule_acts,
2903 								 &acts->mtr_id,
2904 								 MLX5_HW_INV_QUEUE,
2905 								 error);
2906 				if (err)
2907 					goto err;
2908 			} else if (__flow_hw_act_data_general_append(priv, acts,
2909 								     actions->type,
2910 								     src_pos,
2911 								     dr_pos))
2912 				goto err;
2913 			break;
2914 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
2915 			/* Internal, can be skipped. */
2916 			if (!!attr->group) {
2917 				DRV_LOG(ERR, "DEFAULT MISS action is only"
2918 					" supported in root table.");
2919 				goto err;
2920 			}
2921 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2922 			break;
2923 		case RTE_FLOW_ACTION_TYPE_NAT64:
2924 			if (masks->conf &&
2925 			    ((const struct rte_flow_action_nat64 *)masks->conf)->type) {
2926 				const struct rte_flow_action_nat64 *nat64_c =
2927 					(const struct rte_flow_action_nat64 *)actions->conf;
2928 
2929 				acts->rule_acts[dr_pos].action =
2930 					priv->action_nat64[type][nat64_c->type];
2931 			} else if (__flow_hw_act_data_general_append(priv, acts,
2932 								     actions->type,
2933 								     src_pos, dr_pos))
2934 				goto err;
2935 			break;
2936 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
2937 			if (masks->conf &&
2938 			    ((const struct rte_flow_action_jump_to_table_index *)
2939 			     masks->conf)->table) {
2940 				struct rte_flow_template_table *jump_table =
2941 					((const struct rte_flow_action_jump_to_table_index *)
2942 					actions->conf)->table;
2943 				acts->rule_acts[dr_pos].jump_to_matcher.offset =
2944 					((const struct rte_flow_action_jump_to_table_index *)
2945 					actions->conf)->index;
2946 				if (likely(!rte_flow_template_table_resizable(dev->data->port_id,
2947 									&jump_table->cfg.attr))) {
2948 					acts->rule_acts[dr_pos].action =
2949 						jump_table->matcher_info[0].jump;
2950 				} else {
2951 					uint32_t selector;
2952 					rte_rwlock_read_lock(&jump_table->matcher_replace_rwlk);
2953 					selector = jump_table->matcher_selector;
2954 					acts->rule_acts[dr_pos].action =
2955 						jump_table->matcher_info[selector].jump;
2956 					rte_rwlock_read_unlock(&jump_table->matcher_replace_rwlk);
2957 				}
2958 			} else if (__flow_hw_act_data_general_append
2959 					(priv, acts, actions->type,
2960 					 src_pos, dr_pos)){
2961 				goto err;
2962 			}
2963 			break;
2964 		case RTE_FLOW_ACTION_TYPE_END:
2965 			actions_end = true;
2966 			break;
2967 		default:
2968 			break;
2969 		}
2970 	}
2971 	if (mhdr.pos != UINT16_MAX) {
2972 		ret = mlx5_tbl_translate_modify_header(dev, cfg, acts, mp_ctx, &mhdr, error);
2973 		if (ret)
2974 			goto err;
2975 		if (!nt_mode && mhdr.shared) {
2976 			ret = mlx5_tbl_ensure_shared_modify_header(dev, cfg, acts, error);
2977 			if (ret)
2978 				goto err;
2979 		}
2980 	}
2981 	if (reformat_used) {
2982 		ret = mlx5_tbl_translate_reformat(priv, acts, at,
2983 						  enc_item, enc_item_m,
2984 						  encap_data, encap_data_m,
2985 						  mp_ctx, data_size,
2986 						  reformat_src,
2987 						  refmt_type, error);
2988 		if (ret)
2989 			goto err;
2990 		if (!nt_mode && acts->encap_decap->shared) {
2991 			ret = mlx5_tbl_create_reformat_action(priv, table_attr, acts, at,
2992 							      encap_data, data_size,
2993 							      refmt_type);
2994 			if (ret)
2995 				goto err;
2996 		}
2997 	}
2998 	if (recom_used) {
2999 		MLX5_ASSERT(at->recom_off != UINT16_MAX);
3000 		ret = mlx5_create_ipv6_ext_reformat(dev, cfg, acts, at, push_data,
3001 						    push_data_m, push_size, recom_src,
3002 						    recom_type);
3003 		if (ret)
3004 			goto err;
3005 	}
3006 	return 0;
3007 err:
3008 	/* Make sure rte_errno is set if the error path was reached without it. */
3009 	if (!rte_errno)
3010 		rte_errno = EINVAL;
3011 	err = rte_errno;
3012 	__flow_hw_action_template_destroy(dev, acts);
3013 	return rte_flow_error_set(error, err,
3014 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3015 				  "failed to create rte table");
3016 }
3017 
3018 /**
3019  * Translate rte_flow actions to DR action.
3020  *
3021  * As the action template has already indicated the actions, translate
3022  * the rte_flow actions to DR actions if possible, so that in the flow
3023  * create stage we save the cycles spent organizing the actions.
3024  * Actions with limited information need to be added to a
3025  * list.
3026  *
3027  * @param[in] dev
3028  *   Pointer to the rte_eth_dev structure.
3029  * @param[in] cfg
3030  *   Pointer to the table configuration.
3031  * @param[in/out] acts
3032  *   Pointer to the template HW steering DR actions.
3033  * @param[in] at
3034  *   Action template.
3035  * @param[out] error
3036  *   Pointer to error structure.
3037  *
3038  * @return
3039  *   0 on success, a negative errno otherwise and rte_errno is set.
3040  */
3041 static int
3042 flow_hw_translate_actions_template(struct rte_eth_dev *dev,
3043 			    const struct mlx5_flow_template_table_cfg *cfg,
3044 			    struct mlx5_hw_actions *acts,
3045 			    struct rte_flow_actions_template *at,
3046 			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
3047 			    struct rte_flow_error *error)
3048 {
3049 	return __flow_hw_translate_actions_template(dev, cfg, acts, at, mp_ctx, false, error);
3050 }
3051 
3052 static __rte_always_inline struct mlx5dr_rule_action *
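/*
 * Per-table DR rule action buffers form a flat 2-D array indexed by
 * (action template, queue): offset = action_template_index * nb_queue + queue.
 * Illustrative example (sizes assumed): with nb_queue == 4, action template 2
 * and queue 1 select entry 2 * 4 + 1 == 9.
 */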
3053 flow_hw_get_dr_action_buffer(struct mlx5_priv *priv,
3054 			     struct rte_flow_template_table *table,
3055 			     uint8_t action_template_index,
3056 			     uint32_t queue)
3057 {
3058 	uint32_t offset = action_template_index * priv->nb_queue + queue;
3059 
3060 	return &table->rule_acts[offset].acts[0];
3061 }
3062 
3063 static void
3064 flow_hw_populate_rule_acts_caches(struct rte_eth_dev *dev,
3065 				  struct rte_flow_template_table *table,
3066 				  uint8_t at_idx)
3067 {
3068 	struct mlx5_priv *priv = dev->data->dev_private;
3069 	uint32_t q;
3070 
3071 	for (q = 0; q < priv->nb_queue; ++q) {
3072 		struct mlx5dr_rule_action *rule_acts =
3073 				flow_hw_get_dr_action_buffer(priv, table, at_idx, q);
3074 
3075 		rte_memcpy(rule_acts, table->ats[at_idx].acts.rule_acts,
3076 			   sizeof(table->ats[at_idx].acts.rule_acts));
3077 	}
3078 }
3079 
3080 /**
3081  * Translate rte_flow actions to DR action.
3082  *
3083  * @param[in] dev
3084  *   Pointer to the rte_eth_dev structure.
3085  * @param[in] tbl
3086  *   Pointer to the flow template table.
3087  * @param[out] error
3088  *   Pointer to error structure.
3089  *
3090  * @return
3091  *    0 on success, negative value otherwise and rte_errno is set.
3092  */
3093 static int
3094 flow_hw_translate_all_actions_templates(struct rte_eth_dev *dev,
3095 			  struct rte_flow_template_table *tbl,
3096 			  struct rte_flow_error *error)
3097 {
3098 	int ret;
3099 	uint32_t i;
3100 
3101 	for (i = 0; i < tbl->nb_action_templates; i++) {
3102 		if (flow_hw_translate_actions_template(dev, &tbl->cfg,
3103 						&tbl->ats[i].acts,
3104 						tbl->ats[i].action_template,
3105 						&tbl->mpctx, error))
3106 			goto err;
3107 		flow_hw_populate_rule_acts_caches(dev, tbl, i);
3108 	}
3109 	ret = mlx5_tbl_multi_pattern_process(dev, tbl, &tbl->mpctx.segments[0],
3110 					     rte_log2_u32(tbl->cfg.attr.nb_flows),
3111 					     error);
3112 	if (ret)
3113 		goto err;
3114 	return 0;
3115 err:
3116 	while (i--)
3117 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
3118 	return -1;
3119 }
3120 
3121 /**
3122  * Get shared indirect action.
3123  *
3124  * @param[in] dev
3125  *   Pointer to the rte_eth_dev data structure.
3126  * @param[in] act_data
3127  *   Pointer to the recorded action construct data.
3128  * @param[in] item_flags
3129  *   The matcher item_flags used for RSS lookup.
3130  * @param[in] rule_act
3131  *   Pointer to the shared action's destination rule DR action.
3132  *
3133  * @return
3134  *    0 on success, negative value otherwise and rte_errno is set.
3135  */
3136 static __rte_always_inline int
3137 flow_hw_shared_action_get(struct rte_eth_dev *dev,
3138 			  struct mlx5_action_construct_data *act_data,
3139 			  const uint64_t item_flags,
3140 			  struct mlx5dr_rule_action *rule_act)
3141 {
3142 	struct mlx5_priv *priv = dev->data->dev_private;
3143 	struct mlx5_flow_rss_desc rss_desc = { 0 };
3144 	uint64_t hash_fields = 0;
3145 	uint32_t hrxq_idx = 0;
3146 	struct mlx5_hrxq *hrxq = NULL;
3147 	int act_type = act_data->type;
3148 
3149 	switch (act_type) {
3150 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3151 		rss_desc.level = act_data->shared_rss.level;
3152 		rss_desc.types = act_data->shared_rss.types;
3153 		rss_desc.symmetric_hash_function = act_data->shared_rss.symmetric_hash_function;
3154 		flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
3155 		hrxq_idx = flow_dv_action_rss_hrxq_lookup
3156 			(dev, act_data->shared_rss.idx, hash_fields);
3157 		if (hrxq_idx)
3158 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
3159 					      hrxq_idx);
3160 		if (hrxq) {
3161 			rule_act->action = hrxq->action;
3162 			return 0;
3163 		}
3164 		break;
3165 	default:
3166 		DRV_LOG(WARNING, "Unsupported shared action type:%d",
3167 			act_data->type);
3168 		break;
3169 	}
3170 	return -1;
3171 }
3172 
3173 static void
3174 flow_hw_construct_quota(struct mlx5_priv *priv,
3175 			struct mlx5dr_rule_action *rule_act, uint32_t qid)
3176 {
3177 	rule_act->action = priv->quota_ctx.dr_action;
3178 	rule_act->aso_meter.offset = qid - 1;
3179 	rule_act->aso_meter.init_color =
3180 		MLX5DR_ACTION_ASO_METER_COLOR_GREEN;
3181 }
3182 
3183 /**
3184  * Construct shared indirect action.
3185  *
3186  * @param[in] dev
3187  *   Pointer to the rte_eth_dev data structure.
3188  * @param[in] queue
3189  *   The flow creation queue index.
3190  * @param[in] action
3191  *   Pointer to the shared indirect rte_flow action.
3192  * @param[in] table
3193  *   Pointer to the flow table.
3194  * @param[in] item_flags
3195  *   Item flags.
3196  * @param[in] action_flags
3197  *   Actions bit-map detected in this template.
3198  * @param[in, out] flow
3199  *   Pointer to the flow containing the counter.
3200  * @param[in] rule_act
3201  *   Pointer to the shared action's destination rule DR action.
3202  *
3203  * @return
3204  *    0 on success, negative value otherwise and rte_errno is set.
3205  */
3206 static __rte_always_inline int
3207 flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
3208 				const struct rte_flow_action *action,
3209 				struct rte_flow_template_table *table __rte_unused,
3210 				const uint64_t item_flags, uint64_t action_flags,
3211 				struct rte_flow_hw *flow,
3212 				struct mlx5dr_rule_action *rule_act)
3213 {
3214 	struct mlx5_priv *priv = dev->data->dev_private;
3215 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3216 	struct mlx5_action_construct_data act_data;
3217 	struct mlx5_shared_action_rss *shared_rss;
3218 	struct mlx5_aso_mtr *aso_mtr;
3219 	struct mlx5_age_info *age_info;
3220 	struct mlx5_hws_age_param *param;
3221 	struct rte_flow_hw_aux *aux;
3222 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
3223 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
3224 	uint32_t idx = act_idx &
3225 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3226 	cnt_id_t age_cnt;
3227 
3228 	memset(&act_data, 0, sizeof(act_data));
3229 	switch (type) {
3230 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
3231 		act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
3232 		shared_rss = mlx5_ipool_get
3233 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
3234 		if (!shared_rss)
3235 			return -1;
3236 		act_data.shared_rss.idx = idx;
3237 		act_data.shared_rss.level = shared_rss->origin.level;
3238 		act_data.shared_rss.types = !shared_rss->origin.types ?
3239 					    RTE_ETH_RSS_IP :
3240 					    shared_rss->origin.types;
3241 		act_data.shared_rss.symmetric_hash_function =
3242 			MLX5_RSS_IS_SYMM(shared_rss->origin.func);
3243 
3244 		if (flow_hw_shared_action_get
3245 				(dev, &act_data, item_flags, rule_act))
3246 			return -1;
3247 		break;
3248 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
3249 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
3250 				act_idx,
3251 				&rule_act->action,
3252 				&rule_act->counter.offset))
3253 			return -1;
3254 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3255 		flow->cnt_id = act_idx;
3256 		break;
3257 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
3258 		aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3259 		/*
3260 		 * Save the index together with the indirect type,
3261 		 * so it can be recognized on flow destroy.
3262 		 */
3263 		mlx5_flow_hw_aux_set_age_idx(flow, aux, act_idx);
3264 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
3265 		if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
3266 			/*
3267 			 * The mutual update for indirect AGE & COUNT will be
3268 			 * performed later, after we have IDs for both of them.
3269 			 */
3270 			break;
3271 		age_info = GET_PORT_AGE_INFO(priv);
3272 		param = mlx5_ipool_get(age_info->ages_ipool, idx);
3273 		if (param == NULL)
3274 			return -1;
3275 		if (action_flags & MLX5_FLOW_ACTION_COUNT) {
3276 			if (mlx5_hws_cnt_pool_get(priv->hws_cpool,
3277 						  &param->queue_id, &age_cnt,
3278 						  idx) < 0)
3279 				return -1;
3280 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3281 			flow->cnt_id = age_cnt;
3282 			param->nb_cnts++;
3283 		} else {
3284 			/*
3285 			 * Get the counter of this indirect AGE or create one
3286 			 * if it doesn't exist.
3287 			 */
3288 			age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
3289 			if (age_cnt == 0)
3290 				return -1;
3291 		}
3292 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
3293 						     age_cnt, &rule_act->action,
3294 						     &rule_act->counter.offset))
3295 			return -1;
3296 		break;
3297 	case MLX5_INDIRECT_ACTION_TYPE_CT:
3298 		if (flow_hw_ct_compile(dev, queue, idx, rule_act))
3299 			return -1;
3300 		break;
3301 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
3302 		/* Find ASO object. */
3303 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
3304 		if (!aso_mtr)
3305 			return -1;
3306 		rule_act->action = pool->action;
3307 		rule_act->aso_meter.offset = aso_mtr->offset;
3308 		break;
3309 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
3310 		flow_hw_construct_quota(priv, rule_act, idx);
3311 		break;
3312 	default:
3313 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
3314 		break;
3315 	}
3316 	return 0;
3317 }
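/*
 * Note: the indirect handle carried in action->conf above is the pointer
 * returned by rte_flow_action_handle_create(); the PMD encodes the action
 * type in the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the object
 * index in the lower bits. Illustrative application-side sketch (port_id
 * is assumed to exist, error handling trimmed):
 *
 * @code{.c}
 * struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 * struct rte_flow_error err;
 * struct rte_flow_action_handle *handle =
 *     rte_flow_action_handle_create(port_id, &conf, &count, &err);
 * struct rte_flow_action use = {
 *     .type = RTE_FLOW_ACTION_TYPE_INDIRECT,
 *     .conf = handle,
 * };
 * @endcode
 */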
3318 
3319 static __rte_always_inline int
3320 flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
3321 {
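	/*
	 * Modify header commands are stored in big-endian; convert data0
	 * to CPU order so that the action_type bitfield can be read.
	 */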
3322 	struct mlx5_modification_cmd cmd_he = {
3323 		.data0 = rte_be_to_cpu_32(cmd->data0),
3324 		.data1 = 0,
3325 	};
3326 
3327 	return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
3328 }
3329 
3330 /**
3331  * Construct the modify header commands of a flow rule.
3332  *
3333  * For action templates that contain dynamic modify field actions, the
3334  * pre-translated modify header commands need to be updated with the
3335  * values taken from the rte_flow action given during flow creation.
3336  *
3337  * @param[in] mhdr_cmd
3338  *   Pointer to the array of modify header commands to be filled in.
3339  * @param[in] act_data
3340  *   Pointer to the recorded action construct data.
3341  * @param[in] hw_acts
3342  *   Pointer to translated actions from template.
3343  * @param[in] action
3344  *   Pointer to the rte_flow modify field action.
3345  *
3346  * @return
3347  *    0 on success, negative value otherwise and rte_errno is set.
3348  */
3354 static __rte_always_inline int
3355 flow_hw_modify_field_construct(struct mlx5_modification_cmd *mhdr_cmd,
3356 			       struct mlx5_action_construct_data *act_data,
3357 			       const struct mlx5_hw_actions *hw_acts,
3358 			       const struct rte_flow_action *action)
3359 {
3360 	const struct rte_flow_action_modify_field *mhdr_action = action->conf;
3361 	uint8_t values[16] = { 0 };
3362 	unaligned_uint32_t *value_p;
3363 	uint32_t i;
3364 	struct field_modify_info *field;
3365 
3366 	if (!hw_acts->mhdr)
3367 		return -1;
3368 	if (hw_acts->mhdr->shared || act_data->modify_header.shared)
3369 		return 0;
3370 	MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
3371 		    mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
3372 	if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
3373 	    mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
3374 		return 0;
3375 	if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
3376 		rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
3377 	else
3378 		rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
3379 	if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
3380 	    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
3381 	    mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
3382 	    mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
3383 		uint8_t tag_index = flow_tag_index_get(&mhdr_action->dst);
3384 
3385 		value_p = (unaligned_uint32_t *)values;
3386 		if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
3387 		    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
3388 			*value_p = rte_cpu_to_be_32(*value_p << 16);
3389 		else
3390 			*value_p = rte_cpu_to_be_32(*value_p);
3391 	} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
3392 		   mhdr_action->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
3393 		uint32_t tmp;
3394 
3395 		/*
3396 		 * Both QFI and Geneve option type are passed as a uint8_t integer,
3397 		 * but they are accessed through the 2nd least significant byte of
3398 		 * a 32-bit field in the modify header command.
3399 		 */
3400 		tmp = values[0];
3401 		value_p = (unaligned_uint32_t *)values;
3402 		*value_p = rte_cpu_to_be_32(tmp << 8);
3403 	}
3404 	i = act_data->modify_header.mhdr_cmds_off;
3405 	field = act_data->modify_header.field;
3406 	do {
3407 		uint32_t off_b;
3408 		uint32_t mask;
3409 		uint32_t data;
3410 		const uint8_t *mask_src;
3411 
3412 		if (i >= act_data->modify_header.mhdr_cmds_end)
3413 			return -1;
3414 		if (flow_hw_mhdr_cmd_is_nop(&mhdr_cmd[i])) {
3415 			++i;
3416 			continue;
3417 		}
3418 		mask_src = (const uint8_t *)act_data->modify_header.mask;
3419 		mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
3420 		if (!mask) {
3421 			++field;
3422 			continue;
3423 		}
3424 		off_b = rte_bsf32(mask);
3425 		data = flow_dv_fetch_field(values + field->offset, field->size);
3426 		/*
3427 		 * IPv6 DSCP uses OUT_IPV6_TRAFFIC_CLASS as its ID, but the value
3428 		 * starts 2 bits to the left. Shift the data left for IPv6 DSCP.
3429 		 */
3430 		if (field->id == MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS &&
3431 		    mhdr_action->dst.field == RTE_FLOW_FIELD_IPV6_DSCP)
3432 			data <<= MLX5_IPV6_HDR_DSCP_SHIFT;
3433 		data = (data & mask) >> off_b;
3434 		mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
3435 		++field;
3436 	} while (field->size);
3437 	return 0;
3438 }
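/*
 * Illustrative application-side sketch (not part of the driver) of a
 * modify_field action with an immediate value that takes the non-shared
 * path above; the tag value and width are arbitrary:
 *
 * @code{.c}
 * uint32_t tag = 0x1234;
 * struct rte_flow_action_modify_field conf = {
 *     .operation = RTE_FLOW_MODIFY_SET,
 *     .dst = { .field = RTE_FLOW_FIELD_TAG },
 *     .src = { .field = RTE_FLOW_FIELD_VALUE },
 *     .width = 32,
 * };
 *
 * memcpy(conf.src.value, &tag, sizeof(tag));
 * @endcode
 */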
3439 
3440 /**
3441  * Release any actions allocated for the flow rule during actions construction.
3442  *
3443  * @param[in] flow
3444  *   Pointer to flow structure.
3445  */
3446 static void
3447 flow_hw_release_actions(struct rte_eth_dev *dev,
3448 			uint32_t queue,
3449 			struct rte_flow_hw *flow)
3450 {
3451 	struct mlx5_priv *priv = dev->data->dev_private;
3452 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3453 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3454 
3455 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
3456 		flow_hw_jump_release(dev, flow->jump);
3457 	else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
3458 		mlx5_hrxq_obj_release(dev, flow->hrxq);
3459 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
3460 		flow_hw_age_count_release(priv, queue, flow, NULL);
3461 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
3462 		mlx5_ipool_free(pool->idx_pool, mlx5_flow_hw_aux_get_mtr_id(flow, aux));
3463 }
3464 
3465 /**
3466  * Construct flow action array.
3467  *
3468  * For action templates that contain dynamic actions, these actions need to
3469  * be updated according to the rte_flow actions given during flow creation.
3470  *
3471  * @param[in] dev
3472  *   Pointer to the rte_eth_dev structure.
3473  * @param[in] flow
3474  *   Pointer to flow structure.
3475  * @param[in] ap
3476  *   Pointer to container for temporarily constructed actions' parameters.
3477  * @param[in] hw_at
3478  *   Pointer to the action template to be used.
3479  * @param[in] item_flags
3480  *   Item flags.
3481  * @param[in] table
3482  *   Pointer to the template table.
3483  * @param[in] actions
3484  *   Array of rte_flow actions to be checked.
3485  * @param[in] rule_acts
3486  *   Array of DR rule actions to be used during flow creation.
3487  * @param[in] queue
3488  *   The flow creation queue index.
3489  *
3490  * @return
3491  *    0 on success, negative value otherwise and rte_errno is set.
3492  */
3493 static __rte_always_inline int
3494 flow_hw_actions_construct(struct rte_eth_dev *dev,
3495 			  struct rte_flow_hw *flow,
3496 			  struct mlx5_flow_hw_action_params *ap,
3497 			  const struct mlx5_hw_action_template *hw_at,
3498 			  uint64_t item_flags,
3499 			  struct rte_flow_template_table *table,
3500 			  const struct rte_flow_action actions[],
3501 			  struct mlx5dr_rule_action *rule_acts,
3502 			  uint32_t queue,
3503 			  struct rte_flow_error *error)
3504 {
3505 	struct mlx5_priv *priv = dev->data->dev_private;
3506 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3507 	struct mlx5_action_construct_data *act_data;
3508 	const struct rte_flow_actions_template *at = hw_at->action_template;
3509 	const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
3510 	const struct rte_flow_action *action;
3511 	const struct rte_flow_action_raw_encap *raw_encap_data;
3512 	const struct rte_flow_action_ipv6_ext_push *ipv6_push;
3513 	const struct rte_flow_item *enc_item = NULL;
3514 	const struct rte_flow_action_ethdev *port_action = NULL;
3515 	const struct rte_flow_action_meter *meter = NULL;
3516 	const struct rte_flow_action_age *age = NULL;
3517 	const struct rte_flow_action_nat64 *nat64_c = NULL;
3518 	struct rte_flow_attr attr = {
3519 		.ingress = 1,
3520 	};
3521 	uint32_t ft_flag;
3522 	int ret;
3523 	size_t encap_len = 0;
3524 	uint32_t age_idx = 0;
3525 	uint32_t mtr_idx = 0;
3526 	struct mlx5_aso_mtr *aso_mtr;
3527 	struct mlx5_multi_pattern_segment *mp_segment = NULL;
3528 	struct rte_flow_hw_aux *aux;
3529 
3530 	attr.group = table->grp->group_id;
3531 	ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
3532 	if (table->type == MLX5DR_TABLE_TYPE_FDB) {
3533 		attr.transfer = 1;
3534 		attr.ingress = 1;
3535 	} else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
3536 		attr.egress = 1;
3537 		attr.ingress = 0;
3538 	} else {
3539 		attr.ingress = 1;
3540 	}
3541 	if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0 && !hw_acts->mhdr->shared) {
3542 		uint16_t pos = hw_acts->mhdr->pos;
3543 
3544 		mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3545 		if (!mp_segment || !mp_segment->mhdr_action)
3546 			return -1;
3547 		rule_acts[pos].action = mp_segment->mhdr_action;
3548 		/* offset is relative to DR action */
3549 		rule_acts[pos].modify_header.offset =
3550 					flow->res_idx - mp_segment->head_index;
3551 		rule_acts[pos].modify_header.data =
3552 					(uint8_t *)ap->mhdr_cmd;
3553 		MLX5_ASSERT(hw_acts->mhdr->mhdr_cmds_num <= MLX5_MHDR_MAX_CMD);
3554 		rte_memcpy(ap->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
3555 			   sizeof(*ap->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
3556 	}
3557 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
3558 		uint32_t jump_group;
3559 		uint32_t tag;
3560 		struct mlx5_hw_jump_action *jump;
3561 		struct mlx5_hrxq *hrxq;
3562 		uint32_t ct_idx;
3563 		cnt_id_t cnt_id;
3564 		uint32_t *cnt_queue;
3565 		uint32_t mtr_id;
3566 		struct rte_flow_template_table *jump_table;
3567 
3568 		action = &actions[act_data->action_src];
3569 		/*
3570 		 * action template construction replaces
3571 		 * OF_SET_VLAN_VID with MODIFY_FIELD
3572 		 */
3573 		if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3574 			MLX5_ASSERT(act_data->type ==
3575 				    RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
3576 		else
3577 			MLX5_ASSERT(action->type ==
3578 				    RTE_FLOW_ACTION_TYPE_INDIRECT ||
3579 				    (int)action->type == act_data->type);
3580 		switch ((int)act_data->type) {
3581 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
3582 			act_data->indirect_list_cb(dev, act_data, action,
3583 						   &rule_acts[act_data->action_dst]);
3584 			break;
3585 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
3586 			if (flow_hw_shared_action_construct
3587 					(dev, queue, action, table,
3588 					 item_flags, at->action_flags, flow,
3589 					 &rule_acts[act_data->action_dst]))
3590 				goto error;
3591 			break;
3592 		case RTE_FLOW_ACTION_TYPE_VOID:
3593 			break;
3594 		case RTE_FLOW_ACTION_TYPE_MARK:
3595 			tag = mlx5_flow_mark_set
3596 			      (((const struct rte_flow_action_mark *)
3597 			      (action->conf))->id);
3598 			rule_acts[act_data->action_dst].tag.value = tag;
3599 			break;
3600 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3601 			rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
3602 				vlan_hdr_to_be32(action);
3603 			break;
3604 		case RTE_FLOW_ACTION_TYPE_JUMP:
3605 			jump_group = ((const struct rte_flow_action_jump *)
3606 						action->conf)->group;
3607 			jump = flow_hw_jump_action_register
3608 				(dev, &table->cfg, jump_group, NULL);
3609 			if (!jump)
3610 				goto error;
3611 			rule_acts[act_data->action_dst].action =
3612 			(!!attr.group) ? jump->hws_action : jump->root_action;
3613 			flow->jump = jump;
3614 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3615 			break;
3616 		case RTE_FLOW_ACTION_TYPE_RSS:
3617 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3618 			hrxq = flow_hw_tir_action_register(dev, ft_flag, action);
3619 			if (!hrxq)
3620 				goto error;
3621 			rule_acts[act_data->action_dst].action = hrxq->action;
3622 			flow->hrxq = hrxq;
3623 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ;
3624 			break;
3625 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3626 			if (flow_hw_shared_action_get
3627 				(dev, act_data, item_flags,
3628 				 &rule_acts[act_data->action_dst]))
3629 				goto error;
3630 			break;
3631 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3632 			enc_item = ((const struct rte_flow_action_vxlan_encap *)
3633 				   action->conf)->definition;
3634 			if (flow_dv_convert_encap_data(enc_item, ap->encap_data, &encap_len, NULL))
3635 				goto error;
3636 			break;
3637 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3638 			enc_item = ((const struct rte_flow_action_nvgre_encap *)
3639 				   action->conf)->definition;
3640 			if (flow_dv_convert_encap_data(enc_item, ap->encap_data, &encap_len, NULL))
3641 				goto error;
3642 			break;
3643 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3644 			raw_encap_data =
3645 				(const struct rte_flow_action_raw_encap *)
3646 				 action->conf;
3647 			MLX5_ASSERT(raw_encap_data->size == act_data->encap.len);
3648 			if (unlikely(act_data->encap.len > MLX5_ENCAP_MAX_LEN))
3649 				return -1;
3650 			rte_memcpy(ap->encap_data, raw_encap_data->data, act_data->encap.len);
3651 			break;
3652 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
3653 			ipv6_push =
3654 				(const struct rte_flow_action_ipv6_ext_push *)action->conf;
3655 			MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
3656 			if (unlikely(act_data->ipv6_ext.len > MLX5_PUSH_MAX_LEN))
3657 				return -1;
3658 			rte_memcpy(ap->ipv6_push_data, ipv6_push->data,
3659 				   act_data->ipv6_ext.len);
3660 			break;
3661 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3662 			if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3663 				ret = flow_hw_set_vlan_vid_construct(dev, ap->mhdr_cmd,
3664 								     act_data,
3665 								     hw_acts,
3666 								     action);
3667 			else
3668 				ret = flow_hw_modify_field_construct(ap->mhdr_cmd,
3669 								     act_data,
3670 								     hw_acts,
3671 								     action);
3672 			if (ret)
3673 				goto error;
3674 			break;
3675 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3676 			port_action = action->conf;
3677 			if (!priv->hw_vport[port_action->port_id])
3678 				goto error;
3679 			rule_acts[act_data->action_dst].action =
3680 					priv->hw_vport[port_action->port_id];
3681 			break;
3682 		case RTE_FLOW_ACTION_TYPE_QUOTA:
3683 			flow_hw_construct_quota(priv,
3684 						rule_acts + act_data->action_dst,
3685 						act_data->shared_meter.id);
3686 			break;
3687 		case RTE_FLOW_ACTION_TYPE_METER:
3688 			meter = action->conf;
3689 			mtr_id = meter->mtr_id;
3690 			aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
3691 			rule_acts[act_data->action_dst].action =
3692 				priv->mtr_bulk.action;
3693 			rule_acts[act_data->action_dst].aso_meter.offset =
3694 								aso_mtr->offset;
3695 			jump = flow_hw_jump_action_register
3696 				(dev, &table->cfg, aso_mtr->fm.group, NULL);
3697 			if (!jump)
3698 				goto error;
3699 			MLX5_ASSERT
3700 				(!rule_acts[act_data->action_dst + 1].action);
3701 			rule_acts[act_data->action_dst + 1].action =
3702 					(!!attr.group) ? jump->hws_action :
3703 							 jump->root_action;
3704 			flow->jump = jump;
3705 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3706 			if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
3707 				goto error;
3708 			break;
3709 		case RTE_FLOW_ACTION_TYPE_AGE:
3710 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3711 			age = action->conf;
3712 			/*
3713 			 * First, create the AGE parameter, then create its
3714 			 * counter later:
3715 			 * Regular counter - in the next case.
3716 			 * Indirect counter - updated after the loop.
3717 			 */
3718 			age_idx = mlx5_hws_age_action_create(priv, queue, 0,
3719 							     age,
3720 							     flow->res_idx,
3721 							     error);
3722 			if (age_idx == 0)
3723 				goto error;
3724 			mlx5_flow_hw_aux_set_age_idx(flow, aux, age_idx);
3725 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
3726 			if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
3727 				/*
3728 				 * When AGE uses an indirect counter, there is no
3729 				 * need to create a counter here, but it must be
3730 				 * updated with the AGE parameter after the loop.
3731 				 */
3732 				break;
3733 			/* Fall-through. */
3734 		case RTE_FLOW_ACTION_TYPE_COUNT:
3735 			cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
3736 			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);
3737 			if (ret != 0) {
3738 				rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
3739 						action, "Failed to allocate flow counter");
3740 				goto error;
3741 			}
3742 			ret = mlx5_hws_cnt_pool_get_action_offset
3743 				(priv->hws_cpool,
3744 				 cnt_id,
3745 				 &rule_acts[act_data->action_dst].action,
3746 				 &rule_acts[act_data->action_dst].counter.offset
3747 				 );
3748 			if (ret != 0)
3749 				goto error;
3750 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3751 			flow->cnt_id = cnt_id;
3752 			break;
3753 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
3754 			ret = mlx5_hws_cnt_pool_get_action_offset
3755 				(priv->hws_cpool,
3756 				 act_data->shared_counter.id,
3757 				 &rule_acts[act_data->action_dst].action,
3758 				 &rule_acts[act_data->action_dst].counter.offset
3759 				 );
3760 			if (ret != 0)
3761 				goto error;
3762 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3763 			flow->cnt_id = act_data->shared_counter.id;
3764 			break;
3765 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3766 			ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(action->conf);
3767 			if (flow_hw_ct_compile(dev, queue, ct_idx,
3768 					       &rule_acts[act_data->action_dst]))
3769 				goto error;
3770 			break;
3771 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
3772 			mtr_id = act_data->shared_meter.id &
3773 				((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3774 			/* Find ASO object. */
3775 			aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
3776 			if (!aso_mtr)
3777 				goto error;
3778 			rule_acts[act_data->action_dst].action =
3779 							pool->action;
3780 			rule_acts[act_data->action_dst].aso_meter.offset =
3781 							aso_mtr->offset;
3782 			break;
3783 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
3784 			/*
3785 			 * Allocating the meter directly would slow down
3786 			 * the flow insertion rate.
3787 			 */
3788 			ret = flow_hw_meter_mark_compile(dev,
3789 				act_data->action_dst, action,
3790 				rule_acts, &mtr_idx, MLX5_HW_INV_QUEUE, error);
3791 			if (ret != 0)
3792 				goto error;
3793 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3794 			mlx5_flow_hw_aux_set_mtr_id(flow, aux, mtr_idx);
3795 			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MTR_ID;
3796 			break;
3797 		case RTE_FLOW_ACTION_TYPE_NAT64:
3798 			nat64_c = action->conf;
3799 			rule_acts[act_data->action_dst].action =
3800 				priv->action_nat64[table->type][nat64_c->type];
3801 			break;
3802 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
3803 			jump_table = ((const struct rte_flow_action_jump_to_table_index *)
3804 						action->conf)->table;
3805 			if (likely(!rte_flow_template_table_resizable(dev->data->port_id,
3806 								      &table->cfg.attr))) {
3807 				rule_acts[act_data->action_dst].action =
3808 					jump_table->matcher_info[0].jump;
3809 			} else {
3810 				uint32_t selector;
3811 				rte_rwlock_read_lock(&table->matcher_replace_rwlk);
3812 				selector = table->matcher_selector;
3813 				rule_acts[act_data->action_dst].action =
3814 					jump_table->matcher_info[selector].jump;
3815 				rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
3816 			}
3817 			rule_acts[act_data->action_dst].jump_to_matcher.offset =
3818 				((const struct rte_flow_action_jump_to_table_index *)
3819 				action->conf)->index;
3820 			break;
3821 		default:
3822 			break;
3823 		}
3824 	}
3825 	if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
3826 		/* If indirect count is used, then CNT_ID flag should be set. */
3827 		MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
3828 		if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
3829 			/* If indirect AGE is used, then AGE_IDX flag should be set. */
3830 			MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX);
3831 			aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3832 			age_idx = mlx5_flow_hw_aux_get_age_idx(flow, aux) &
3833 				  MLX5_HWS_AGE_IDX_MASK;
3834 			if (mlx5_hws_cnt_age_get(priv->hws_cpool, flow->cnt_id) != age_idx)
3835 				/*
3836 				 * This is the first use of this indirect counter
3837 				 * for this indirect AGE, so the number of counters
3838 				 * needs to be increased.
3839 				 */
3840 				mlx5_hws_age_nb_cnt_increase(priv, age_idx);
3841 		}
3842 		/*
3843 		 * Update this indirect counter with the indirect/direct AGE
3844 		 * which is using it.
3845 		 */
3846 		mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, age_idx);
3847 	}
3848 	if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
3849 		int ix = mlx5_multi_pattern_reformat_to_index(hw_acts->encap_decap->action_type);
3850 		struct mlx5dr_rule_action *ra = &rule_acts[hw_acts->encap_decap_pos];
3851 
3852 		if (ix < 0)
3853 			goto error;
3854 		if (!mp_segment)
3855 			mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3856 		if (!mp_segment || !mp_segment->reformat_action[ix])
3857 			goto error;
3858 		ra->action = mp_segment->reformat_action[ix];
3859 		/* reformat offset is relative to selected DR action */
3860 		ra->reformat.offset = flow->res_idx - mp_segment->head_index;
3861 		ra->reformat.data = ap->encap_data;
3862 	}
3863 	if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
3864 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.offset =
3865 				flow->res_idx - 1;
3866 		rule_acts[hw_acts->push_remove_pos].ipv6_ext.header = ap->ipv6_push_data;
3867 	}
3868 	if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) {
3869 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3870 		flow->cnt_id = hw_acts->cnt_id;
3871 	}
3872 	return 0;
3873 
3874 error:
3875 	flow_hw_release_actions(dev, queue, flow);
3876 	rte_errno = EINVAL;
3877 	return -rte_errno;
3878 }
3879 
3880 static const struct rte_flow_item *
3881 flow_hw_get_rule_items(struct rte_eth_dev *dev,
3882 		       const struct rte_flow_template_table *table,
3883 		       const struct rte_flow_item items[],
3884 		       uint8_t pattern_template_index,
3885 		       struct mlx5_flow_hw_pattern_params *pp)
3886 {
3887 	struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
3888 
3889 	/* Only one implicit item can be added to flow rule pattern. */
3890 	MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
3891 	/* At least one item was allocated in pattern params for items. */
3892 	MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
3893 	if (pt->implicit_port) {
3894 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3895 			rte_errno = ENOMEM;
3896 			return NULL;
3897 		}
3898 		/* Set up represented port item in pattern params. */
3899 		pp->port_spec = (struct rte_flow_item_ethdev){
3900 			.port_id = dev->data->port_id,
3901 		};
3902 		pp->items[0] = (struct rte_flow_item){
3903 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
3904 			.spec = &pp->port_spec,
3905 		};
3906 		rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
3907 		return pp->items;
3908 	} else if (pt->implicit_tag) {
3909 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3910 			rte_errno = ENOMEM;
3911 			return NULL;
3912 		}
3913 		/* Set up tag item in pattern params. */
3914 		pp->tag_spec = (struct rte_flow_item_tag){
3915 			.data = flow_hw_tx_tag_regc_value(dev),
3916 		};
3917 		pp->items[0] = (struct rte_flow_item){
3918 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3919 			.spec = &pp->tag_spec,
3920 		};
3921 		rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
3922 		return pp->items;
3923 	} else {
3924 		return items;
3925 	}
3926 }
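/*
 * Example (illustration only): for a pattern template created with an
 * implicit REPRESENTED_PORT item, a user pattern such as
 *
 * @code{.c}
 * { ETH, IPV4, END }
 * @endcode
 *
 * is expanded in the per-queue pattern params buffer to
 *
 * @code{.c}
 * { REPRESENTED_PORT, ETH, IPV4, END }
 * @endcode
 *
 * where the REPRESENTED_PORT spec carries dev->data->port_id. The implicit
 * TAG item is handled the same way.
 */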
3927 
3928 /**
3929  * Enqueue HW steering flow creation.
3930  *
3931  * The flow will be applied to the HW only if the postpone bit is not set or
3932  * the extra push function is called.
3933  * The flow creation status should be checked from dequeue result.
3934  *
3935  * @param[in] dev
3936  *   Pointer to the rte_eth_dev structure.
3937  * @param[in] queue
3938  *   The queue to create the flow.
3939  * @param[in] attr
3940  *   Pointer to the flow operation attributes.
3941  * @param[in] table
3942  *   Pointer to the template table.
3943  * @param[in] insertion_type
3944  *   Insertion type for flow rules.
3945  * @param[in] rule_index
3946  *   Rule index in the table.
3947  * @param[in] items
3948  *   Items with flow spec value.
3949  * @param[in] pattern_template_index
3950  *   The item pattern flow follows from the table.
3951  * @param[in] actions
3952  *   Action with flow spec value.
3953  * @param[in] action_template_index
3954  *   The action pattern flow follows from the table.
3955  * @param[in] user_data
3956  *   Pointer to the user_data.
3957  * @param[out] error
3958  *   Pointer to error structure.
3959  *
3960  * @return
3961  *    Flow pointer on success, NULL otherwise and rte_errno is set.
3962  */
3963 static __rte_always_inline struct rte_flow *
3964 flow_hw_async_flow_create_generic(struct rte_eth_dev *dev,
3965 				  uint32_t queue,
3966 				  const struct rte_flow_op_attr *attr,
3967 				  struct rte_flow_template_table *table,
3968 				  enum rte_flow_table_insertion_type insertion_type,
3969 				  uint32_t rule_index,
3970 				  const struct rte_flow_item items[],
3971 				  uint8_t pattern_template_index,
3972 				  const struct rte_flow_action actions[],
3973 				  uint8_t action_template_index,
3974 				  void *user_data,
3975 				  struct rte_flow_error *error)
3976 {
3977 	struct mlx5_priv *priv = dev->data->dev_private;
3978 	struct mlx5dr_rule_attr rule_attr = {
3979 		.queue_id = queue,
3980 		.user_data = user_data,
3981 		.burst = attr->postpone,
3982 	};
3983 	struct mlx5dr_rule_action *rule_acts;
3984 	struct rte_flow_hw *flow = NULL;
3985 	const struct rte_flow_item *rule_items;
3986 	struct rte_flow_error sub_error = { 0 };
3987 	uint32_t flow_idx = 0;
3988 	uint32_t res_idx = 0;
3989 	int ret;
3990 
3991 	if (mlx5_fp_debug_enabled()) {
3992 		if (flow_hw_async_create_validate(dev, queue, table, insertion_type, rule_index,
3993 			items, pattern_template_index, actions, action_template_index, error))
3994 			return NULL;
3995 	}
3996 	flow = mlx5_ipool_malloc(table->flow, &flow_idx);
3997 	if (!flow) {
3998 		rte_errno = ENOMEM;
3999 		goto error;
4000 	}
4001 	rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
4002 	/*
4003 	 * Set the table here in order to know the destination table
4004 	 * when freeing the flow afterwards.
4005 	 */
4006 	flow->table = table;
4007 	flow->mt_idx = pattern_template_index;
4008 	flow->idx = flow_idx;
4009 	if (table->resource) {
4010 		mlx5_ipool_malloc(table->resource, &res_idx);
4011 		if (!res_idx) {
4012 			rte_errno = ENOMEM;
4013 			goto error;
4014 		}
4015 		flow->res_idx = res_idx;
4016 	} else {
4017 		flow->res_idx = flow_idx;
4018 	}
4019 	flow->flags = 0;
4020 	/*
4021 	 * Set the flow operation type here in order to know if the flow memory
4022 	 * should be freed or not when getting the result from dequeue.
4023 	 */
4024 	flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4025 	flow->user_data = user_data;
4026 	rule_attr.user_data = flow;
4027 	/*
4028 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
4029 	 * for rule insertion hints.
4030 	 */
4031 	flow->rule_idx = (rule_index == UINT32_MAX) ? flow->res_idx - 1 : rule_index;
4032 	rule_attr.rule_idx = flow->rule_idx;
4033 	/*
4034 	 * Construct the flow actions based on the input actions.
4035 	 * The implicitly appended action is always fixed, like metadata
4036 	 * copy action from FDB to NIC Rx.
4037 	 * No need to copy and construct a new "actions" list based on the
4038 	 * user's input, in order to save the cost.
4039 	 */
4040 	if (flow_hw_actions_construct(dev, flow, &priv->hw_q[queue].ap,
4041 				      &table->ats[action_template_index],
4042 				      table->its[pattern_template_index]->item_flags,
4043 				      flow->table, actions,
4044 				      rule_acts, queue, &sub_error))
4045 		goto error;
4046 	rule_items = flow_hw_get_rule_items(dev, table, items,
4047 					    pattern_template_index, &priv->hw_q[queue].pp);
4048 	if (!rule_items)
4049 		goto error;
4050 	if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
4051 		ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
4052 					 pattern_template_index, rule_items,
4053 					 action_template_index, rule_acts,
4054 					 &rule_attr,
4055 					 (struct mlx5dr_rule *)flow->rule);
4056 	} else {
4057 		struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4058 		uint32_t selector;
4059 
4060 		flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;
4061 		rte_rwlock_read_lock(&table->matcher_replace_rwlk);
4062 		selector = table->matcher_selector;
4063 		ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
4064 					 pattern_template_index, rule_items,
4065 					 action_template_index, rule_acts,
4066 					 &rule_attr,
4067 					 (struct mlx5dr_rule *)flow->rule);
4068 		rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
4069 		aux->matcher_selector = selector;
4070 		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR;
4071 	}
4072 	if (likely(!ret)) {
4073 		flow_hw_q_inc_flow_ops(priv, queue);
4074 		return (struct rte_flow *)flow;
4075 	}
4076 error:
4077 	if (table->resource && res_idx)
4078 		mlx5_ipool_free(table->resource, res_idx);
4079 	if (flow_idx)
4080 		mlx5_ipool_free(table->flow, flow_idx);
4081 	if (sub_error.cause != RTE_FLOW_ERROR_TYPE_NONE && error != NULL)
4082 		*error = sub_error;
4083 	else
4084 		rte_flow_error_set(error, rte_errno,
4085 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4086 				   "fail to create rte flow");
4087 	return NULL;
4088 }
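/*
 * Application-side usage sketch (illustration only; port_id, queue_id,
 * table, pattern and actions are assumed to exist, error handling is
 * trimmed). The enqueued rule reaches the HW either when the postpone
 * bit is cleared or after an explicit push, and its status is read back
 * with a pull:
 *
 * @code{.c}
 * struct rte_flow_op_attr op_attr = { .postpone = 1 };
 * struct rte_flow_op_result res[32];
 * struct rte_flow_error err;
 * struct rte_flow *flow;
 * int n;
 *
 * flow = rte_flow_async_create(port_id, queue_id, &op_attr, table,
 *                              pattern, 0, actions, 0, NULL, &err);
 * rte_flow_push(port_id, queue_id, &err);
 * do {
 *     n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res), &err);
 * } while (n == 0);
 * @endcode
 */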
4089 
4090 static struct rte_flow *
4091 flow_hw_async_flow_create(struct rte_eth_dev *dev,
4092 			  uint32_t queue,
4093 			  const struct rte_flow_op_attr *attr,
4094 			  struct rte_flow_template_table *table,
4095 			  const struct rte_flow_item items[],
4096 			  uint8_t pattern_template_index,
4097 			  const struct rte_flow_action actions[],
4098 			  uint8_t action_template_index,
4099 			  void *user_data,
4100 			  struct rte_flow_error *error)
4101 {
4102 	uint32_t rule_index = UINT32_MAX;
4103 
4104 	return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4105 		RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN, rule_index,
4106 		items, pattern_template_index, actions, action_template_index,
4107 		user_data, error);
4108 }
4109 
4110 static struct rte_flow *
4111 flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
4112 			  uint32_t queue,
4113 			  const struct rte_flow_op_attr *attr,
4114 			  struct rte_flow_template_table *table,
4115 			  uint32_t rule_index,
4116 			  const struct rte_flow_action actions[],
4117 			  uint8_t action_template_index,
4118 			  void *user_data,
4119 			  struct rte_flow_error *error)
4120 {
4121 	struct rte_flow_item items[] = {{.type = RTE_FLOW_ITEM_TYPE_END,}};
4122 	uint8_t pattern_template_index = 0;
4123 
4124 	return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4125 		RTE_FLOW_TABLE_INSERTION_TYPE_INDEX, rule_index,
4126 		items, pattern_template_index, actions, action_template_index,
4127 		user_data, error);
4128 }
4129 
4130 static struct rte_flow *
4131 flow_hw_async_flow_create_by_index_with_pattern(struct rte_eth_dev *dev,
4132 						uint32_t queue,
4133 						const struct rte_flow_op_attr *attr,
4134 						struct rte_flow_template_table *table,
4135 						uint32_t rule_index,
4136 						const struct rte_flow_item items[],
4137 						uint8_t pattern_template_index,
4138 						const struct rte_flow_action actions[],
4139 						uint8_t action_template_index,
4140 						void *user_data,
4141 						struct rte_flow_error *error)
4142 {
4143 	return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4144 		RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN, rule_index,
4145 		items, pattern_template_index, actions, action_template_index,
4146 		user_data, error);
4147 }
4148 
4149 /**
4150  * Enqueue HW steering flow update.
4151  *
4152  * The flow will be applied to the HW only if the postpone bit is not set or
4153  * the extra push function is called.
4154  * The flow update status should be checked from the dequeue result.
4155  *
4156  * @param[in] dev
4157  *   Pointer to the rte_eth_dev structure.
4158  * @param[in] queue
4159  *   The queue to update the flow.
4160  * @param[in] attr
4161  *   Pointer to the flow operation attributes.
4162  * @param[in] flow
4163  *   Pointer to the flow to be updated.
4164  * @param[in] actions
4165  *   Action with flow spec value.
4166  * @param[in] action_template_index
4167  *   The action pattern flow follows from the table.
4168  * @param[in] user_data
4169  *   Pointer to the user_data.
4170  * @param[out] error
4171  *   Pointer to error structure.
4172  *
4173  * @return
4174  *    0 on success, negative value otherwise and rte_errno is set.
4175  */
4176 static int
4177 flow_hw_async_flow_update(struct rte_eth_dev *dev,
4178 			   uint32_t queue,
4179 			   const struct rte_flow_op_attr *attr,
4180 			   struct rte_flow *flow,
4181 			   const struct rte_flow_action actions[],
4182 			   uint8_t action_template_index,
4183 			   void *user_data,
4184 			   struct rte_flow_error *error)
4185 {
4186 	struct mlx5_priv *priv = dev->data->dev_private;
4187 	struct mlx5dr_rule_attr rule_attr = {
4188 		.queue_id = queue,
4189 		.user_data = user_data,
4190 		.burst = attr->postpone,
4191 	};
4192 	struct mlx5dr_rule_action *rule_acts;
4193 	struct rte_flow_hw *of = (struct rte_flow_hw *)flow;
4194 	struct rte_flow_hw *nf;
4195 	struct rte_flow_hw_aux *aux;
4196 	struct rte_flow_template_table *table = of->table;
4197 	uint32_t res_idx = 0;
4198 	int ret;
4199 
4200 	if (mlx5_fp_debug_enabled()) {
4201 		if (flow_hw_async_update_validate(dev, queue, of, actions, action_template_index,
4202 						  error))
4203 			return -rte_errno;
4204 	}
4205 	aux = mlx5_flow_hw_aux(dev->data->port_id, of);
4206 	nf = &aux->upd_flow;
4207 	memset(nf, 0, sizeof(struct rte_flow_hw));
4208 	rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
4209 	/*
4210 	 * Set the table here in order to know the destination table
4211 	 * when freeing the flow afterwards.
4212 	 */
4213 	nf->table = table;
4214 	nf->mt_idx = of->mt_idx;
4215 	nf->idx = of->idx;
4216 	if (table->resource) {
4217 		mlx5_ipool_malloc(table->resource, &res_idx);
4218 		if (!res_idx) {
4219 			rte_errno = ENOMEM;
4220 			goto error;
4221 		}
4222 		nf->res_idx = res_idx;
4223 	} else {
4224 		nf->res_idx = of->res_idx;
4225 	}
4226 	nf->flags = 0;
4227 	/* Tell the construction function to set the proper fields. */
4228 	nf->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
4229 	/*
4230 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
4231 	 * for rule insertion hints.
4232 	 * If there is only one STE, the update will be atomic by nature.
4233 	 */
4234 	nf->rule_idx = nf->res_idx - 1;
4235 	rule_attr.rule_idx = nf->rule_idx;
4236 	/*
4237 	 * Construct the flow actions based on the input actions.
4238 	 * The implicitly appended action is always fixed, like metadata
4239 	 * copy action from FDB to NIC Rx.
4240 	 * No need to copy and construct a new "actions" list based on the
4241 	 * user's input, in order to save the cost.
4242 	 */
4243 	if (flow_hw_actions_construct(dev, nf, &priv->hw_q[queue].ap,
4244 				      &table->ats[action_template_index],
4245 				      table->its[nf->mt_idx]->item_flags,
4246 				      table, actions,
4247 				      rule_acts, queue, error)) {
4248 		rte_errno = EINVAL;
4249 		goto error;
4250 	}
4251 	/*
4252 	 * Set the flow operation type here in order to know if the flow memory
4253 	 * should be freed or not when getting the result from dequeue.
4254 	 */
4255 	of->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
4256 	of->user_data = user_data;
4257 	of->flags |= MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW;
4258 	rule_attr.user_data = of;
4259 	ret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,
4260 					action_template_index, rule_acts, &rule_attr);
4261 	if (likely(!ret)) {
4262 		flow_hw_q_inc_flow_ops(priv, queue);
4263 		return 0;
4264 	}
4265 error:
4266 	if (table->resource && res_idx)
4267 		mlx5_ipool_free(table->resource, res_idx);
4268 	return rte_flow_error_set(error, rte_errno,
4269 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4270 				  "fail to update rte flow");
4271 }
4272 
4273 /**
4274  * Enqueue HW steering flow destruction.
4275  *
4276  * The flow will be applied to the HW only if the postpone bit is not set or
4277  * the extra push function is called.
4278  * The flow destruction status should be checked from dequeue result.
4279  *
4280  * @param[in] dev
4281  *   Pointer to the rte_eth_dev structure.
4282  * @param[in] queue
4283  *   The queue to destroy the flow.
4284  * @param[in] attr
4285  *   Pointer to the flow operation attributes.
4286  * @param[in] flow
4287  *   Pointer to the flow to be destroyed.
4288  * @param[in] user_data
4289  *   Pointer to the user_data.
4290  * @param[out] error
4291  *   Pointer to error structure.
4292  *
4293  * @return
4294  *    0 on success, negative value otherwise and rte_errno is set.
4295  */
4296 static int
4297 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
4298 			   uint32_t queue,
4299 			   const struct rte_flow_op_attr *attr,
4300 			   struct rte_flow *flow,
4301 			   void *user_data,
4302 			   struct rte_flow_error *error)
4303 {
4304 	struct mlx5_priv *priv = dev->data->dev_private;
4305 	struct mlx5dr_rule_attr rule_attr = {
4306 		.queue_id = queue,
4307 		.user_data = user_data,
4308 		.burst = attr->postpone,
4309 	};
4310 	struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
4311 	bool resizable = rte_flow_template_table_resizable(dev->data->port_id,
4312 							   &fh->table->cfg.attr);
4313 	int ret;
4314 
4315 	if (mlx5_fp_debug_enabled()) {
4316 		if (flow_hw_async_destroy_validate(dev, queue, fh, error))
4317 			return -rte_errno;
4318 	}
4319 	fh->operation_type = !resizable ?
4320 			     MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY :
4321 			     MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY;
4322 	fh->user_data = user_data;
4323 	rule_attr.user_data = fh;
4324 	rule_attr.rule_idx = fh->rule_idx;
4325 	ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
4326 	if (ret) {
4327 		return rte_flow_error_set(error, rte_errno,
4328 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4329 					  "fail to destroy rte flow");
4330 	}
4331 	flow_hw_q_inc_flow_ops(priv, queue);
4332 	return 0;
4333 }
4334 
4335 /**
4336  * Release the AGE and counter for given flow.
4337  *
4338  * @param[in] priv
4339  *   Pointer to the port private data structure.
4340  * @param[in] queue
4341  *   The queue to release the counter.
4342  * @param[in, out] flow
4343  *   Pointer to the flow containing the counter.
4344  * @param[out] error
4345  *   Pointer to error structure.
4346  */
4347 static void
4348 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
4349 			  struct rte_flow_hw *flow,
4350 			  struct rte_flow_error *error)
4351 {
4352 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(priv->dev_data->port_id, flow);
4353 	uint32_t *cnt_queue;
4354 	uint32_t age_idx = aux->orig.age_idx;
4355 
4356 	MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
4357 	if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
4358 		if ((flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) &&
4359 		    !mlx5_hws_age_is_indirect(age_idx)) {
4360 			/* Remove this AGE parameter from indirect counter. */
4361 			mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
4362 			/* Release the AGE parameter. */
4363 			mlx5_hws_age_action_destroy(priv, age_idx, error);
4364 		}
4365 		return;
4366 	}
4367 	cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
4368 	/* Put the counter back first to reduce races with the BG thread. */
4369 	mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);
4370 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) {
4371 		if (mlx5_hws_age_is_indirect(age_idx)) {
4372 			uint32_t idx = age_idx & MLX5_HWS_AGE_IDX_MASK;
4373 
4374 			mlx5_hws_age_nb_cnt_decrease(priv, idx);
4375 		} else {
4376 			/* Release the AGE parameter. */
4377 			mlx5_hws_age_action_destroy(priv, age_idx, error);
4378 		}
4379 	}
4380 }
4381 
4382 static __rte_always_inline void
4383 flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job *job,
4384 				  uint32_t queue)
4385 {
4386 	struct mlx5_priv *priv = dev->data->dev_private;
4387 	struct mlx5_aso_ct_action *aso_ct;
4388 	struct mlx5_aso_mtr *aso_mtr;
4389 	uint32_t type, idx;
4390 
4391 	if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
4392 	    MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
4393 		mlx5_quota_async_completion(dev, queue, job);
4394 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
4395 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4396 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4397 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4398 			mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
4399 		}
4400 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
4401 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4402 		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4403 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4404 			aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
4405 			aso_mtr->state = ASO_METER_READY;
4406 		} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4407 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4408 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4409 			aso_ct->state = ASO_CONNTRACK_READY;
4410 		}
4411 	} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
4412 		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4413 		if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4414 			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4415 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4416 			mlx5_aso_ct_obj_analyze(job->query.user,
4417 						job->query.hw);
4418 			aso_ct->state = ASO_CONNTRACK_READY;
4419 		}
4420 	}
4421 }
4422 
4423 static __rte_always_inline int
4424 mlx5_hw_pull_flow_transfer_comp(struct rte_eth_dev *dev,
4425 				uint32_t queue, struct rte_flow_op_result res[],
4426 				uint16_t n_res)
4427 {
4428 	uint32_t size, i;
4429 	struct rte_flow_hw *flow = NULL;
4430 	struct mlx5_priv *priv = dev->data->dev_private;
4431 	struct rte_ring *ring = priv->hw_q[queue].flow_transfer_completed;
4432 
4433 	size = RTE_MIN(rte_ring_count(ring), n_res);
4434 	for (i = 0; i < size; i++) {
4435 		res[i].status = RTE_FLOW_OP_SUCCESS;
4436 		rte_ring_dequeue(ring, (void **)&flow);
4437 		res[i].user_data = flow->user_data;
4438 		flow_hw_q_dec_flow_ops(priv, queue);
4439 	}
4440 	return (int)size;
4441 }
4442 
4443 static inline int
4444 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
4445 				 uint32_t queue,
4446 				 struct rte_flow_op_result res[],
4447 				 uint16_t n_res)
4448 
4449 {
4450 	struct mlx5_priv *priv = dev->data->dev_private;
4451 	struct rte_ring *r = priv->hw_q[queue].indir_cq;
4452 	void *user_data = NULL;
4453 	int ret_comp, i;
4454 
4455 	ret_comp = (int)rte_ring_count(r);
4456 	if (ret_comp > n_res)
4457 		ret_comp = n_res;
4458 	for (i = 0; i < ret_comp; i++) {
4459 		rte_ring_dequeue(r, &user_data);
4460 		res[i].user_data = user_data;
4461 		res[i].status = RTE_FLOW_OP_SUCCESS;
4462 	}
4463 	if (!priv->shared_host) {
4464 		if (ret_comp < n_res && priv->hws_mpool)
4465 			ret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],
4466 					&res[ret_comp], n_res - ret_comp);
4467 		if (ret_comp < n_res && priv->hws_ctpool)
4468 			ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
4469 					&res[ret_comp], n_res - ret_comp);
4470 	}
4471 	if (ret_comp < n_res && priv->quota_ctx.sq)
4472 		ret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],
4473 						     &res[ret_comp],
4474 						     n_res - ret_comp);
4475 	for (i = 0; i <  ret_comp; i++) {
4476 		struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)res[i].user_data;
4477 
4478 		/* Restore user data. */
4479 		res[i].user_data = job->user_data;
4480 		if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
4481 			flow_hw_pull_legacy_indirect_comp(dev, job, queue);
4482 		/*
4483 		 * Current PMD supports 2 indirect action list types - MIRROR and REFORMAT.
4484 		 * These indirect list types do not post WQE to create action.
4485 		 * Future indirect list types that do post WQE will add
4486 		 * completion handlers here.
4487 		 */
4488 		flow_hw_job_put(priv, job, queue);
4489 	}
4490 	return ret_comp;
4491 }
4492 
4493 static __rte_always_inline void
4494 hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
4495 			       struct rte_flow_hw *flow,
4496 			       uint32_t queue, struct rte_flow_error *error)
4497 {
4498 	struct mlx5_priv *priv = dev->data->dev_private;
4499 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
4500 	struct rte_flow_template_table *table = flow->table;
4501 	/* Release the original resource index in case of update. */
4502 	uint32_t res_idx = flow->res_idx;
4503 
4504 	if (flow->flags & MLX5_FLOW_HW_FLOW_FLAGS_ALL) {
4505 		struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4506 
4507 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
4508 			flow_hw_jump_release(dev, flow->jump);
4509 		else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
4510 			mlx5_hrxq_obj_release(dev, flow->hrxq);
4511 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
4512 			flow_hw_age_count_release(priv, queue, flow, error);
4513 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
4514 			mlx5_ipool_free(pool->idx_pool, aux->orig.mtr_id);
4515 		if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW) {
4516 			struct rte_flow_hw *upd_flow = &aux->upd_flow;
4517 
4518 			rte_memcpy(flow, upd_flow, offsetof(struct rte_flow_hw, rule));
4519 			aux->orig = aux->upd;
4520 			flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4521 			if (!flow->nt_rule && table->resource)
4522 				mlx5_ipool_free(table->resource, res_idx);
4523 		}
4524 	}
4525 	if (flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY ||
4526 	    flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY) {
4527 		if (!flow->nt_rule) {
4528 			if (table->resource)
4529 				mlx5_ipool_free(table->resource, res_idx);
4530 			mlx5_ipool_free(table->flow, flow->idx);
4531 		}
4532 	}
4533 }
4534 
4535 static __rte_always_inline void
4536 hw_cmpl_resizable_tbl(struct rte_eth_dev *dev,
4537 		      struct rte_flow_hw *flow,
4538 		      uint32_t queue, enum rte_flow_op_status status,
4539 		      struct rte_flow_error *error)
4540 {
4541 	struct rte_flow_template_table *table = flow->table;
4542 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4543 	uint32_t selector = aux->matcher_selector;
4544 	uint32_t other_selector = (selector + 1) & 1;
4545 
4546 	MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
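	/*
	 * Adjust the per-matcher rule reference counts: a completed create
	 * charges the selected matcher, a completed destroy releases it, and
	 * a successful move transfers the flow to the other matcher.
	 */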
4547 	switch (flow->operation_type) {
4548 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4549 		rte_atomic_fetch_add_explicit
4550 			(&table->matcher_info[selector].refcnt, 1,
4551 			 rte_memory_order_relaxed);
4552 		break;
4553 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4554 		rte_atomic_fetch_sub_explicit
4555 			(&table->matcher_info[selector].refcnt, 1,
4556 			 rte_memory_order_relaxed);
4557 		hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4558 		break;
4559 	case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4560 		if (status == RTE_FLOW_OP_SUCCESS) {
4561 			rte_atomic_fetch_sub_explicit
4562 				(&table->matcher_info[selector].refcnt, 1,
4563 				 rte_memory_order_relaxed);
4564 			rte_atomic_fetch_add_explicit
4565 				(&table->matcher_info[other_selector].refcnt, 1,
4566 				 rte_memory_order_relaxed);
4567 			aux->matcher_selector = other_selector;
4568 		}
4569 		break;
4570 	default:
4571 		break;
4572 	}
4573 }
4574 
4575 /**
4576  * Pull the enqueued flows.
4577  *
4578  * For flows enqueued from creation/destruction, the status should be
4579  * checked from the dequeue result.
4580  *
4581  * @param[in] dev
4582  *   Pointer to the rte_eth_dev structure.
4583  * @param[in] queue
4584  *   The queue to pull the result.
4585  * @param[in, out] res
4586  *   Array to save the results.
4587  * @param[in] n_res
4588  *   Maximum number of results that can be stored in the array.
4589  * @param[out] error
4590  *   Pointer to error structure.
4591  *
4592  * @return
4593  *    Result number on success, negative value otherwise and rte_errno is set.
4594  */
4595 static int
4596 flow_hw_pull(struct rte_eth_dev *dev,
4597 	     uint32_t queue,
4598 	     struct rte_flow_op_result res[],
4599 	     uint16_t n_res,
4600 	     struct rte_flow_error *error)
4601 {
4602 	struct mlx5_priv *priv = dev->data->dev_private;
4603 	int ret, i;
4604 
4605 	/* 1. Pull the flow completion. */
4606 	ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
4607 	if (ret < 0)
4608 		return rte_flow_error_set(error, rte_errno,
4609 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4610 				"fail to query flow queue");
4611 	for (i = 0; i <  ret; i++) {
4612 		struct rte_flow_hw *flow = res[i].user_data;
4613 
4614 		/* Restore user data. */
4615 		res[i].user_data = flow->user_data;
4616 		switch (flow->operation_type) {
4617 		case MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY:
4618 		case MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE:
4619 			hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4620 			break;
4621 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4622 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4623 		case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4624 			hw_cmpl_resizable_tbl(dev, flow, queue, res[i].status, error);
4625 			break;
4626 		default:
4627 			break;
4628 		}
4629 		flow_hw_q_dec_flow_ops(priv, queue);
4630 	}
4631 	/* 2. Pull indirect action comp. */
4632 	if (ret < n_res)
4633 		ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
4634 							n_res - ret);
4635 	if (ret < n_res)
4636 		ret += mlx5_hw_pull_flow_transfer_comp(dev, queue, &res[ret],
4637 						       n_res - ret);
4638 
4639 	return ret;
4640 }
4641 
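/*
 * Move all queued jobs from the pending ring to the completion ring and
 * return the number of moved jobs.
 */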
4642 static uint32_t
4643 mlx5_hw_push_queue(struct rte_ring *pending_q, struct rte_ring *cmpl_q)
4644 {
4645 	void *job = NULL;
4646 	uint32_t i, size = rte_ring_count(pending_q);
4647 
4648 	for (i = 0; i < size; i++) {
4649 		rte_ring_dequeue(pending_q, &job);
4650 		rte_ring_enqueue(cmpl_q, job);
4651 	}
4652 	return size;
4653 }
4654 
4655 static inline uint32_t
4656 __flow_hw_push_action(struct rte_eth_dev *dev,
4657 		    uint32_t queue)
4658 {
4659 	struct mlx5_priv *priv = dev->data->dev_private;
4660 	struct mlx5_hw_q *hw_q = &priv->hw_q[queue];
4661 
4662 	mlx5_hw_push_queue(hw_q->indir_iq, hw_q->indir_cq);
4663 	mlx5_hw_push_queue(hw_q->flow_transfer_pending,
4664 			   hw_q->flow_transfer_completed);
4665 	if (!priv->shared_host) {
4666 		if (priv->hws_ctpool)
4667 			mlx5_aso_push_wqe(priv->sh,
4668 					  &priv->ct_mng->aso_sqs[queue]);
4669 		if (priv->hws_mpool)
4670 			mlx5_aso_push_wqe(priv->sh,
4671 					  &priv->hws_mpool->sq[queue]);
4672 	}
4673 	return flow_hw_q_pending(priv, queue);
4674 }
4675 
4676 static int
4677 __flow_hw_push(struct rte_eth_dev *dev,
4678 	       uint32_t queue,
4679 	       struct rte_flow_error *error)
4680 {
4681 	struct mlx5_priv *priv = dev->data->dev_private;
4682 	int ret, num;
4683 
4684 	num = __flow_hw_push_action(dev, queue);
4685 	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
4686 				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
4687 	if (ret) {
4688 		rte_flow_error_set(error, rte_errno,
4689 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4690 				   "fail to push flows");
4691 		return ret;
4692 	}
4693 	return num;
4694 }
4695 
4696 /**
4697  * Push the enqueued flows to HW.
4698  *
4699  * Force apply all the enqueued flows to the HW.
4700  *
4701  * @param[in] dev
4702  *   Pointer to the rte_eth_dev structure.
4703  * @param[in] queue
4704  *   The queue to push the flow.
4705  * @param[out] error
4706  *   Pointer to error structure.
4707  *
4708  * @return
4709  *    0 on success, negative value otherwise and rte_errno is set.
4710  */
4711 static int
4712 flow_hw_push(struct rte_eth_dev *dev,
4713 	     uint32_t queue, struct rte_flow_error *error)
4714 {
4715 	int ret = __flow_hw_push(dev, queue, error);
4716 
4717 	return ret >= 0 ? 0 : ret;
4718 }
4719 
4720 /**
4721  * Drain the enqueued flows' completion.
4722  *
4723  * @param[in] dev
4724  *   Pointer to the rte_eth_dev structure.
4725  * @param[in] queue
4726  *   The queue to pull the flow.
4727  * @param[out] error
4728  *   Pointer to error structure.
4729  *
4730  * @return
4731  *    0 on success, negative value otherwise and rte_errno is set.
4732  */
4733 static int
4734 __flow_hw_pull_comp(struct rte_eth_dev *dev,
4735 		    uint32_t queue, struct rte_flow_error *error)
4736 {
4737 	struct rte_flow_op_result comp[BURST_THR];
4738 	int ret, i, empty_loop = 0;
4739 	uint32_t pending_rules;
4740 
4741 	ret = __flow_hw_push(dev, queue, error);
4742 	if (ret < 0)
4743 		return ret;
4744 	pending_rules = ret;
4745 	while (pending_rules) {
4746 		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
4747 		if (ret < 0)
4748 			return -1;
4749 		if (!ret) {
4750 			rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
4751 			if (++empty_loop > 5) {
4752 				DRV_LOG(WARNING, "No completion available to dequeue, %u rules pending, quit.", pending_rules);
4753 				break;
4754 			}
4755 			continue;
4756 		}
4757 		for (i = 0; i < ret; i++) {
4758 			if (comp[i].status == RTE_FLOW_OP_ERROR)
4759 				DRV_LOG(WARNING, "Flow flush got an error CQE.");
4760 		}
4761 		/*
4762 		 * Indirect **SYNC** METER_MARK and CT actions do not
4763 		 * remove their completion right after the WQE post;
4764 		 * that implementation avoids an HW timeout.
4765 		 * The completion is removed only before the following WQE post.
4766 		 * However, HWS queue updates do not reflect that behaviour,
4767 		 * so during port destruction the sync queue may still have
4768 		 * pending completions.
4769 		 */
4770 		pending_rules -= RTE_MIN(pending_rules, (uint32_t)ret);
4771 		empty_loop = 0;
4772 	}
4773 	return 0;
4774 }
4775 
4776 /**
4777  * Flush created flows.
4778  *
4779  * @param[in] dev
4780  *   Pointer to the rte_eth_dev structure.
4781  * @param[out] error
4782  *   Pointer to error structure.
4783  *
4784  * @return
4785  *    0 on success, negative value otherwise and rte_errno is set.
4786  */
4787 int
4788 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
4789 		     struct rte_flow_error *error)
4790 {
4791 	struct mlx5_priv *priv = dev->data->dev_private;
4792 	struct mlx5_hw_q *hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
4793 	struct rte_flow_template_table *tbl;
4794 	struct rte_flow_hw *flow;
4795 	struct rte_flow_op_attr attr = {
4796 		.postpone = 0,
4797 	};
4798 	uint32_t pending_rules = 0;
4799 	uint32_t queue;
4800 	uint32_t fidx;
4801 
4802 	/*
4803 	 * Push and dequeue all the enqueued flow creation and
4804 	 * destruction jobs in case the user forgot to dequeue
4805 	 * them; otherwise the enqueued created flows would be
4806 	 * leaked. Forgotten dequeues would also make the flow
4807 	 * flush receive unexpected extra CQEs and pending_rules
4808 	 * become negative.
4809 	 */
4810 	for (queue = 0; queue < priv->nb_queue; queue++) {
4811 		if (__flow_hw_pull_comp(dev, queue, error))
4812 			return -1;
4813 	}
4814 	/* Flush flows table by table using MLX5_DEFAULT_FLUSH_QUEUE. */
4815 	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
4816 		if (!tbl->cfg.external)
4817 			continue;
4818 		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
4819 			if (flow_hw_async_flow_destroy(dev,
4820 						MLX5_DEFAULT_FLUSH_QUEUE,
4821 						&attr,
4822 						(struct rte_flow *)flow,
4823 						NULL,
4824 						error))
4825 				return -1;
4826 			pending_rules++;
4827 			/* Drain completions once the queue size is reached. */
4828 			if (pending_rules >= hw_q->size) {
4829 				if (__flow_hw_pull_comp(dev,
4830 							MLX5_DEFAULT_FLUSH_QUEUE,
4831 							error))
4832 					return -1;
4833 				pending_rules = 0;
4834 			}
4835 		}
4836 	}
4837 	/* Drain the remaining completions. */
4838 	if (pending_rules &&
4839 	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, error))
4840 		return -1;
4841 	return 0;
4842 }
4843 
4844 static int
4845 mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
4846 			       struct rte_flow_template_table *tbl,
4847 			       struct mlx5_multi_pattern_segment *segment,
4848 			       uint32_t bulk_size,
4849 			       struct rte_flow_error *error)
4850 {
4851 	int ret = 0;
4852 	uint32_t i;
4853 	struct mlx5_priv *priv = dev->data->dev_private;
4854 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &tbl->mpctx;
4855 	const struct rte_flow_template_table_attr *table_attr = &tbl->cfg.attr;
4856 	const struct rte_flow_attr *attr = &table_attr->flow_attr;
4857 	enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
4858 	uint32_t flags = mlx5_hw_act_flag[!!attr->group][type];
4859 	struct mlx5dr_action *dr_action = NULL;
4860 
4861 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
4862 		typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + i;
4863 		enum mlx5dr_action_type reformat_type =
4864 			mlx5_multi_pattern_reformat_index_to_type(i);
4865 
4866 		if (!reformat->elements_num)
4867 			continue;
4868 		dr_action = reformat_type == MLX5DR_ACTION_TYP_INSERT_HEADER ?
4869 			mlx5dr_action_create_insert_header
4870 			(priv->dr_ctx, reformat->elements_num,
4871 			 reformat->insert_hdr, bulk_size, flags) :
4872 			mlx5dr_action_create_reformat
4873 			(priv->dr_ctx, reformat_type, reformat->elements_num,
4874 			 reformat->reformat_hdr, bulk_size, flags);
4875 		if (!dr_action) {
4876 			ret = rte_flow_error_set(error, rte_errno,
4877 						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4878 						 NULL,
4879 						 "failed to create multi-pattern encap action");
4880 			goto error;
4881 		}
4882 		segment->reformat_action[i] = dr_action;
4883 	}
4884 	if (mpctx->mh.elements_num) {
4885 		typeof(mpctx->mh) *mh = &mpctx->mh;
4886 		dr_action = mlx5dr_action_create_modify_header
4887 			(priv->dr_ctx, mpctx->mh.elements_num, mh->pattern,
4888 			 bulk_size, flags);
4889 		if (!dr_action) {
4890 			ret = rte_flow_error_set(error, rte_errno,
4891 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4892 						  NULL, "failed to create multi-pattern header modify action");
4893 			goto error;
4894 		}
4895 		segment->mhdr_action = dr_action;
4896 	}
4897 	if (dr_action) {
4898 		segment->capacity = RTE_BIT32(bulk_size);
4899 		if (segment != &mpctx->segments[MLX5_MAX_TABLE_RESIZE_NUM - 1])
4900 			segment[1].head_index = segment->head_index + segment->capacity;
4901 	}
4902 	return 0;
4903 error:
4904 	mlx5_destroy_multi_pattern_segment(segment);
4905 	return ret;
4906 }
4907 
4908 static int
4909 mlx5_hw_build_template_table(struct rte_eth_dev *dev,
4910 			     uint8_t nb_action_templates,
4911 			     struct rte_flow_actions_template *action_templates[],
4912 			     struct mlx5dr_action_template *at[],
4913 			     struct rte_flow_template_table *tbl,
4914 			     struct rte_flow_error *error)
4915 {
4916 	int ret;
4917 	uint8_t i;
4918 
4919 	for (i = 0; i < nb_action_templates; i++) {
4920 		uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
4921 						     rte_memory_order_relaxed) + 1;
4922 
4923 		if (refcnt <= 1) {
4924 			rte_flow_error_set(error, EINVAL,
4925 					   RTE_FLOW_ERROR_TYPE_ACTION,
4926 					   &action_templates[i], "invalid AT refcount");
4927 			goto at_error;
4928 		}
4929 		at[i] = action_templates[i]->tmpl;
4930 		tbl->ats[i].action_template = action_templates[i];
4931 		LIST_INIT(&tbl->ats[i].acts.act_list);
4932 		/* Do NOT translate table actions if `dev` was not started. */
4933 		if (!dev->data->dev_started)
4934 			continue;
4935 		ret = flow_hw_translate_actions_template(dev, &tbl->cfg,
4936 						  &tbl->ats[i].acts,
4937 						  action_templates[i],
4938 						  &tbl->mpctx, error);
4939 		if (ret) {
4940 			i++;
4941 			goto at_error;
4942 		}
4943 		flow_hw_populate_rule_acts_caches(dev, tbl, i);
4944 	}
4945 	tbl->nb_action_templates = nb_action_templates;
4946 	if (mlx5_is_multi_pattern_active(&tbl->mpctx)) {
4947 		ret = mlx5_tbl_multi_pattern_process(dev, tbl,
4948 						     &tbl->mpctx.segments[0],
4949 						     rte_log2_u32(tbl->cfg.attr.nb_flows),
4950 						     error);
4951 		if (ret)
4952 			goto at_error;
4953 	}
4954 	return 0;
4955 
4956 at_error:
4957 	while (i--) {
4958 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4959 		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
4960 				   1, rte_memory_order_relaxed);
4961 	}
4962 	return rte_errno;
4963 }
4964 
4965 static bool
4966 flow_hw_validate_template_domain(const struct rte_flow_attr *table_attr,
4967 				 uint32_t ingress, uint32_t egress, uint32_t transfer)
4968 {
4969 	if (table_attr->ingress)
4970 		return ingress != 0;
4971 	else if (table_attr->egress)
4972 		return egress != 0;
4973 	else
4974 		return transfer;
4975 }
4976 
4977 static bool
4978 flow_hw_validate_table_domain(const struct rte_flow_attr *table_attr)
4979 {
4980 	return table_attr->ingress + table_attr->egress + table_attr->transfer
4981 		== 1;
4982 }
4983 
4984 /**
4985  * Create flow table.
4986  *
4987  * The input item and action templates will be bound to the table.
4988  * Flow memory will also be allocated. The matcher will be created based
4989  * on the item templates. Actions will be translated to dedicated
4990  * DR actions when possible.
4991  *
4992  * @param[in] dev
4993  *   Pointer to the rte_eth_dev structure.
4994  * @param[in] table_cfg
4995  *   Pointer to the table configuration.
4996  * @param[in] item_templates
4997  *   Item template array to be bound to the table.
4998  * @param[in] nb_item_templates
4999  *   Number of item templates.
5000  * @param[in] action_templates
5001  *   Action template array to be bound to the table.
5002  * @param[in] nb_action_templates
5003  *   Number of action templates.
5004  * @param[out] error
5005  *   Pointer to error structure.
5006  *
5007  * @return
5008  *    Table on success, NULL otherwise and rte_errno is set.
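 *
 * This path is reached through rte_flow_template_table_create(). An illustrative
 * sketch, assuming pt0 and at0 are a pattern template and an actions template
 * created beforehand (all values are arbitrary examples):
 *
 * @code{.c}
 * struct rte_flow_pattern_template *pt[] = { pt0 };
 * struct rte_flow_actions_template *at[] = { at0 };
 * struct rte_flow_template_table_attr attr = {
 *     .flow_attr = { .group = 1, .priority = 0, .ingress = 1 },
 *     .nb_flows = 1 << 16, // rounded up to a power of two internally
 * };
 * struct rte_flow_error err;
 * struct rte_flow_template_table *tbl;
 *
 * tbl = rte_flow_template_table_create(0, &attr, pt, 1, at, 1, &err);
 * @endcode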
5009  */
5010 static struct rte_flow_template_table *
5011 flow_hw_table_create(struct rte_eth_dev *dev,
5012 		     const struct mlx5_flow_template_table_cfg *table_cfg,
5013 		     struct rte_flow_pattern_template *item_templates[],
5014 		     uint8_t nb_item_templates,
5015 		     struct rte_flow_actions_template *action_templates[],
5016 		     uint8_t nb_action_templates,
5017 		     struct rte_flow_error *error)
5018 {
5019 	struct rte_flow_error sub_error = {
5020 		.type = RTE_FLOW_ERROR_TYPE_NONE,
5021 		.cause = NULL,
5022 		.message = NULL,
5023 	};
5024 	struct mlx5_priv *priv = dev->data->dev_private;
5025 	struct mlx5dr_matcher_attr matcher_attr = {0};
5026 	struct mlx5dr_action_jump_to_matcher_attr jump_attr = {
5027 		.type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX,
5028 		.matcher = NULL,
5029 	};
5030 	struct rte_flow_template_table *tbl = NULL;
5031 	struct mlx5_flow_group *grp;
5032 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
5033 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
5034 	const struct rte_flow_template_table_attr *attr = &table_cfg->attr;
5035 	struct rte_flow_attr flow_attr = attr->flow_attr;
5036 	struct mlx5_flow_cb_ctx ctx = {
5037 		.dev = dev,
5038 		.error = &sub_error,
5039 		.data = &flow_attr,
5040 	};
5041 	struct mlx5_indexed_pool_config cfg = {
5042 		.trunk_size = 1 << 12,
5043 		.per_core_cache = 1 << 13,
5044 		.need_lock = 1,
5045 		.release_mem_en = !!priv->sh->config.reclaim_mode,
5046 		.malloc = mlx5_malloc,
5047 		.free = mlx5_free,
5048 		.type = "mlx5_hw_table_flow",
5049 	};
5050 	struct mlx5_list_entry *ge;
5051 	uint32_t i = 0, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
5052 	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
5053 	bool port_started = !!dev->data->dev_started;
5054 	bool rpool_needed;
5055 	size_t tbl_mem_size;
5056 	int err;
5057 
5058 	if (!flow_hw_validate_table_domain(&attr->flow_attr)) {
5059 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
5060 				   NULL, "invalid table domain attributes");
5061 		return NULL;
5062 	}
5063 	for (i = 0; i < nb_item_templates; i++) {
5064 		const struct rte_flow_pattern_template_attr *pt_attr =
5065 			&item_templates[i]->attr;
5066 		bool match = flow_hw_validate_template_domain(&attr->flow_attr,
5067 							      pt_attr->ingress,
5068 							      pt_attr->egress,
5069 							      pt_attr->transfer);
5070 		if (!match) {
5071 			rte_flow_error_set(error, EINVAL,
5072 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5073 					   NULL, "pattern template domain does not match table");
5074 			return NULL;
5075 		}
5076 	}
5077 	for (i = 0; i < nb_action_templates; i++) {
5078 		const struct rte_flow_actions_template *at = action_templates[i];
5079 		bool match = flow_hw_validate_template_domain(&attr->flow_attr,
5080 							      at->attr.ingress,
5081 							      at->attr.egress,
5082 							      at->attr.transfer);
5083 		if (!match) {
5084 			rte_flow_error_set(error, EINVAL,
5085 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5086 					   NULL, "action template domain does not match table");
5087 			return NULL;
5088 		}
5089 	}
5090 	/* The HWS layer accepts only one item template for the root table. */
5091 	if (!attr->flow_attr.group)
5092 		max_tpl = 1;
5093 	cfg.max_idx = nb_flows;
5094 	cfg.size = !rte_flow_template_table_resizable(dev->data->port_id, attr) ?
5095 		   mlx5_flow_hw_entry_size() :
5096 		   mlx5_flow_hw_auxed_entry_size();
5097 	/* For tables with very few flows, disable the per-core cache. */
5098 	if (nb_flows < cfg.trunk_size) {
5099 		cfg.per_core_cache = 0;
5100 		cfg.trunk_size = nb_flows;
5101 	} else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
5102 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
5103 	}
5104 	/* Check if too many templates are requested. */
5105 	if (nb_item_templates > max_tpl ||
5106 	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
5107 		rte_errno = EINVAL;
5108 		goto error;
5109 	}
5110 	/*
5111 	 * Amount of memory required for rte_flow_template_table struct:
5112 	 * - Size of the struct itself.
5113 	 * - VLA of DR rule action containers at the end =
5114 	 *     number of actions templates * number of queues * size of DR rule actions container.
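	 *     (e.g., hypothetically, 2 action templates on 4 queues add
	 *      2 * 4 * sizeof(tbl->rule_acts[0]) bytes on top of sizeof(*tbl)).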
5115 	 */
5116 	tbl_mem_size = sizeof(*tbl);
5117 	tbl_mem_size += nb_action_templates * priv->nb_queue * sizeof(tbl->rule_acts[0]);
5118 	/* Allocate the table memory. */
5119 	tbl = mlx5_malloc(MLX5_MEM_ZERO, tbl_mem_size, RTE_CACHE_LINE_SIZE, rte_socket_id());
5120 	if (!tbl)
5121 		goto error;
5122 	tbl->cfg = *table_cfg;
5123 	/* Allocate flow indexed pool. */
5124 	tbl->flow = mlx5_ipool_create(&cfg);
5125 	if (!tbl->flow)
5126 		goto error;
5127 	/* Allocate table of auxiliary flow rule structs. */
5128 	tbl->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux) * nb_flows,
5129 				    RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
5130 	if (!tbl->flow_aux)
5131 		goto error;
5132 	/* Register the flow group. */
5133 	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
5134 	if (!ge)
5135 		goto error;
5136 	grp = container_of(ge, struct mlx5_flow_group, entry);
5137 	tbl->grp = grp;
5138 	/* Prepare matcher information. */
5139 	matcher_attr.resizable = !!rte_flow_template_table_resizable
5140 					(dev->data->port_id, &table_cfg->attr);
5141 	matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
5142 	matcher_attr.priority = attr->flow_attr.priority;
5143 	matcher_attr.optimize_using_rule_idx = true;
5144 	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
5145 	matcher_attr.insert_mode = flow_hw_matcher_insert_mode_get(attr->insertion_type);
5146 	if (matcher_attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
5147 		if (attr->insertion_type == RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN) {
5148 			matcher_attr.isolated = true;
5149 			matcher_attr.match_mode = MLX5DR_MATCHER_MATCH_MODE_DEFAULT;
5150 		} else {
5151 			matcher_attr.isolated = false;
5152 			matcher_attr.match_mode = MLX5DR_MATCHER_MATCH_MODE_ALWAYS_HIT;
5153 		}
5154 	}
5155 	if (attr->hash_func == RTE_FLOW_TABLE_HASH_FUNC_CRC16) {
5156 		DRV_LOG(ERR, "16-bit checksum hash type is not supported");
5157 		rte_errno = ENOTSUP;
5158 		goto it_error;
5159 	}
5160 	matcher_attr.distribute_mode = flow_hw_matcher_distribute_mode_get(attr->hash_func);
5161 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
5162 	/* Parse hints information. */
5163 	if (attr->specialize) {
5164 		uint32_t val = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
5165 			       RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
5166 
5167 		if ((attr->specialize & val) == val) {
5168 			DRV_LOG(ERR, "Invalid hint value %x",
5169 				attr->specialize);
5170 			rte_errno = EINVAL;
5171 			goto it_error;
5172 		}
5173 		if (attr->specialize &
5174 		    RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
5175 			matcher_attr.optimize_flow_src =
5176 				MLX5DR_MATCHER_FLOW_SRC_WIRE;
5177 		else if (attr->specialize &
5178 			 RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
5179 			matcher_attr.optimize_flow_src =
5180 				MLX5DR_MATCHER_FLOW_SRC_VPORT;
5181 	}
5182 	/* Build the item template. */
5183 	for (i = 0; i < nb_item_templates; i++) {
5184 		uint32_t ret;
5185 
5186 		if ((flow_attr.ingress && !item_templates[i]->attr.ingress) ||
5187 		    (flow_attr.egress && !item_templates[i]->attr.egress) ||
5188 		    (flow_attr.transfer && !item_templates[i]->attr.transfer)) {
5189 			DRV_LOG(ERR, "pattern template and template table attribute mismatch");
5190 			rte_errno = EINVAL;
5191 			goto it_error;
5192 		}
5193 		if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
5194 			matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
5195 		ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
5196 					 rte_memory_order_relaxed) + 1;
5197 		if (ret <= 1) {
5198 			rte_errno = EINVAL;
5199 			goto it_error;
5200 		}
5201 		mt[i] = item_templates[i]->mt;
5202 		tbl->its[i] = item_templates[i];
5203 	}
5204 	tbl->nb_item_templates = nb_item_templates;
5205 	/* Build the action template. */
5206 	err = mlx5_hw_build_template_table(dev, nb_action_templates,
5207 					   action_templates, at, tbl, &sub_error);
5208 	if (err) {
5209 		i = nb_item_templates;
5210 		goto it_error;
5211 	}
5212 	tbl->matcher_info[0].matcher = mlx5dr_matcher_create
5213 		(tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
5214 	if (!tbl->matcher_info[0].matcher)
5215 		goto at_error;
5216 	tbl->matcher_attr = matcher_attr;
5217 	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
5218 		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
5219 		    MLX5DR_TABLE_TYPE_NIC_RX);
5220 	if (matcher_attr.isolated) {
5221 		jump_attr.matcher = tbl->matcher_info[0].matcher;
5222 		tbl->matcher_info[0].jump = mlx5dr_action_create_jump_to_matcher(priv->dr_ctx,
5223 				&jump_attr, mlx5_hw_act_flag[!!attr->flow_attr.group][tbl->type]);
5224 		if (!tbl->matcher_info[0].jump)
5225 			goto jtm_error;
5226 	}
5227 	/*
5228 	 * An additional index is needed only if the matcher supports rule update and
5229 	 * needs more than one WQE per rule; otherwise the flow index can be reused.
5230 	 */
5231 	rpool_needed = mlx5dr_matcher_is_updatable(tbl->matcher_info[0].matcher) &&
5232 		       mlx5dr_matcher_is_dependent(tbl->matcher_info[0].matcher);
5233 	if (rpool_needed) {
5234 		/* Allocate rule indexed pool. */
5235 		cfg.size = 0;
5236 		cfg.type = "mlx5_hw_table_rule";
5237 		cfg.max_idx += priv->hw_q[0].size;
5238 		tbl->resource = mlx5_ipool_create(&cfg);
5239 		if (!tbl->resource)
5240 			goto res_error;
5241 	}
5242 	if (port_started)
5243 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
5244 	else
5245 		LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
5246 	rte_rwlock_init(&tbl->matcher_replace_rwlk);
5247 	return tbl;
5248 res_error:
5249 	if (tbl->matcher_info[0].jump)
5250 		mlx5dr_action_destroy(tbl->matcher_info[0].jump);
5251 jtm_error:
5252 	if (tbl->matcher_info[0].matcher)
5253 		(void)mlx5dr_matcher_destroy(tbl->matcher_info[0].matcher);
5254 at_error:
5255 	for (i = 0; i < nb_action_templates; i++) {
5256 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
5257 		rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
5258 				   1, rte_memory_order_relaxed);
5259 	}
5260 	i = nb_item_templates;
5261 it_error:
5262 	while (i--)
5263 		rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
5264 				   1, rte_memory_order_relaxed);
5265 error:
5266 	err = rte_errno;
5267 	if (tbl) {
5268 		if (tbl->grp)
5269 			mlx5_hlist_unregister(priv->sh->groups,
5270 					      &tbl->grp->entry);
5271 		if (tbl->flow_aux)
5272 			mlx5_free(tbl->flow_aux);
5273 		if (tbl->flow)
5274 			mlx5_ipool_destroy(tbl->flow);
5275 		mlx5_free(tbl);
5276 	}
5277 	if (error != NULL) {
5278 		if (sub_error.type == RTE_FLOW_ERROR_TYPE_NONE)
5279 			rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5280 					   "Failed to create template table");
5281 		else
5282 			rte_memcpy(error, &sub_error, sizeof(sub_error));
5283 	}
5284 	return NULL;
5285 }
5286 
5287 /**
5288  * Update flow template table.
5289  *
5290  * @param[in] dev
5291  *   Pointer to the rte_eth_dev structure.
5292  * @param[out] error
5293  *   Pointer to error structure.
5294  *
5295  * @return
5296  *    0 on success, negative value otherwise and rte_errno is set.
5297  */
5298 int
5299 flow_hw_table_update(struct rte_eth_dev *dev,
5300 		     struct rte_flow_error *error)
5301 {
5302 	struct mlx5_priv *priv = dev->data->dev_private;
5303 	struct rte_flow_template_table *tbl;
5304 
5305 	while ((tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo)) != NULL) {
5306 		if (flow_hw_translate_all_actions_templates(dev, tbl, error))
5307 			return -1;
5308 		LIST_REMOVE(tbl, next);
5309 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
5310 	}
5311 	return 0;
5312 }
5313 
5314 static inline int
5315 __translate_group(struct rte_eth_dev *dev,
5316 			const struct rte_flow_attr *flow_attr,
5317 			bool external,
5318 			uint32_t group,
5319 			uint32_t *table_group,
5320 			struct rte_flow_error *error)
5321 {
5322 	struct mlx5_priv *priv = dev->data->dev_private;
5323 	struct mlx5_sh_config *config = &priv->sh->config;
5324 
5325 	if (config->dv_esw_en &&
5326 	    priv->fdb_def_rule &&
5327 	    external &&
5328 	    flow_attr->transfer) {
5329 		if (group > MLX5_HW_MAX_TRANSFER_GROUP)
5330 			return rte_flow_error_set(error, EINVAL,
5331 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5332 						  NULL,
5333 						  "group index not supported");
5334 		*table_group = group + 1;
5335 	} else if (config->dv_esw_en &&
5336 		   (config->repr_matching || config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) &&
5337 		   external &&
5338 		   flow_attr->egress) {
5339 		/*
5340 		 * On E-Switch setups, default egress flow rules are inserted to allow
5341 		 * representor matching and/or preserving metadata across steering domains.
5342 		 * These flow rules are inserted in group 0 and this group is reserved by PMD
5343 		 * for these purposes.
5344 		 *
5345 		 * As a result, if representor matching or extended metadata mode is enabled,
5346 		 * group provided by the user must be incremented to avoid inserting flow rules
5347 		 * in group 0.
5348 		 */
5349 		if (group > MLX5_HW_MAX_EGRESS_GROUP)
5350 			return rte_flow_error_set(error, EINVAL,
5351 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5352 						  NULL,
5353 						  "group index not supported");
5354 		*table_group = group + 1;
5355 	} else {
5356 		*table_group = group;
5357 	}
5358 	return 0;
5359 }
5360 
5361 /**
5362  * Translates group index specified by the user in @p attr to internal
5363  * group index.
5364  *
5365  * Translation is done by incrementing group index, so group n becomes n + 1.
5366  *
5367  * @param[in] dev
5368  *   Pointer to Ethernet device.
5369  * @param[in] cfg
5370  *   Pointer to the template table configuration.
5371  * @param[in] group
5372  *   Currently used group index (table group or jump destination).
5373  * @param[out] table_group
5374  *   Pointer to output group index.
5375  * @param[out] error
5376  *   Pointer to error structure.
5377  *
5378  * @return
5379  *   0 on success. Otherwise, returns negative error code, rte_errno is set
5380  *   and error structure is filled.
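 *
 * For example (hypothetical setup): with E-Switch enabled and the default FDB rule
 * present, an external transfer table requested for user group 3 is placed in
 * internal group 4; when no translation applies the group index is used as-is.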
5381  */
5382 static int
5383 flow_hw_translate_group(struct rte_eth_dev *dev,
5384 			const struct mlx5_flow_template_table_cfg *cfg,
5385 			uint32_t group,
5386 			uint32_t *table_group,
5387 			struct rte_flow_error *error)
5388 {
5389 	const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
5390 
5391 	return __translate_group(dev, flow_attr, cfg->external, group, table_group, error);
5392 }
5393 
5394 /**
5395  * Create flow table.
5396  *
5397  * This function is a wrapper over @ref flow_hw_table_create(), which translates parameters
5398  * provided by the user into proper internal values.
5399  *
5400  * @param[in] dev
5401  *   Pointer to Ethernet device.
5402  * @param[in] attr
5403  *   Pointer to the table attributes.
5404  * @param[in] item_templates
5405  *   Item template array to be bound to the table.
5406  * @param[in] nb_item_templates
5407  *   Number of item templates.
5408  * @param[in] action_templates
5409  *   Action template array to be bound to the table.
5410  * @param[in] nb_action_templates
5411  *   Number of action templates.
5412  * @param[out] error
5413  *   Pointer to error structure.
5414  *
5415  * @return
5416  *   Table on success. Otherwise NULL is returned, rte_errno is set
5417  *   and the error structure is filled.
5418  */
5419 static struct rte_flow_template_table *
5420 flow_hw_template_table_create(struct rte_eth_dev *dev,
5421 			      const struct rte_flow_template_table_attr *attr,
5422 			      struct rte_flow_pattern_template *item_templates[],
5423 			      uint8_t nb_item_templates,
5424 			      struct rte_flow_actions_template *action_templates[],
5425 			      uint8_t nb_action_templates,
5426 			      struct rte_flow_error *error)
5427 {
5428 	struct mlx5_flow_template_table_cfg cfg = {
5429 		.attr = *attr,
5430 		.external = true,
5431 	};
5432 	uint32_t group = attr->flow_attr.group;
5433 
5434 	if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
5435 		return NULL;
5436 	if (!cfg.attr.flow_attr.group &&
5437 	    rte_flow_template_table_resizable(dev->data->port_id, attr)) {
5438 		rte_flow_error_set(error, EINVAL,
5439 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5440 				   "table cannot be resized: invalid group");
5441 		return NULL;
5442 	}
5443 	return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
5444 				    action_templates, nb_action_templates, error);
5445 }
5446 
5447 static void
5448 mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment)
5449 {
5450 	int i;
5451 
5452 	if (segment->mhdr_action)
5453 		mlx5dr_action_destroy(segment->mhdr_action);
5454 	for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
5455 		if (segment->reformat_action[i])
5456 			mlx5dr_action_destroy(segment->reformat_action[i]);
5457 	}
5458 	segment->capacity = 0;
5459 }
5460 
5461 static void
5462 flow_hw_destroy_table_multi_pattern_ctx(struct rte_flow_template_table *table)
5463 {
5464 	int sx;
5465 
5466 	for (sx = 0; sx < MLX5_MAX_TABLE_RESIZE_NUM; sx++)
5467 		mlx5_destroy_multi_pattern_segment(table->mpctx.segments + sx);
5468 }
5469 /**
5470  * Destroy flow table.
5471  *
5472  * @param[in] dev
5473  *   Pointer to the rte_eth_dev structure.
5474  * @param[in] table
5475  *   Pointer to the table to be destroyed.
5476  * @param[out] error
5477  *   Pointer to error structure.
5478  *
5479  * @return
5480  *   0 on success, a negative errno value otherwise and rte_errno is set.
5481  */
5482 static int
5483 flow_hw_table_destroy(struct rte_eth_dev *dev,
5484 		      struct rte_flow_template_table *table,
5485 		      struct rte_flow_error *error)
5486 {
5487 	struct mlx5_priv *priv = dev->data->dev_private;
5488 	int i;
5489 	uint32_t fidx = 1;
5490 	uint32_t ridx = 1;
5491 
5492 	/* Build ipool allocated object bitmap. */
5493 	if (table->resource)
5494 		mlx5_ipool_flush_cache(table->resource);
5495 	mlx5_ipool_flush_cache(table->flow);
5496 	/* Check if ipool has allocated objects. */
5497 	if (table->refcnt ||
5498 	    mlx5_ipool_get_next(table->flow, &fidx) ||
5499 	    (table->resource && mlx5_ipool_get_next(table->resource, &ridx))) {
5500 		DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
5501 		return rte_flow_error_set(error, EBUSY,
5502 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5503 				   NULL,
5504 				   "table is in use");
5505 	}
5506 	LIST_REMOVE(table, next);
5507 	for (i = 0; i < table->nb_item_templates; i++)
5508 		rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
5509 				   1, rte_memory_order_relaxed);
5510 	for (i = 0; i < table->nb_action_templates; i++) {
5511 		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
5512 		rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
5513 				   1, rte_memory_order_relaxed);
5514 	}
5515 	flow_hw_destroy_table_multi_pattern_ctx(table);
5516 	if (table->matcher_info[0].jump)
5517 		mlx5dr_action_destroy(table->matcher_info[0].jump);
5518 	if (table->matcher_info[0].matcher)
5519 		mlx5dr_matcher_destroy(table->matcher_info[0].matcher);
5520 	if (table->matcher_info[1].jump)
5521 		mlx5dr_action_destroy(table->matcher_info[1].jump);
5522 	if (table->matcher_info[1].matcher)
5523 		mlx5dr_matcher_destroy(table->matcher_info[1].matcher);
5524 	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
5525 	if (table->resource)
5526 		mlx5_ipool_destroy(table->resource);
5527 	mlx5_free(table->flow_aux);
5528 	mlx5_ipool_destroy(table->flow);
5529 	mlx5_free(table);
5530 	return 0;
5531 }
5532 
5533 /**
5534  * Parse group's miss actions.
5535  *
5536  * @param[in] dev
5537  *   Pointer to the rte_eth_dev structure.
5538  * @param[in] cfg
5539  *   Pointer to the table_cfg structure.
5540  * @param[in] actions
5541  *   Array of actions to perform on group miss. Supported types:
5542  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5543  * @param[out] dst_group_id
5544  *   Pointer to the destination group id output. Set to 0 if the actions list
5545  *   contains only END, otherwise set to the destination group id.
5546  * @param[out] error
5547  *   Pointer to error structure.
5548  *
5549  * @return
5550  *   0 on success, a negative errno value otherwise and rte_errno is set.
5551  */
5552 
5553 static int
5554 flow_hw_group_parse_miss_actions(struct rte_eth_dev *dev,
5555 				 struct mlx5_flow_template_table_cfg *cfg,
5556 				 const struct rte_flow_action actions[],
5557 				 uint32_t *dst_group_id,
5558 				 struct rte_flow_error *error)
5559 {
5560 	const struct rte_flow_action_jump *jump_conf;
5561 	uint32_t temp = 0;
5562 	uint32_t i;
5563 
5564 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
5565 		switch (actions[i].type) {
5566 		case RTE_FLOW_ACTION_TYPE_VOID:
5567 			continue;
5568 		case RTE_FLOW_ACTION_TYPE_JUMP:
5569 			if (temp)
5570 				return rte_flow_error_set(error, ENOTSUP,
5571 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, actions,
5572 							  "Miss actions can contain only a single JUMP");
5573 
5574 			jump_conf = (const struct rte_flow_action_jump *)actions[i].conf;
5575 			if (!jump_conf)
5576 				return rte_flow_error_set(error, EINVAL,
5577 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5578 							  jump_conf, "Jump conf must not be NULL");
5579 
5580 			if (flow_hw_translate_group(dev, cfg, jump_conf->group, &temp, error))
5581 				return -rte_errno;
5582 
5583 			if (!temp)
5584 				return rte_flow_error_set(error, EINVAL,
5585 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5586 							  "Failed to set group miss actions - Invalid target group");
5587 			break;
5588 		default:
5589 			return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5590 						  &actions[i], "Unsupported default miss action type");
5591 		}
5592 	}
5593 
5594 	*dst_group_id = temp;
5595 	return 0;
5596 }
5597 
5598 /**
5599  * Set group's miss group.
5600  *
5601  * @param[in] dev
5602  *   Pointer to the rte_eth_dev structure.
5603  * @param[in] cfg
5604  *   Pointer to the table_cfg structure.
5605  * @param[in] src_grp
5606  *   Pointer to source group structure.
5607  *   If NULL, a new group will be created based on the group id from cfg->attr.flow_attr.group.
5608  * @param[in] dst_grp
5609  *   Pointer to destination group structure.
5610  * @param[out] error
5611  *   Pointer to error structure.
5612  *
5613  * @return
5614  *   0 on success, a negative errno value otherwise and rte_errno is set.
5615  */
5616 
5617 static int
5618 flow_hw_group_set_miss_group(struct rte_eth_dev *dev,
5619 			     struct mlx5_flow_template_table_cfg *cfg,
5620 			     struct mlx5_flow_group *src_grp,
5621 			     struct mlx5_flow_group *dst_grp,
5622 			     struct rte_flow_error *error)
5623 {
5624 	struct rte_flow_error sub_error = {
5625 		.type = RTE_FLOW_ERROR_TYPE_NONE,
5626 		.cause = NULL,
5627 		.message = NULL,
5628 	};
5629 	struct mlx5_flow_cb_ctx ctx = {
5630 		.dev = dev,
5631 		.error = &sub_error,
5632 		.data = &cfg->attr.flow_attr,
5633 	};
5634 	struct mlx5_priv *priv = dev->data->dev_private;
5635 	struct mlx5_list_entry *ge;
5636 	bool ref = false;
5637 	int ret;
5638 
5639 	if (!dst_grp)
5640 		return -EINVAL;
5641 
5642 	/* If group doesn't exist - needs to be created. */
5643 	if (!src_grp) {
5644 		ge = mlx5_hlist_register(priv->sh->groups, cfg->attr.flow_attr.group, &ctx);
5645 		if (!ge)
5646 			return -rte_errno;
5647 
5648 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
5649 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5650 		ref = true;
5651 	} else if (!src_grp->miss_group) {
5652 		/* If group exists, but has no miss actions - need to increase ref_cnt. */
5653 		LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5654 		src_grp->entry.ref_cnt++;
5655 		ref = true;
5656 	}
5657 
5658 	ret = mlx5dr_table_set_default_miss(src_grp->tbl, dst_grp->tbl);
5659 	if (ret)
5660 		goto mlx5dr_error;
5661 
5662 	/* If group existed and had old miss actions - ref_cnt is already correct.
5663 	 * However, need to reduce ref counter for old miss group.
5664 	 */
5665 	if (src_grp->miss_group)
5666 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->miss_group->entry);
5667 
5668 	src_grp->miss_group = dst_grp;
5669 	return 0;
5670 
5671 mlx5dr_error:
5672 	/* Reduce src_grp ref_cnt back & remove from grp list in case of mlx5dr error */
5673 	if (ref) {
5674 		mlx5_hlist_unregister(priv->sh->groups, &src_grp->entry);
5675 		LIST_REMOVE(src_grp, next);
5676 	}
5677 
5678 	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5679 				  "Failed to set group miss actions");
5680 }
5681 
5682 /**
5683  * Unset group's miss group.
5684  *
5685  * @param[in] dev
5686  *   Pointer to the rte_eth_dev structure.
5687  * @param[in] grp
5688  *   Pointer to group structure.
5689  * @param[out] error
5690  *   Pointer to error structure.
5691  *
5692  * @return
5693  *   0 on success, a negative errno value otherwise and rte_errno is set.
5694  */
5695 
5696 static int
5697 flow_hw_group_unset_miss_group(struct rte_eth_dev *dev,
5698 			       struct mlx5_flow_group *grp,
5699 			       struct rte_flow_error *error)
5700 {
5701 	struct mlx5_priv *priv = dev->data->dev_private;
5702 	int ret;
5703 
5704 	/* If group doesn't exist - no need to change anything. */
5705 	if (!grp)
5706 		return 0;
5707 
5708 	/* If the group exists but the miss action is already the default behavior -
5709 	 * no need to change anything.
5710 	 */
5711 	if (!grp->miss_group)
5712 		return 0;
5713 
5714 	ret = mlx5dr_table_set_default_miss(grp->tbl, NULL);
5715 	if (ret)
5716 		return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5717 					  "Failed to unset group miss actions");
5718 
5719 	mlx5_hlist_unregister(priv->sh->groups, &grp->miss_group->entry);
5720 	grp->miss_group = NULL;
5721 
5722 	LIST_REMOVE(grp, next);
5723 	mlx5_hlist_unregister(priv->sh->groups, &grp->entry);
5724 
5725 	return 0;
5726 }
5727 
5728 /**
5729  * Set group miss actions.
5730  *
5731  * @param[in] dev
5732  *   Pointer to the rte_eth_dev structure.
5733  * @param[in] group_id
5734  *   Group id.
5735  * @param[in] attr
5736  *   Pointer to group attributes structure.
5737  * @param[in] actions
5738  *   Array of actions to perform on group miss. Supported types:
5739  *   RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5740  * @param[out] error
5741  *   Pointer to error structure.
5742  *
5743  * @return
5744  *   0 on success, a negative errno value otherwise and rte_errno is set.
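 *
 * This op is reached through rte_flow_group_set_miss_actions(). An illustrative
 * sketch (group ids are arbitrary examples) that makes misses in ingress group 1
 * jump to group 2 and then restores the default miss behavior:
 *
 * @code{.c}
 * struct rte_flow_group_attr gattr = { .ingress = 1 };
 * struct rte_flow_action_jump jump = { .group = 2 };
 * struct rte_flow_action miss[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_action restore[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 *
 * rte_flow_group_set_miss_actions(0, 1, &gattr, miss, &err);
 * rte_flow_group_set_miss_actions(0, 1, &gattr, restore, &err);
 * @endcode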
5745  */
5746 
5747 static int
5748 flow_hw_group_set_miss_actions(struct rte_eth_dev *dev,
5749 			       uint32_t group_id,
5750 			       const struct rte_flow_group_attr *attr,
5751 			       const struct rte_flow_action actions[],
5752 			       struct rte_flow_error *error)
5753 {
5754 	struct rte_flow_error sub_error = {
5755 		.type = RTE_FLOW_ERROR_TYPE_NONE,
5756 		.cause = NULL,
5757 		.message = NULL,
5758 	};
5759 	struct mlx5_flow_template_table_cfg cfg = {
5760 		.external = true,
5761 		.attr = {
5762 			.flow_attr = {
5763 				.group = group_id,
5764 				.ingress = attr->ingress,
5765 				.egress = attr->egress,
5766 				.transfer = attr->transfer,
5767 			},
5768 		},
5769 	};
5770 	struct mlx5_flow_cb_ctx ctx = {
5771 		.dev = dev,
5772 		.error = &sub_error,
5773 		.data = &cfg.attr.flow_attr,
5774 	};
5775 	struct mlx5_priv *priv = dev->data->dev_private;
5776 	struct mlx5_flow_group *src_grp = NULL;
5777 	struct mlx5_flow_group *dst_grp = NULL;
5778 	struct mlx5_list_entry *ge;
5779 	uint32_t dst_group_id = 0;
5780 	int ret;
5781 
5782 	if (flow_hw_translate_group(dev, &cfg, group_id, &group_id, error))
5783 		return -rte_errno;
5784 
5785 	if (!group_id)
5786 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5787 					  NULL, "Failed to set group miss actions - invalid group id");
5788 
5789 	ret = flow_hw_group_parse_miss_actions(dev, &cfg, actions, &dst_group_id, error);
5790 	if (ret)
5791 		return -rte_errno;
5792 
5793 	if (dst_group_id == group_id) {
5794 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5795 					  NULL, "Failed to set group miss actions - target group id must differ from group_id");
5796 	}
5797 
5798 	cfg.attr.flow_attr.group = group_id;
5799 	ge = mlx5_hlist_lookup(priv->sh->groups, group_id, &ctx);
5800 	if (ge)
5801 		src_grp = container_of(ge, struct mlx5_flow_group, entry);
5802 
5803 	if (dst_group_id) {
5804 		/* Increase ref_cnt for new miss group. */
5805 		cfg.attr.flow_attr.group = dst_group_id;
5806 		ge = mlx5_hlist_register(priv->sh->groups, dst_group_id, &ctx);
5807 		if (!ge)
5808 			return -rte_errno;
5809 
5810 		dst_grp = container_of(ge, struct mlx5_flow_group, entry);
5811 
5812 		cfg.attr.flow_attr.group = group_id;
5813 		ret = flow_hw_group_set_miss_group(dev, &cfg, src_grp, dst_grp, error);
5814 		if (ret)
5815 			goto error;
5816 	} else {
5817 		return flow_hw_group_unset_miss_group(dev, src_grp, error);
5818 	}
5819 
5820 	return 0;
5821 
5822 error:
5823 	if (dst_grp)
5824 		mlx5_hlist_unregister(priv->sh->groups, &dst_grp->entry);
5825 	return -rte_errno;
5826 }
5827 
5828 static bool
5829 flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
5830 			     enum rte_flow_field_id field)
5831 {
5832 	return action->src.field == field || action->dst.field == field;
5833 }
5834 
5835 static bool
5836 flow_hw_modify_field_is_geneve_opt(enum rte_flow_field_id field)
5837 {
5838 	return field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE ||
5839 	       field == RTE_FLOW_FIELD_GENEVE_OPT_CLASS ||
5840 	       field == RTE_FLOW_FIELD_GENEVE_OPT_DATA;
5841 }
5842 
5843 static bool
5844 flow_hw_modify_field_is_add_dst_valid(const struct rte_flow_action_modify_field *conf)
5845 {
5846 	if (conf->operation != RTE_FLOW_MODIFY_ADD)
5847 		return true;
5848 	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
5849 	    conf->src.field == RTE_FLOW_FIELD_VALUE)
5850 		return true;
5851 	switch (conf->dst.field) {
5852 	case RTE_FLOW_FIELD_IPV4_TTL:
5853 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
5854 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
5855 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
5856 	case RTE_FLOW_FIELD_TAG:
5857 	case RTE_FLOW_FIELD_META:
5858 	case RTE_FLOW_FIELD_FLEX_ITEM:
5859 	case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
5860 	case RTE_FLOW_FIELD_IPV4_IHL:
5861 	case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
5862 	case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
5863 		return true;
5864 	default:
5865 		break;
5866 	}
5867 	return false;
5868 }
5869 
5870 /**
5871  * Validate the level value for modify field action.
5872  *
5873  * @param[in] data
5874  *   Pointer to the rte_flow_field_data structure either src or dst.
5875  * @param[in] inner_supported
5876  *   Indicator whether inner should be supported.
5877  * @param[out] error
5878  *   Pointer to error structure.
5879  *
5880  * @return
5881  *   0 on success, a negative errno value otherwise and rte_errno is set.
5882  */
5883 static int
5884 flow_hw_validate_modify_field_level(const struct rte_flow_field_data *data,
5885 				    bool inner_supported,
5886 				    struct rte_flow_error *error)
5887 {
5888 	switch ((int)data->field) {
5889 	case RTE_FLOW_FIELD_START:
5890 	case RTE_FLOW_FIELD_VLAN_TYPE:
5891 	case RTE_FLOW_FIELD_RANDOM:
5892 	case RTE_FLOW_FIELD_FLEX_ITEM:
5893 		/*
5894 		 * Level shouldn't be valid since field isn't supported or
5895 		 * doesn't use 'level'.
5896 		 */
5897 		break;
5898 	case RTE_FLOW_FIELD_MARK:
5899 	case RTE_FLOW_FIELD_META:
5900 	case RTE_FLOW_FIELD_METER_COLOR:
5901 	case RTE_FLOW_FIELD_HASH_RESULT:
5902 		/* For meta data fields encapsulation level is don't-care. */
5903 		/* For metadata fields the encapsulation level is a don't-care. */
5904 	case RTE_FLOW_FIELD_TAG:
5905 	case MLX5_RTE_FLOW_FIELD_META_REG:
5906 		/*
5907 		 * The tag array for the RTE_FLOW_FIELD_TAG type is provided using
5908 		 * the 'tag_index' field. In the old API, it was provided using the
5909 		 * 'level' field, which is still supported for backwards
5910 		 * compatibility. Therefore, for the tag field only, 'level' matters:
5911 		 * it is taken as the tag index when the 'tag_index' field isn't set,
5912 		 * and an error is returned when both are set.
5913 		 */
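		/*
		 * E.g. (hypothetical field data) both forms select TAG array index 3:
		 *   .field = RTE_FLOW_FIELD_TAG, .tag_index = 3   - preferred form
		 *   .field = RTE_FLOW_FIELD_TAG, .level = 3        - legacy form
		 * Setting both 'tag_index' and 'level' to non-zero is rejected below.
		 */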
5914 		if (data->level > 0) {
5915 			if (data->tag_index > 0)
5916 				return rte_flow_error_set(error, EINVAL,
5917 							  RTE_FLOW_ERROR_TYPE_ACTION,
5918 							  data,
5919 							  "tag array can be provided using 'level' or 'tag_index' fields, not both");
5920 			DRV_LOG(WARNING,
5921 				"tag array provided in 'level' field instead of 'tag_index' field.");
5922 		}
5923 		break;
5924 	case RTE_FLOW_FIELD_MAC_DST:
5925 	case RTE_FLOW_FIELD_MAC_SRC:
5926 	case RTE_FLOW_FIELD_MAC_TYPE:
5927 	case RTE_FLOW_FIELD_IPV4_IHL:
5928 	case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
5929 	case RTE_FLOW_FIELD_IPV4_DSCP:
5930 	case RTE_FLOW_FIELD_IPV4_ECN:
5931 	case RTE_FLOW_FIELD_IPV4_TTL:
5932 	case RTE_FLOW_FIELD_IPV4_SRC:
5933 	case RTE_FLOW_FIELD_IPV4_DST:
5934 	case RTE_FLOW_FIELD_IPV6_TRAFFIC_CLASS:
5935 	case RTE_FLOW_FIELD_IPV6_FLOW_LABEL:
5936 	case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
5937 	case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
5938 	case RTE_FLOW_FIELD_IPV6_SRC:
5939 	case RTE_FLOW_FIELD_IPV6_DST:
5940 	case RTE_FLOW_FIELD_TCP_PORT_SRC:
5941 	case RTE_FLOW_FIELD_TCP_PORT_DST:
5942 	case RTE_FLOW_FIELD_TCP_FLAGS:
5943 	case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
5944 	case RTE_FLOW_FIELD_UDP_PORT_SRC:
5945 	case RTE_FLOW_FIELD_UDP_PORT_DST:
5946 		if (data->level > 2)
5947 			return rte_flow_error_set(error, ENOTSUP,
5948 						  RTE_FLOW_ERROR_TYPE_ACTION,
5949 						  data,
5950 						  "second inner header fields modification is not supported");
5951 		if (inner_supported)
5952 			break;
5953 		/* Fallthrough */
5954 	case RTE_FLOW_FIELD_VLAN_ID:
5955 	case RTE_FLOW_FIELD_IPV4_PROTO:
5956 	case RTE_FLOW_FIELD_IPV6_PROTO:
5957 	case RTE_FLOW_FIELD_IPV6_DSCP:
5958 	case RTE_FLOW_FIELD_IPV6_ECN:
5959 	case RTE_FLOW_FIELD_TCP_SEQ_NUM:
5960 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
5961 	case RTE_FLOW_FIELD_ESP_PROTO:
5962 	case RTE_FLOW_FIELD_ESP_SPI:
5963 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
5964 	case RTE_FLOW_FIELD_VXLAN_VNI:
5965 	case RTE_FLOW_FIELD_VXLAN_LAST_RSVD:
5966 	case RTE_FLOW_FIELD_GENEVE_VNI:
5967 	case RTE_FLOW_FIELD_GENEVE_OPT_TYPE:
5968 	case RTE_FLOW_FIELD_GENEVE_OPT_CLASS:
5969 	case RTE_FLOW_FIELD_GENEVE_OPT_DATA:
5970 	case RTE_FLOW_FIELD_GTP_TEID:
5971 	case RTE_FLOW_FIELD_GTP_PSC_QFI:
5972 		if (data->level > 1)
5973 			return rte_flow_error_set(error, ENOTSUP,
5974 						  RTE_FLOW_ERROR_TYPE_ACTION,
5975 						  data,
5976 						  "inner header fields modification is not supported");
5977 		break;
5978 	case RTE_FLOW_FIELD_MPLS:
5979 		if (data->level == 1)
5980 			return rte_flow_error_set(error, ENOTSUP,
5981 						  RTE_FLOW_ERROR_TYPE_ACTION,
5982 						  data,
5983 						  "outer MPLS header modification is not supported");
5984 		if (data->level > 2)
5985 			return rte_flow_error_set(error, ENOTSUP,
5986 						  RTE_FLOW_ERROR_TYPE_ACTION,
5987 						  data,
5988 						  "inner MPLS header modification is not supported");
5989 		break;
5990 	case RTE_FLOW_FIELD_POINTER:
5991 	case RTE_FLOW_FIELD_VALUE:
5992 	default:
5993 		MLX5_ASSERT(false);
5994 	}
5995 	return 0;
5996 }
5997 
5998 static int
5999 flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
6000 				     const struct rte_flow_action *action,
6001 				     const struct rte_flow_action *mask,
6002 				     struct rte_flow_error *error)
6003 {
6004 	const struct rte_flow_action_modify_field *action_conf = action->conf;
6005 	const struct rte_flow_action_modify_field *mask_conf = mask->conf;
6006 	struct mlx5_priv *priv = dev->data->dev_private;
6007 	struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
6008 	int ret;
6009 
6010 	if (!mask_conf)
6011 		return rte_flow_error_set(error, EINVAL,
6012 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6013 					  "modify_field mask conf is missing");
6014 	if (action_conf->operation != mask_conf->operation)
6015 		return rte_flow_error_set(error, EINVAL,
6016 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6017 				"modify_field operation mask and template are not equal");
6018 	if (action_conf->dst.field != mask_conf->dst.field)
6019 		return rte_flow_error_set(error, EINVAL,
6020 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6021 				"destination field mask and template are not equal");
6022 	if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
6023 	    action_conf->dst.field == RTE_FLOW_FIELD_VALUE ||
6024 	    action_conf->dst.field == RTE_FLOW_FIELD_HASH_RESULT)
6025 		return rte_flow_error_set(error, EINVAL,
6026 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6027 				"immediate value, pointer and hash result cannot be used as destination");
6028 	ret = flow_hw_validate_modify_field_level(&action_conf->dst, false, error);
6029 	if (ret)
6030 		return ret;
6031 	if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM &&
6032 	    !flow_hw_modify_field_is_geneve_opt(action_conf->dst.field)) {
6033 		if (action_conf->dst.tag_index &&
6034 		    !flow_modify_field_support_tag_array(action_conf->dst.field))
6035 			return rte_flow_error_set(error, EINVAL,
6036 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6037 					"destination tag index is not supported");
6038 		if (action_conf->dst.class_id)
6039 			return rte_flow_error_set(error, EINVAL,
6040 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6041 					"destination class id is not supported");
6042 	}
6043 	if (mask_conf->dst.level != UINT8_MAX)
6044 		return rte_flow_error_set(error, EINVAL,
6045 			RTE_FLOW_ERROR_TYPE_ACTION, action,
6046 			"destination encapsulation level must be fully masked");
6047 	if (mask_conf->dst.offset != UINT32_MAX)
6048 		return rte_flow_error_set(error, EINVAL,
6049 			RTE_FLOW_ERROR_TYPE_ACTION, action,
6050 			"destination offset level must be fully masked");
6051 	if (action_conf->src.field != mask_conf->src.field)
6052 		return rte_flow_error_set(error, EINVAL,
6053 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6054 				"destination field mask and template are not equal");
6055 	if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
6056 	    action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
6057 		if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM &&
6058 		    !flow_hw_modify_field_is_geneve_opt(action_conf->src.field)) {
6059 			if (action_conf->src.tag_index &&
6060 			    !flow_modify_field_support_tag_array(action_conf->src.field))
6061 				return rte_flow_error_set(error, EINVAL,
6062 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6063 					"source tag index is not supported");
6064 			if (action_conf->src.class_id)
6065 				return rte_flow_error_set(error, EINVAL,
6066 					RTE_FLOW_ERROR_TYPE_ACTION, action,
6067 					"source class id is not supported");
6068 		}
6069 		if (mask_conf->src.level != UINT8_MAX)
6070 			return rte_flow_error_set(error, EINVAL,
6071 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6072 				"source encapsulation level must be fully masked");
6073 		if (mask_conf->src.offset != UINT32_MAX)
6074 			return rte_flow_error_set(error, EINVAL,
6075 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6076 				"source offset level must be fully masked");
6077 		ret = flow_hw_validate_modify_field_level(&action_conf->src, true, error);
6078 		if (ret)
6079 			return ret;
6080 	}
6081 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
6082 	     action_conf->dst.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
6083 	     action_conf->dst.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) ||
6084 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
6085 	     action_conf->src.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
6086 	     action_conf->src.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX))
6087 		return rte_flow_error_set(error, EINVAL,
6088 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6089 				 "tag index is out of range");
6090 	if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
6091 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->dst.tag_index) == REG_NON) ||
6092 	    (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
6093 	     flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->src.tag_index) == REG_NON))
6094 		return rte_flow_error_set(error, EINVAL,
6095 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6096 					  "tag index is out of range");
6097 	if (mask_conf->width != UINT32_MAX)
6098 		return rte_flow_error_set(error, EINVAL,
6099 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6100 				"modify_field width field must be fully masked");
6101 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
6102 		return rte_flow_error_set(error, EINVAL,
6103 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6104 				"modifying arbitrary place in a packet is not supported");
6105 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
6106 		return rte_flow_error_set(error, EINVAL,
6107 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6108 				"modifying vlan_type is not supported");
6109 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_RANDOM))
6110 		return rte_flow_error_set(error, EINVAL,
6111 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6112 				"modifying random value is not supported");
6113 	/*
6114 	 * GENEVE VNI modification is supported only when the GENEVE header is
6115 	 * parsed natively. When GENEVE options are supported, both the GENEVE
6116 	 * header and the options are parsed by the flex parser.
6117 	 */
6118 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI) &&
6119 	    attr->geneve_tlv_opt)
6120 		return rte_flow_error_set(error, EINVAL,
6121 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6122 				"modifying Geneve VNI is not supported when GENEVE opt is supported");
6123 	if (priv->tlv_options == NULL &&
6124 	    (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_TYPE) ||
6125 	     flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_CLASS) ||
6126 	     flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_DATA)))
6127 		return rte_flow_error_set(error, EINVAL,
6128 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6129 				"modifying Geneve TLV option is supported only after parser configuration");
6130 	/* Due to HW bug, tunnel MPLS header is read only. */
6131 	if (action_conf->dst.field == RTE_FLOW_FIELD_MPLS)
6132 		return rte_flow_error_set(error, EINVAL,
6133 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6134 				"MPLS cannot be used as destination");
6135 	/* ADD_FIELD is not supported for all the fields. */
6136 	if (!flow_hw_modify_field_is_add_dst_valid(action_conf))
6137 		return rte_flow_error_set(error, EINVAL,
6138 				RTE_FLOW_ERROR_TYPE_ACTION, action,
6139 				"invalid add_field destination");
6140 	return 0;
6141 }
6142 
6143 static int
6144 flow_hw_validate_action_port_representor(struct rte_eth_dev *dev __rte_unused,
6145 					 const struct rte_flow_actions_template_attr *attr,
6146 					 const struct rte_flow_action *action,
6147 					 const struct rte_flow_action *mask,
6148 					 struct rte_flow_error *error)
6149 {
6150 	const struct rte_flow_action_ethdev *action_conf = NULL;
6151 	const struct rte_flow_action_ethdev *mask_conf = NULL;
6152 
6153 	/* If transfer is set, port has been validated as proxy port. */
6154 	if (!attr->transfer)
6155 		return rte_flow_error_set(error, EINVAL,
6156 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6157 					  "cannot use port_representor actions"
6158 					  " without an E-Switch");
6159 	if (!action || !mask)
6160 		return rte_flow_error_set(error, EINVAL,
6161 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6162 					  "action and mask configuration must be set");
6163 	action_conf = action->conf;
6164 	mask_conf = mask->conf;
6165 	if (!mask_conf || mask_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR ||
6166 	    !action_conf || action_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
6167 		return rte_flow_error_set(error, EINVAL,
6168 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6169 					  "only eswitch manager port 0xffff is"
6170 					  " supported");
6171 	return 0;
6172 }
6173 
6174 static int
6175 flow_hw_validate_target_port_id(struct rte_eth_dev *dev,
6176 				uint16_t target_port_id)
6177 {
6178 	struct mlx5_priv *port_priv;
6179 	struct mlx5_priv *dev_priv;
6180 
6181 	if (target_port_id == MLX5_REPRESENTED_PORT_ESW_MGR)
6182 		return 0;
6183 
6184 	port_priv = mlx5_port_to_eswitch_info(target_port_id, false);
6185 	if (!port_priv) {
6186 		rte_errno = EINVAL;
6187 		DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for port %u",
6188 			dev->data->port_id, target_port_id);
6189 		return -rte_errno;
6190 	}
6191 
6192 	dev_priv = mlx5_dev_to_eswitch_info(dev);
6193 	if (!dev_priv) {
6194 		rte_errno = EINVAL;
6195 		DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for transfer proxy",
6196 			dev->data->port_id);
6197 		return -rte_errno;
6198 	}
6199 
6200 	if (port_priv->domain_id != dev_priv->domain_id) {
6201 		rte_errno = EINVAL;
6202 		DRV_LOG(ERR, "Port %u target port %u belongs to a different E-Switch domain",
6203 			dev->data->port_id, target_port_id);
6204 		return -rte_errno;
6205 	}
6206 
6207 	return 0;
6208 }
6209 
6210 static int
6211 flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
6212 					 const struct rte_flow_action *action,
6213 					 const struct rte_flow_action *mask,
6214 					 struct rte_flow_error *error)
6215 {
6216 	const struct rte_flow_action_ethdev *action_conf = action->conf;
6217 	const struct rte_flow_action_ethdev *mask_conf = mask->conf;
6218 	struct mlx5_priv *priv = dev->data->dev_private;
6219 
6220 	if (!priv->sh->config.dv_esw_en)
6221 		return rte_flow_error_set(error, EINVAL,
6222 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6223 					  "cannot use represented_port actions"
6224 					  " without an E-Switch");
6225 	if (mask_conf && mask_conf->port_id) {
6226 		if (!action_conf)
6227 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
6228 						  action, "port index was not provided");
6229 
6230 		if (flow_hw_validate_target_port_id(dev, action_conf->port_id))
6231 			return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
6232 						  action, "port index is invalid");
6233 	}
6234 	return 0;
6235 }
6236 
6237 /**
6238  * Validate AGE action.
6239  *
6240  * @param[in] dev
6241  *   Pointer to rte_eth_dev structure.
6242  * @param[in] action
6243  *   Pointer to the indirect action.
6244  * @param[in] action_flags
6245  *   Holds the actions detected until now.
6246  * @param[in] fixed_cnt
6247  *   Indicator if this list has a fixed COUNT action.
6248  * @param[out] error
6249  *   Pointer to error structure.
6250  *
6251  * @return
6252  *   0 on success, a negative errno value otherwise and rte_errno is set.
6253  */
6254 static int
6255 flow_hw_validate_action_age(struct rte_eth_dev *dev,
6256 			    const struct rte_flow_action *action,
6257 			    uint64_t action_flags, bool fixed_cnt,
6258 			    struct rte_flow_error *error)
6259 {
6260 	struct mlx5_priv *priv = dev->data->dev_private;
6261 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
6262 
6263 	if (!priv->sh->cdev->config.devx)
6264 		return rte_flow_error_set(error, ENOTSUP,
6265 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6266 					  NULL, "AGE action not supported");
6267 	if (age_info->ages_ipool == NULL)
6268 		return rte_flow_error_set(error, EINVAL,
6269 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6270 					  "aging pool not initialized");
6271 	if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
6272 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
6273 		return rte_flow_error_set(error, EINVAL,
6274 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6275 					  "duplicate AGE actions set");
6276 	if (fixed_cnt)
6277 		return rte_flow_error_set(error, EINVAL,
6278 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6279 					  "AGE and fixed COUNT combination is not supported");
6280 	return 0;
6281 }
6282 
6283 /**
6284  * Validate count action.
6285  *
6286  * @param[in] dev
6287  *   Pointer to rte_eth_dev structure.
6288  * @param[in] action
6289  *   Pointer to the indirect action.
6290  * @param[in] mask
6291  *   Pointer to the indirect action mask.
6292  * @param[in] action_flags
6293  *   Holds the actions detected until now.
6294  * @param[out] error
6295  *   Pointer to error structure.
6296  *
6297  * @return
6298  *   0 on success, a negative errno value otherwise and rte_errno is set.
6299  */
6300 static int
6301 flow_hw_validate_action_count(struct rte_eth_dev *dev,
6302 			      const struct rte_flow_action *action,
6303 			      const struct rte_flow_action *mask,
6304 			      uint64_t action_flags,
6305 			      struct rte_flow_error *error)
6306 {
6307 	struct mlx5_priv *priv = dev->data->dev_private;
6308 	const struct rte_flow_action_count *count = mask->conf;
6309 
6310 	if (!priv->sh->cdev->config.devx)
6311 		return rte_flow_error_set(error, ENOTSUP,
6312 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6313 					  "count action not supported");
6314 	if (!priv->hws_cpool)
6315 		return rte_flow_error_set(error, EINVAL,
6316 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6317 					  "counters pool not initialized");
6318 	if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
6319 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
6320 		return rte_flow_error_set(error, EINVAL,
6321 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6322 					  "duplicate count actions set");
6323 	if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
6324 		return rte_flow_error_set(error, EINVAL,
6325 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
6326 					  "AGE and COUNT action shared by mask combination is not supported");
6327 	return 0;
6328 }
6329 
6330 /**
6331  * Validate meter_mark action.
6332  *
6333  * @param[in] dev
6334  *   Pointer to rte_eth_dev structure.
6335  * @param[in] action
6336  *   Pointer to the indirect action.
6337  * @param[in] indirect
6338  *   If true, then provided action was passed using an indirect action.
6339  *   If true, the provided action was passed as an indirect action.
6340  *   Pointer to error structure.
6341  *
6342  * @return
6343  *   0 on success, a negative errno value otherwise and rte_errno is set.
6344  */
6345 static int
6346 flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
6347 			      const struct rte_flow_action *action,
6348 			      bool indirect,
6349 			      struct rte_flow_error *error)
6350 {
6351 	struct mlx5_priv *priv = dev->data->dev_private;
6352 
6353 	RTE_SET_USED(action);
6354 
6355 	if (!priv->sh->cdev->config.devx)
6356 		return rte_flow_error_set(error, ENOTSUP,
6357 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6358 					  "meter_mark action not supported");
6359 	if (!indirect && priv->shared_host)
6360 		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action,
6361 					  "meter_mark action can only be used on host port");
6362 	if (!priv->hws_mpool)
6363 		return rte_flow_error_set(error, EINVAL,
6364 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6365 					  "meter_mark pool not initialized");
6366 	return 0;
6367 }
6368 
6369 /**
6370  * Validate indirect action.
6371  *
6372  * @param[in] dev
6373  *   Pointer to rte_eth_dev structure.
6374  * @param[in] action
6375  *   Pointer to the indirect action.
6376  * @param[in] mask
6377  *   Pointer to the indirect action mask.
6378  * @param[in, out] action_flags
6379  *   Holds the actions detected until now.
6380  * @param[in, out] fixed_cnt
6381  *   Pointer to indicator if this list has a fixed COUNT action.
6382  * @param[out] error
6383  *   Pointer to error structure.
6384  *
6385  * @return
6386  *   0 on success, a negative errno value otherwise and rte_errno is set.
6387  */
6388 static int
6389 flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
6390 				 const struct rte_flow_action *action,
6391 				 const struct rte_flow_action *mask,
6392 				 uint64_t *action_flags, bool *fixed_cnt,
6393 				 struct rte_flow_error *error)
6394 {
6395 	uint32_t type;
6396 	int ret;
6397 
6398 	if (!mask)
6399 		return rte_flow_error_set(error, EINVAL,
6400 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6401 					  "Unable to determine indirect action type without a mask specified");
6402 	type = mask->type;
6403 	switch (type) {
6404 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
6405 		ret = flow_hw_validate_action_meter_mark(dev, mask, true, error);
6406 		if (ret < 0)
6407 			return ret;
6408 		*action_flags |= MLX5_FLOW_ACTION_METER;
6409 		break;
6410 	case RTE_FLOW_ACTION_TYPE_RSS:
6411 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
6412 		*action_flags |= MLX5_FLOW_ACTION_RSS;
6413 		break;
6414 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6415 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
6416 		*action_flags |= MLX5_FLOW_ACTION_CT;
6417 		break;
6418 	case RTE_FLOW_ACTION_TYPE_COUNT:
6419 		if (action->conf && mask->conf) {
6420 			if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
6421 			    (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
6422 				/*
6423 				 * AGE cannot use an indirect counter that is
6424 				 * shared with other flow rules.
6425 				 */
6426 				return rte_flow_error_set(error, EINVAL,
6427 						  RTE_FLOW_ERROR_TYPE_ACTION,
6428 						  NULL,
6429 						  "AGE and fixed COUNT combination is not supported");
6430 			*fixed_cnt = true;
6431 		}
6432 		ret = flow_hw_validate_action_count(dev, action, mask,
6433 						    *action_flags, error);
6434 		if (ret < 0)
6435 			return ret;
6436 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
6437 		break;
6438 	case RTE_FLOW_ACTION_TYPE_AGE:
6439 		ret = flow_hw_validate_action_age(dev, action, *action_flags,
6440 						  *fixed_cnt, error);
6441 		if (ret < 0)
6442 			return ret;
6443 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
6444 		break;
6445 	case RTE_FLOW_ACTION_TYPE_QUOTA:
6446 		/* TODO: add proper quota verification */
6447 		*action_flags |= MLX5_FLOW_ACTION_QUOTA;
6448 		break;
6449 	default:
6450 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
6451 		return rte_flow_error_set(error, ENOTSUP,
6452 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
6453 					  "Unsupported indirect action type");
6454 	}
6455 	return 0;
6456 }
6457 
6458 /**
6459  * Validate ipv6_ext_push action.
6460  *
6461  * @param[in] dev
6462  *   Pointer to rte_eth_dev structure.
6463  * @param[in] action
6464  *   Pointer to the indirect action.
6465  * @param[out] error
6466  *   Pointer to error structure.
6467  *
6468  * @return
6469  *   0 on success, a negative errno value otherwise and rte_errno is set.
6470  */
6471 static int
6472 flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
6473 				      const struct rte_flow_action *action,
6474 				      struct rte_flow_error *error)
6475 {
6476 	const struct rte_flow_action_ipv6_ext_push *raw_push_data = action->conf;
6477 
6478 	if (!raw_push_data || !raw_push_data->size || !raw_push_data->data)
6479 		return rte_flow_error_set(error, EINVAL,
6480 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6481 					  "invalid ipv6_ext_push data");
6482 	if (raw_push_data->type != IPPROTO_ROUTING ||
6483 	    raw_push_data->size > MLX5_PUSH_MAX_LEN)
6484 		return rte_flow_error_set(error, EINVAL,
6485 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6486 					  "Unsupported ipv6_ext_push type or length");
6487 	return 0;
6488 }
6489 
6490 /**
6491  * Process `... / raw_decap / raw_encap / ...` actions sequence.
6492  * The PMD handles the sequence as a single encap or decap reformat action,
6493  * depending on the raw_encap configuration.
6494  *
6495  * The function assumes that the raw_decap / raw_encap location
6496  * in actions template list complies with relative HWS actions order:
6497  * for the required reformat configuration:
6498  * ENCAP configuration must appear before [JUMP|DROP|PORT]
6499  * DECAP configuration must appear at the template head.
6500  */
6501 static uint64_t
6502 mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
6503 			       uint32_t encap_ind, uint64_t flags)
6504 {
6505 	const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
6506 
6507 	if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
6508 		return MLX5_FLOW_ACTION_ENCAP;
6509 	if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6510 		return MLX5_FLOW_ACTION_ENCAP;
6511 	return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
6512 	       MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
6513 }
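
/*
 * Illustrative sketch (hypothetical sizes): classification done by the helper
 * above for a raw_decap / raw_encap pair. Re-encapsulating with a header
 * shorter than MLX5_ENCAPSULATION_DECISION_SIZE is treated as a decap
 * reformat; a full tunnel header is treated as an encap reformat.
 *
 * @code{.c}
 * uint8_t l2_hdr[14] = { 0 }; // plain Ethernet header to restore
 * const struct rte_flow_action_raw_decap decap = { .size = 50 };
 * const struct rte_flow_action_raw_encap encap = {
 *     .data = l2_hdr, .size = sizeof(l2_hdr),
 * };
 * const struct rte_flow_action actions[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *     { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * // mlx5_decap_encap_reformat_type(actions, 1, MLX5_FLOW_ACTION_DECAP)
 * // returns MLX5_FLOW_ACTION_DECAP: encap.size (14 bytes) is below
 * // MLX5_ENCAPSULATION_DECISION_SIZE.
 * @endcode
 */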
6514 
6515 enum mlx5_hw_indirect_list_relative_position {
6516 	MLX5_INDIRECT_LIST_POSITION_UNKNOWN = -1,
6517 	MLX5_INDIRECT_LIST_POSITION_BEFORE_MH = 0,
6518 	MLX5_INDIRECT_LIST_POSITION_AFTER_MH,
6519 };
6520 
6521 static enum mlx5_hw_indirect_list_relative_position
6522 mlx5_hw_indirect_list_mh_position(const struct rte_flow_action *action)
6523 {
6524 	const struct rte_flow_action_indirect_list *conf = action->conf;
6525 	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(conf->handle);
6526 	enum mlx5_hw_indirect_list_relative_position pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6527 	const union {
6528 		struct mlx5_indlst_legacy *legacy;
6529 		struct mlx5_hw_encap_decap_action *reformat;
6530 		struct rte_flow_action_list_handle *handle;
6531 	} h = { .handle = conf->handle};
6532 
6533 	switch (list_type) {
6534 	case  MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
6535 		switch (h.legacy->legacy_type) {
6536 		case RTE_FLOW_ACTION_TYPE_AGE:
6537 		case RTE_FLOW_ACTION_TYPE_COUNT:
6538 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6539 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
6540 		case RTE_FLOW_ACTION_TYPE_QUOTA:
6541 			pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6542 			break;
6543 		case RTE_FLOW_ACTION_TYPE_RSS:
6544 			pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6545 			break;
6546 		default:
6547 			pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6548 			break;
6549 		}
6550 		break;
6551 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
6552 		pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6553 		break;
6554 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
6555 		switch (h.reformat->action_type) {
6556 		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
6557 		case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
6558 			pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6559 			break;
6560 		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
6561 		case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
6562 			pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6563 			break;
6564 		default:
6565 			pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6566 			break;
6567 		}
6568 		break;
6569 	default:
6570 		pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6571 		break;
6572 	}
6573 	return pos;
6574 }
6575 
6576 #define MLX5_HW_EXPAND_MH_FAILED 0xffff
6577 
6578 static inline uint16_t
6579 flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
6580 				     struct rte_flow_action masks[],
6581 				     const struct rte_flow_action *mf_actions,
6582 				     const struct rte_flow_action *mf_masks,
6583 				     uint64_t flags, uint32_t act_num,
6584 				     uint32_t mf_num)
6585 {
6586 	uint32_t i, tail;
6587 
6588 	MLX5_ASSERT(actions && masks);
6589 	MLX5_ASSERT(mf_num > 0);
6590 	if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
6591 		/*
6592 		 * Application action template already has Modify Field.
6593 		 * Its location will be used in DR.
6594 		 * Expanded MF action can be added before the END.
6595 		 */
6596 		i = act_num - 1;
6597 		goto insert;
6598 	}
6599 	/**
6600 	 * Locate the first action positioned BEFORE the new MF.
6601 	 *
6602 	 * Search for a place to insert modify header
6603 	 * from the END action backwards:
6604 	 * 1. END is always present in actions array
6605 	 * 2. END location is always at action[act_num - 1]
6606 	 * 3. END always positioned AFTER modify field location
6607 	 *
6608 	 * Relative actions order is the same for RX, TX and FDB.
6609 	 *
6610 	 * Current actions order (draft-3)
6611 	 * @see action_order_arr[]
6612 	 */
6613 	for (i = act_num - 2; (int)i >= 0; i--) {
6614 		enum mlx5_hw_indirect_list_relative_position pos;
6615 		enum rte_flow_action_type type = actions[i].type;
6616 		uint64_t reformat_type;
6617 
6618 		if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
6619 			type = masks[i].type;
6620 		switch (type) {
6621 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6622 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6623 		case RTE_FLOW_ACTION_TYPE_DROP:
6624 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
6625 		case RTE_FLOW_ACTION_TYPE_JUMP:
6626 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
6627 		case RTE_FLOW_ACTION_TYPE_QUEUE:
6628 		case RTE_FLOW_ACTION_TYPE_RSS:
6629 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
6630 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
6631 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6632 		case RTE_FLOW_ACTION_TYPE_VOID:
6633 		case RTE_FLOW_ACTION_TYPE_END:
6634 			break;
6635 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6636 			reformat_type =
6637 				mlx5_decap_encap_reformat_type(actions, i,
6638 							       flags);
6639 			if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
6640 				i++;
6641 				goto insert;
6642 			}
6643 			if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6644 				i--;
6645 			break;
6646 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
6647 			pos = mlx5_hw_indirect_list_mh_position(&actions[i]);
6648 			if (pos == MLX5_INDIRECT_LIST_POSITION_UNKNOWN)
6649 				return MLX5_HW_EXPAND_MH_FAILED;
6650 			if (pos == MLX5_INDIRECT_LIST_POSITION_BEFORE_MH)
6651 				goto insert;
6652 			break;
6653 		default:
6654 			i++; /* new MF inserted AFTER actions[i] */
6655 			goto insert;
6656 		}
6657 	}
6658 	i = 0;
6659 insert:
6660 	tail = act_num - i; /* num action to move */
6661 	memmove(actions + i + mf_num, actions + i, sizeof(actions[0]) * tail);
6662 	memcpy(actions + i, mf_actions, sizeof(actions[0]) * mf_num);
6663 	memmove(masks + i + mf_num, masks + i, sizeof(masks[0]) * tail);
6664 	memcpy(masks + i, mf_masks, sizeof(masks[0]) * mf_num);
6665 	return i;
6666 }
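
/*
 * Illustrative sketch (hypothetical template): expansion done by the helper
 * above when the template has no explicit MODIFY_FIELD action. Fate actions
 * such as RSS must follow the implicit modify-header block, so the block is
 * inserted between COUNT and RSS and the function returns index 1.
 *
 * @code{.c}
 * // before: { COUNT, RSS, END }, act_num = 3, mf_num = 1
 * // after:  { COUNT, MODIFY_FIELD, RSS, END }
 * @endcode
 */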
6667 
6668 static int
6669 flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
6670 				  const
6671 				  struct rte_flow_actions_template_attr *attr,
6672 				  const struct rte_flow_action *action,
6673 				  const struct rte_flow_action *mask,
6674 				  struct rte_flow_error *error)
6675 {
6676 #define X_FIELD(ptr, t, f) (((ptr)->conf) && ((t *)((ptr)->conf))->f)
6677 
6678 	const bool masked_push =
6679 		X_FIELD(mask + MLX5_HW_VLAN_PUSH_TYPE_IDX,
6680 			const struct rte_flow_action_of_push_vlan, ethertype);
6681 	bool masked_param;
6682 
6683 	/*
6684 	 * Mandatory actions order:
6685 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
6686 	 */
6687 	RTE_SET_USED(dev);
6688 	RTE_SET_USED(attr);
6689 	/* Check that mask matches OF_PUSH_VLAN */
6690 	if (mask[MLX5_HW_VLAN_PUSH_TYPE_IDX].type !=
6691 	    RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
6692 		return rte_flow_error_set(error, EINVAL,
6693 					  RTE_FLOW_ERROR_TYPE_ACTION,
6694 					  action, "OF_PUSH_VLAN: mask does not match");
6695 	/* Check that the second template and mask items are SET_VLAN_VID */
6696 	if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
6697 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID ||
6698 	    mask[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
6699 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
6700 		return rte_flow_error_set(error, EINVAL,
6701 					  RTE_FLOW_ERROR_TYPE_ACTION,
6702 					  action, "OF_PUSH_VLAN: invalid actions order");
6703 	masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_VID_IDX,
6704 			       const struct rte_flow_action_of_set_vlan_vid,
6705 			       vlan_vid);
6706 	/*
6707 	 * PMD requires the OF_SET_VLAN_VID mask to match OF_PUSH_VLAN
6708 	 */
6709 	if (masked_push ^ masked_param)
6710 		return rte_flow_error_set(error, EINVAL,
6711 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6712 					  "OF_SET_VLAN_VID: mask does not match OF_PUSH_VLAN");
6713 	if (is_of_vlan_pcp_present(action)) {
6714 		if (mask[MLX5_HW_VLAN_PUSH_PCP_IDX].type !=
6715 		     RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
6716 			return rte_flow_error_set(error, EINVAL,
6717 						  RTE_FLOW_ERROR_TYPE_ACTION,
6718 						  action, "OF_SET_VLAN_PCP: missing mask configuration");
6719 		masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_PCP_IDX,
6720 				       const struct
6721 				       rte_flow_action_of_set_vlan_pcp,
6722 				       vlan_pcp);
6723 		/*
6724 		 * PMD requires the OF_SET_VLAN_PCP mask to match OF_PUSH_VLAN
6725 		 */
6726 		if (masked_push ^ masked_param)
6727 			return rte_flow_error_set(error, EINVAL,
6728 						  RTE_FLOW_ERROR_TYPE_ACTION, action,
6729 						  "OF_SET_VLAN_PCP: mask does not match OF_PUSH_VLAN");
6730 	}
6731 	return 0;
6732 #undef X_FIELD
6733 }
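
/*
 * Illustrative sketch (hypothetical VLAN values): the mandatory OF_PUSH_VLAN
 * action sequence accepted by the validation above, with all entries masked.
 *
 * @code{.c}
 * const struct rte_flow_action actions[] = {
 *     [MLX5_HW_VLAN_PUSH_TYPE_IDX] = {
 *         .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
 *         .conf = &(struct rte_flow_action_of_push_vlan){
 *             .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *         },
 *     },
 *     [MLX5_HW_VLAN_PUSH_VID_IDX] = {
 *         .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
 *         .conf = &(struct rte_flow_action_of_set_vlan_vid){
 *             .vlan_vid = RTE_BE16(100),
 *         },
 *     },
 *     [MLX5_HW_VLAN_PUSH_PCP_IDX] = {
 *         .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
 *         .conf = &(struct rte_flow_action_of_set_vlan_pcp){
 *             .vlan_pcp = 3,
 *         },
 *     },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * // The masks array must use the same action types; the VID (and PCP, if
 * // present) masks must be non-zero exactly when the OF_PUSH_VLAN ethertype
 * // mask is non-zero.
 * @endcode
 */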
6734 
6735 static int
6736 flow_hw_validate_action_default_miss(struct rte_eth_dev *dev,
6737 				     const struct rte_flow_actions_template_attr *attr,
6738 				     uint64_t action_flags,
6739 				     struct rte_flow_error *error)
6740 {
6741 	/*
6742 	 * The private DEFAULT_MISS action is used internally for LACP in control
6743 	 * flows, so this validation could be skipped. It is kept for now since the
6744 	 * validation is done only once.
6745 	 */
6746 	struct mlx5_priv *priv = dev->data->dev_private;
6747 
6748 	if (!attr->ingress || attr->egress || attr->transfer)
6749 		return rte_flow_error_set(error, EINVAL,
6750 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6751 					  "DEFAULT MISS is only supported in ingress.");
6752 	if (!priv->hw_def_miss)
6753 		return rte_flow_error_set(error, EINVAL,
6754 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6755 					  "DEFAULT MISS action does not exist.");
6756 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
6757 		return rte_flow_error_set(error, EINVAL,
6758 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6759 					  "DEFAULT MISS should be the only termination.");
6760 	return 0;
6761 }
6762 
6763 static int
6764 flow_hw_validate_action_nat64(struct rte_eth_dev *dev,
6765 			      const struct rte_flow_actions_template_attr *attr,
6766 			      const struct rte_flow_action *action,
6767 			      const struct rte_flow_action *mask,
6768 			      uint64_t action_flags,
6769 			      struct rte_flow_error *error)
6770 {
6771 	struct mlx5_priv *priv = dev->data->dev_private;
6772 	const struct rte_flow_action_nat64 *nat64_c;
6773 	enum rte_flow_nat64_type cov_type;
6774 
6775 	RTE_SET_USED(action_flags);
6776 	if (mask->conf && ((const struct rte_flow_action_nat64 *)mask->conf)->type) {
6777 		nat64_c = (const struct rte_flow_action_nat64 *)action->conf;
6778 		cov_type = nat64_c->type;
6779 		if ((attr->ingress && !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][cov_type]) ||
6780 		    (attr->egress && !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][cov_type]) ||
6781 		    (attr->transfer && !priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][cov_type]))
6782 			goto err_out;
6783 	} else {
6784 		/*
6785 		 * Usually, the action will be used in both directions. For non-masked
6786 		 * actions, both directions' actions are checked.
6787 		 */
6788 		if (attr->ingress)
6789 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_6TO4] ||
6790 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_4TO6])
6791 				goto err_out;
6792 		if (attr->egress)
6793 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][RTE_FLOW_NAT64_6TO4] ||
6794 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][RTE_FLOW_NAT64_4TO6])
6795 				goto err_out;
6796 		if (attr->transfer)
6797 			if (!priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][RTE_FLOW_NAT64_6TO4] ||
6798 			    !priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][RTE_FLOW_NAT64_4TO6])
6799 				goto err_out;
6800 	}
6801 	return 0;
6802 err_out:
6803 	return rte_flow_error_set(error, EOPNOTSUPP, RTE_FLOW_ERROR_TYPE_ACTION,
6804 				  NULL, "NAT64 action is not supported.");
6805 }
6806 
6807 static int
6808 flow_hw_validate_action_jump(struct rte_eth_dev *dev,
6809 			     const struct rte_flow_actions_template_attr *attr,
6810 			     const struct rte_flow_action *action,
6811 			     const struct rte_flow_action *mask,
6812 			     struct rte_flow_error *error)
6813 {
6814 	const struct rte_flow_action_jump *m = mask->conf;
6815 	const struct rte_flow_action_jump *v = action->conf;
6816 	struct mlx5_flow_template_table_cfg cfg = {
6817 		.external = true,
6818 		.attr = {
6819 			.flow_attr = {
6820 				.ingress = attr->ingress,
6821 				.egress = attr->egress,
6822 				.transfer = attr->transfer,
6823 			},
6824 		},
6825 	};
6826 	uint32_t t_group = 0;
6827 
6828 	if (!m || !m->group)
6829 		return 0;
6830 	if (!v)
6831 		return rte_flow_error_set(error, EINVAL,
6832 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6833 					  "Invalid jump action configuration");
6834 	if (flow_hw_translate_group(dev, &cfg, v->group, &t_group, error))
6835 		return -rte_errno;
6836 	if (t_group == 0)
6837 		return rte_flow_error_set(error, EINVAL,
6838 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6839 					  "Unsupported action - jump to root table");
6840 	return 0;
6841 }
6842 
6843 static int
6844 mlx5_flow_validate_action_jump_to_table_index(const struct rte_flow_action *action,
6845 			     const struct rte_flow_action *mask,
6846 			     struct rte_flow_error *error)
6847 {
6848 	const struct rte_flow_action_jump_to_table_index *m = mask->conf;
6849 	const struct rte_flow_action_jump_to_table_index *v = action->conf;
6850 	struct mlx5dr_action *jump_action;
6851 	uint32_t t_group = 0;
6852 
6853 	if (!m || !m->table)
6854 		return 0;
6855 	if (!v)
6856 		return rte_flow_error_set(error, EINVAL,
6857 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6858 					  "Invalid jump to matcher action configuration");
6859 	t_group = v->table->grp->group_id;
6860 	if (t_group == 0)
6861 		return rte_flow_error_set(error, EINVAL,
6862 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6863 					  "Unsupported action - jump to root table");
6864 	if (likely(!rte_flow_template_table_resizable(0, &v->table->cfg.attr))) {
6865 		jump_action = v->table->matcher_info[0].jump;
6866 	} else {
6867 		uint32_t selector;
6868 		rte_rwlock_read_lock(&v->table->matcher_replace_rwlk);
6869 		selector = v->table->matcher_selector;
6870 		jump_action = v->table->matcher_info[selector].jump;
6871 		rte_rwlock_read_unlock(&v->table->matcher_replace_rwlk);
6872 	}
6873 	if (jump_action == NULL)
6874 		return rte_flow_error_set(error, EINVAL,
6875 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
6876 					  "Unsupported action - table is not a rule array");
6877 	return 0;
6878 }
6879 
6880 static int
6881 mlx5_hw_validate_action_mark(struct rte_eth_dev *dev,
6882 			     const struct rte_flow_action *template_action,
6883 			     const struct rte_flow_action *template_mask,
6884 			     uint64_t action_flags,
6885 			     const struct rte_flow_actions_template_attr *template_attr,
6886 			     struct rte_flow_error *error)
6887 {
6888 	const struct rte_flow_action_mark *mark_mask = template_mask->conf;
6889 	const struct rte_flow_action *action =
6890 		mark_mask && mark_mask->id ? template_action :
6891 		&(const struct rte_flow_action) {
6892 		.type = RTE_FLOW_ACTION_TYPE_MARK,
6893 		.conf = &(const struct rte_flow_action_mark) {
6894 			.id = MLX5_FLOW_MARK_MAX - 1
6895 		}
6896 	};
6897 	const struct rte_flow_attr attr = {
6898 		.ingress = template_attr->ingress,
6899 		.egress = template_attr->egress,
6900 		.transfer = template_attr->transfer
6901 	};
6902 
6903 	return mlx5_flow_validate_action_mark(dev, action, action_flags,
6904 					      &attr, error);
6905 }
6906 
6907 static int
6908 mlx5_hw_validate_action_queue(struct rte_eth_dev *dev,
6909 			      const struct rte_flow_action *template_action,
6910 			      const struct rte_flow_action *template_mask,
6911 			      const struct rte_flow_actions_template_attr *template_attr,
6912 			      uint64_t action_flags,
6913 			      struct rte_flow_error *error)
6914 {
6915 	const struct rte_flow_action_queue *queue_mask = template_mask->conf;
6916 	const struct rte_flow_attr attr = {
6917 		.ingress = template_attr->ingress,
6918 		.egress = template_attr->egress,
6919 		.transfer = template_attr->transfer
6920 	};
6921 	bool masked = queue_mask != NULL && queue_mask->index;
6922 
6923 	if (template_attr->egress || template_attr->transfer)
6924 		return rte_flow_error_set(error, EINVAL,
6925 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6926 					  "QUEUE action supported for ingress only");
6927 	if (masked)
6928 		return mlx5_flow_validate_action_queue(template_action, action_flags, dev,
6929 						       &attr, error);
6930 	else
6931 		return 0;
6932 }
6933 
6934 static int
6935 mlx5_hw_validate_action_rss(struct rte_eth_dev *dev,
6936 			      const struct rte_flow_action *template_action,
6937 			      const struct rte_flow_action *template_mask,
6938 			      const struct rte_flow_actions_template_attr *template_attr,
6939 			      __rte_unused uint64_t action_flags,
6940 			      struct rte_flow_error *error)
6941 {
6942 	const struct rte_flow_action_rss *mask = template_mask->conf;
6943 
6944 	if (template_attr->egress || template_attr->transfer)
6945 		return rte_flow_error_set(error, EINVAL,
6946 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
6947 					  "RSS action supported for ingress only");
6948 	if (mask != NULL)
6949 		return mlx5_validate_action_rss(dev, template_action, error);
6950 	else
6951 		return 0;
6952 }
6953 
6954 static int
6955 mlx5_hw_validate_action_l2_encap(struct rte_eth_dev *dev,
6956 				 const struct rte_flow_action *template_action,
6957 				 const struct rte_flow_action *template_mask,
6958 				 const struct rte_flow_actions_template_attr *template_attr,
6959 				 uint64_t action_flags,
6960 				 struct rte_flow_error *error)
6961 {
6962 	const struct rte_flow_action_vxlan_encap default_action_conf = {
6963 		.definition = (struct rte_flow_item *)
6964 			(struct rte_flow_item [1]) {
6965 			[0] = { .type = RTE_FLOW_ITEM_TYPE_END }
6966 		}
6967 	};
6968 	const struct rte_flow_action *action = template_mask->conf ?
6969 		template_action : &(const struct rte_flow_action) {
6970 			.type = template_mask->type,
6971 			.conf = &default_action_conf
6972 	};
6973 	const struct rte_flow_attr attr = {
6974 		.ingress = template_attr->ingress,
6975 		.egress = template_attr->egress,
6976 		.transfer = template_attr->transfer
6977 	};
6978 
6979 	return mlx5_flow_dv_validate_action_l2_encap(dev, action_flags, action,
6980 						     &attr, error);
6981 }
6982 
6983 static int
6984 mlx5_hw_validate_action_l2_decap(struct rte_eth_dev *dev,
6985 				 const struct rte_flow_action *template_action,
6986 				 const struct rte_flow_action *template_mask,
6987 				 const struct rte_flow_actions_template_attr *template_attr,
6988 				 uint64_t action_flags,
6989 				 struct rte_flow_error *error)
6990 {
6991 	const struct rte_flow_action_vxlan_encap default_action_conf = {
6992 		.definition = (struct rte_flow_item *)
6993 			(struct rte_flow_item [1]) {
6994 				[0] = { .type = RTE_FLOW_ITEM_TYPE_END }
6995 			}
6996 	};
6997 	const struct rte_flow_action *action = template_mask->conf ?
6998 					       template_action : &(const struct rte_flow_action) {
6999 			.type = template_mask->type,
7000 			.conf = &default_action_conf
7001 		};
7002 	const struct rte_flow_attr attr = {
7003 		.ingress = template_attr->ingress,
7004 		.egress = template_attr->egress,
7005 		.transfer = template_attr->transfer
7006 	};
7007 	uint64_t item_flags =
7008 		action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
7009 		MLX5_FLOW_LAYER_VXLAN : 0;
7010 
7011 	return mlx5_flow_dv_validate_action_decap(dev, action_flags, action,
7012 						  item_flags, &attr, error);
7013 }
7014 
7015 static int
7016 mlx5_hw_validate_action_conntrack(struct rte_eth_dev *dev,
7017 				  const struct rte_flow_action *template_action,
7018 				  const struct rte_flow_action *template_mask,
7019 				  const struct rte_flow_actions_template_attr *template_attr,
7020 				  uint64_t action_flags,
7021 				  struct rte_flow_error *error)
7022 {
7023 	RTE_SET_USED(template_action);
7024 	RTE_SET_USED(template_mask);
7025 	RTE_SET_USED(template_attr);
7026 	return mlx5_flow_dv_validate_action_aso_ct(dev, action_flags,
7027 						   MLX5_FLOW_LAYER_OUTER_L4_TCP,
7028 						   false, error);
7029 }
7030 
7031 static int
7032 flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
7033 				  const struct rte_flow_action *mask,
7034 				  struct rte_flow_error *error)
7035 {
7036 	const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
7037 	const struct rte_flow_action_raw_encap *action_conf = action->conf;
7038 
7039 	if (!mask_conf || !mask_conf->size)
7040 		return rte_flow_error_set(error, EINVAL,
7041 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
7042 					  "raw_encap: size must be masked");
7043 	if (!action_conf || !action_conf->size)
7044 		return rte_flow_error_set(error, EINVAL,
7045 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
7046 					  "raw_encap: invalid action configuration");
7047 	if (mask_conf->data && !action_conf->data)
7048 		return rte_flow_error_set(error, EINVAL,
7049 					  RTE_FLOW_ERROR_TYPE_ACTION,
7050 					  action, "raw_encap: masked data is missing");
7051 	return 0;
7052 }
7053 
7054 
7055 static int
7056 flow_hw_validate_action_raw_reformat(struct rte_eth_dev *dev,
7057 				     const struct rte_flow_action *template_action,
7058 				     const struct rte_flow_action *template_mask,
7059 				     const struct
7060 				     rte_flow_actions_template_attr *template_attr,
7061 				     uint64_t *action_flags,
7062 				     struct rte_flow_error *error)
7063 {
7064 	const struct rte_flow_action *encap_action = NULL;
7065 	const struct rte_flow_action *encap_mask = NULL;
7066 	const struct rte_flow_action_raw_decap *raw_decap = NULL;
7067 	const struct rte_flow_action_raw_encap *raw_encap = NULL;
7068 	const struct rte_flow_attr attr = {
7069 		.ingress = template_attr->ingress,
7070 		.egress = template_attr->egress,
7071 		.transfer = template_attr->transfer
7072 	};
7073 	uint64_t item_flags = 0;
7074 	int ret, actions_n = 0;
7075 
7076 	if (template_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
7077 		raw_decap = template_mask->conf ?
7078 			    template_action->conf : &empty_decap;
7079 		if ((template_action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7080 			if ((template_mask + 1)->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
7081 				return rte_flow_error_set(error, EINVAL,
7082 							  RTE_FLOW_ERROR_TYPE_ACTION,
7083 							  template_mask + 1, "invalid mask type");
7084 			encap_action = template_action + 1;
7085 			encap_mask = template_mask + 1;
7086 		}
7087 	} else {
7088 		encap_action = template_action;
7089 		encap_mask = template_mask;
7090 	}
7091 	if (encap_action) {
7092 		raw_encap = encap_action->conf;
7093 		ret = flow_hw_validate_action_raw_encap(encap_action,
7094 							encap_mask, error);
7095 		if (ret)
7096 			return ret;
7097 	}
7098 	return mlx5_flow_dv_validate_action_raw_encap_decap(dev, raw_decap,
7099 							    raw_encap, &attr,
7100 							    action_flags,
7101 							    &actions_n,
7102 							    template_action,
7103 							    item_flags, error);
7104 }
7105 
7106 
7107 
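/**
 * Validate the actions and masks of an actions template.
 *
 * Walks both arrays until the END action, checks that every mask type matches
 * its action type, dispatches per-action validation and accumulates the
 * detected action flags.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the actions template attributes.
 * @param[in] actions
 *   Array of actions (list terminated by the END action).
 * @param[in] masks
 *   Array of action masks (list terminated by the END action).
 * @param[out] act_flags
 *   Detected action flags. May be NULL if the caller does not need them.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */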
7108 static int
7109 mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
7110 			      const struct rte_flow_actions_template_attr *attr,
7111 			      const struct rte_flow_action actions[],
7112 			      const struct rte_flow_action masks[],
7113 			      uint64_t *act_flags,
7114 			      struct rte_flow_error *error)
7115 {
7116 	struct mlx5_priv *priv = dev->data->dev_private;
7117 	const struct rte_flow_action_count *count_mask = NULL;
7118 	bool fixed_cnt = false;
7119 	uint64_t action_flags = 0;
7120 	bool actions_end = false;
7121 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7122 	int table_type;
7123 #endif
7124 	uint16_t i;
7125 	int ret;
7126 	const struct rte_flow_action_ipv6_ext_remove *remove_data;
7127 
7128 	if (!mlx5_hw_ctx_validate(dev, error))
7129 		return -rte_errno;
7130 	/* FDB actions are only valid on the transfer proxy port. */
7131 	if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
7132 		return rte_flow_error_set(error, EINVAL,
7133 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7134 					  NULL,
7135 					  "transfer actions are only valid to proxy port");
7136 	for (i = 0; !actions_end; ++i) {
7137 		const struct rte_flow_action *action = &actions[i];
7138 		const struct rte_flow_action *mask = &masks[i];
7139 
7140 		MLX5_ASSERT(i < MLX5_HW_MAX_ACTS);
7141 		if (action->type != RTE_FLOW_ACTION_TYPE_INDIRECT &&
7142 		    action->type != mask->type)
7143 			return rte_flow_error_set(error, ENOTSUP,
7144 						  RTE_FLOW_ERROR_TYPE_ACTION,
7145 						  action,
7146 						  "mask type does not match action type");
7147 		switch ((int)action->type) {
7148 		case RTE_FLOW_ACTION_TYPE_VOID:
7149 			break;
7150 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
7151 			break;
7152 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
7153 			ret = flow_hw_validate_action_indirect(dev, action,
7154 							       mask,
7155 							       &action_flags,
7156 							       &fixed_cnt,
7157 							       error);
7158 			if (ret < 0)
7159 				return ret;
7160 			break;
7161 		case RTE_FLOW_ACTION_TYPE_FLAG:
7162 			/* TODO: Validation logic */
7163 			action_flags |= MLX5_FLOW_ACTION_FLAG;
7164 			break;
7165 		case RTE_FLOW_ACTION_TYPE_MARK:
7166 			ret = mlx5_hw_validate_action_mark(dev, action, mask,
7167 							   action_flags,
7168 							   attr, error);
7169 			if (ret)
7170 				return ret;
7171 			action_flags |= MLX5_FLOW_ACTION_MARK;
7172 			break;
7173 		case RTE_FLOW_ACTION_TYPE_DROP:
7174 			ret = mlx5_flow_validate_action_drop
7175 				(dev, action_flags,
7176 				 &(struct rte_flow_attr){.egress = attr->egress},
7177 				 error);
7178 			if (ret)
7179 				return ret;
7180 			action_flags |= MLX5_FLOW_ACTION_DROP;
7181 			break;
7182 		case RTE_FLOW_ACTION_TYPE_JUMP:
7183 			/* At the template stage, only the jump to the root table is validated. */
7184 			ret = flow_hw_validate_action_jump(dev, attr, action, mask, error);
7185 			if (ret)
7186 				return ret;
7187 			action_flags |= MLX5_FLOW_ACTION_JUMP;
7188 			break;
7189 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7190 		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
7191 			if (priv->shared_host)
7192 				return rte_flow_error_set(error, ENOTSUP,
7193 							  RTE_FLOW_ERROR_TYPE_ACTION,
7194 							  action,
7195 							  "action not supported in guest port");
7196 			table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
7197 				     ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
7198 				     MLX5DR_TABLE_TYPE_FDB);
7199 			if (!priv->hw_send_to_kernel[table_type])
7200 				return rte_flow_error_set(error, ENOTSUP,
7201 							  RTE_FLOW_ERROR_TYPE_ACTION,
7202 							  action,
7203 							  "action is not available");
7204 			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
7205 			break;
7206 #endif
7207 		case RTE_FLOW_ACTION_TYPE_QUEUE:
7208 			ret = mlx5_hw_validate_action_queue(dev, action, mask,
7209 							    attr, action_flags,
7210 							    error);
7211 			if (ret)
7212 				return ret;
7213 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
7214 			break;
7215 		case RTE_FLOW_ACTION_TYPE_RSS:
7216 			ret = mlx5_hw_validate_action_rss(dev, action, mask,
7217 							  attr, action_flags,
7218 							  error);
7219 			if (ret)
7220 				return ret;
7221 			action_flags |= MLX5_FLOW_ACTION_RSS;
7222 			break;
7223 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7224 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7225 			ret = mlx5_hw_validate_action_l2_encap(dev, action, mask,
7226 							       attr, action_flags,
7227 							       error);
7228 			if (ret)
7229 				return ret;
7230 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
7231 			break;
7232 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7233 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7234 			ret = mlx5_hw_validate_action_l2_decap(dev, action, mask,
7235 							       attr, action_flags,
7236 							       error);
7237 			if (ret)
7238 				return ret;
7239 			action_flags |= MLX5_FLOW_ACTION_DECAP;
7240 			break;
7241 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7242 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7243 			ret = flow_hw_validate_action_raw_reformat(dev, action,
7244 								   mask, attr,
7245 								   &action_flags,
7246 								   error);
7247 			if (ret)
7248 				return ret;
7249 			if (action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
7250 			   (action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7251 				action_flags |= MLX5_FLOW_XCAP_ACTIONS;
7252 				i++;
7253 			}
7254 			break;
7255 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
7256 			ret = flow_hw_validate_action_ipv6_ext_push(dev, action, error);
7257 			if (ret < 0)
7258 				return ret;
7259 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
7260 			break;
7261 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
7262 			remove_data = action->conf;
7263 			/* Remove action must be shared. */
7264 			if (remove_data->type != IPPROTO_ROUTING || !mask) {
7265 				DRV_LOG(ERR, "Only supports shared IPv6 routing remove");
7266 				return -EINVAL;
7267 			}
7268 			action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE;
7269 			break;
7270 		case RTE_FLOW_ACTION_TYPE_METER:
7271 			/* TODO: Validation logic */
7272 			action_flags |= MLX5_FLOW_ACTION_METER;
7273 			break;
7274 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
7275 			ret = flow_hw_validate_action_meter_mark(dev, action, false, error);
7276 			if (ret < 0)
7277 				return ret;
7278 			action_flags |= MLX5_FLOW_ACTION_METER;
7279 			break;
7280 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7281 			ret = flow_hw_validate_action_modify_field(dev, action, mask,
7282 								   error);
7283 			if (ret < 0)
7284 				return ret;
7285 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7286 			break;
7287 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7288 			ret = flow_hw_validate_action_represented_port
7289 					(dev, action, mask, error);
7290 			if (ret < 0)
7291 				return ret;
7292 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7293 			break;
7294 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
7295 			ret = flow_hw_validate_action_port_representor
7296 					(dev, attr, action, mask, error);
7297 			if (ret < 0)
7298 				return ret;
7299 			action_flags |= MLX5_FLOW_ACTION_PORT_REPRESENTOR;
7300 			break;
7301 		case RTE_FLOW_ACTION_TYPE_AGE:
7302 			if (count_mask && count_mask->id)
7303 				fixed_cnt = true;
7304 			ret = flow_hw_validate_action_age(dev, action,
7305 							  action_flags,
7306 							  fixed_cnt, error);
7307 			if (ret < 0)
7308 				return ret;
7309 			action_flags |= MLX5_FLOW_ACTION_AGE;
7310 			break;
7311 		case RTE_FLOW_ACTION_TYPE_COUNT:
7312 			ret = flow_hw_validate_action_count(dev, action, mask,
7313 							    action_flags,
7314 							    error);
7315 			if (ret < 0)
7316 				return ret;
7317 			count_mask = mask->conf;
7318 			action_flags |= MLX5_FLOW_ACTION_COUNT;
7319 			break;
7320 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7321 			ret = mlx5_hw_validate_action_conntrack(dev, action, mask,
7322 								attr, action_flags,
7323 								error);
7324 			if (ret)
7325 				return ret;
7326 			action_flags |= MLX5_FLOW_ACTION_CT;
7327 			break;
7328 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7329 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7330 			break;
7331 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7332 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7333 			break;
7334 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7335 			ret = flow_hw_validate_action_push_vlan
7336 					(dev, attr, action, mask, error);
7337 			if (ret != 0)
7338 				return ret;
7339 			i += is_of_vlan_pcp_present(action) ?
7340 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7341 				MLX5_HW_VLAN_PUSH_VID_IDX;
7342 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7343 			break;
7344 		case RTE_FLOW_ACTION_TYPE_NAT64:
7345 			ret = flow_hw_validate_action_nat64(dev, attr, action, mask,
7346 							    action_flags, error);
7347 			if (ret != 0)
7348 				return ret;
7349 			action_flags |= MLX5_FLOW_ACTION_NAT64;
7350 			break;
7351 		case RTE_FLOW_ACTION_TYPE_END:
7352 			actions_end = true;
7353 			break;
7354 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7355 			ret = flow_hw_validate_action_default_miss(dev, attr,
7356 								   action_flags, error);
7357 			if (ret < 0)
7358 				return ret;
7359 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7360 			break;
7361 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
7362 			ret = mlx5_flow_validate_action_jump_to_table_index(action, mask, error);
7363 			if (ret < 0)
7364 				return ret;
7365 			action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX;
7366 			break;
7367 		default:
7368 			return rte_flow_error_set(error, ENOTSUP,
7369 						  RTE_FLOW_ERROR_TYPE_ACTION,
7370 						  action,
7371 						  "action not supported in template API");
7372 		}
7373 	}
7374 	if (act_flags != NULL)
7375 		*act_flags = action_flags;
7376 	return 0;
7377 }
7378 
7379 static int
7380 flow_hw_actions_validate(struct rte_eth_dev *dev,
7381 			 const struct rte_flow_actions_template_attr *attr,
7382 			 const struct rte_flow_action actions[],
7383 			 const struct rte_flow_action masks[],
7384 			 struct rte_flow_error *error)
7385 {
7386 	return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, error);
7387 }
7388 
7389 
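/* Direct mapping from rte_flow action types to mlx5dr action types. */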
7390 static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
7391 	[RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
7392 	[RTE_FLOW_ACTION_TYPE_FLAG] = MLX5DR_ACTION_TYP_TAG,
7393 	[RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
7394 	[RTE_FLOW_ACTION_TYPE_JUMP] = MLX5DR_ACTION_TYP_TBL,
7395 	[RTE_FLOW_ACTION_TYPE_QUEUE] = MLX5DR_ACTION_TYP_TIR,
7396 	[RTE_FLOW_ACTION_TYPE_RSS] = MLX5DR_ACTION_TYP_TIR,
7397 	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
7398 	[RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
7399 	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
7400 	[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
7401 	[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
7402 	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
7403 	[RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = MLX5DR_ACTION_TYP_MISS,
7404 	[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
7405 	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
7406 	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
7407 	[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
7408 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
7409 	[RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
7410 	[RTE_FLOW_ACTION_TYPE_NAT64] = MLX5DR_ACTION_TYP_NAT64,
7411 	[RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX] = MLX5DR_ACTION_TYP_JUMP_TO_MATCHER,
7412 };
7413 
7414 static inline void
7415 action_template_set_type(struct rte_flow_actions_template *at,
7416 			 enum mlx5dr_action_type *action_types,
7417 			 unsigned int action_src, uint16_t *curr_off,
7418 			 enum mlx5dr_action_type type)
7419 {
7420 	at->dr_off[action_src] = *curr_off;
7421 	action_types[*curr_off] = type;
7422 	*curr_off = *curr_off + 1;
7423 }
7424 
7425 static int
7426 flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
7427 					  enum mlx5dr_action_type *action_types,
7428 					  uint16_t *curr_off, uint16_t *cnt_off,
7429 					  struct rte_flow_actions_template *at)
7430 {
7431 	switch (type) {
7432 	case RTE_FLOW_ACTION_TYPE_RSS:
7433 		action_template_set_type(at, action_types, action_src, curr_off,
7434 					 MLX5DR_ACTION_TYP_TIR);
7435 		break;
7436 	case RTE_FLOW_ACTION_TYPE_AGE:
7437 	case RTE_FLOW_ACTION_TYPE_COUNT:
7438 		/*
7439 		 * Both AGE and COUNT actions need a counter: the first one fills
7440 		 * the action_types array and the second one only saves the offset.
7441 		 */
7442 		if (*cnt_off == UINT16_MAX) {
7443 			*cnt_off = *curr_off;
7444 			action_template_set_type(at, action_types,
7445 						 action_src, curr_off,
7446 						 MLX5DR_ACTION_TYP_CTR);
7447 		}
7448 		at->dr_off[action_src] = *cnt_off;
7449 		break;
7450 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7451 		action_template_set_type(at, action_types, action_src, curr_off,
7452 					 MLX5DR_ACTION_TYP_ASO_CT);
7453 		break;
7454 	case RTE_FLOW_ACTION_TYPE_QUOTA:
7455 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
7456 		action_template_set_type(at, action_types, action_src, curr_off,
7457 					 MLX5DR_ACTION_TYP_ASO_METER);
7458 		break;
7459 	default:
7460 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
7461 		return -EINVAL;
7462 	}
7463 	return 0;
7464 }
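
/*
 * Illustrative sketch (hypothetical template): when a template carries both
 * AGE and COUNT, only one MLX5DR_ACTION_TYP_CTR slot is emitted and both
 * actions record the same dr_off offset.
 *
 * @code{.c}
 * // template actions: { AGE, COUNT, QUEUE, END }
 * // DR action types:  { CTR, TIR }
 * // at->dr_off:       { 0,   0,   1 }
 * @endcode
 */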
7465 
7466 
7467 static int
7468 flow_hw_template_actions_list(struct rte_flow_actions_template *at,
7469 			      unsigned int action_src,
7470 			      enum mlx5dr_action_type *action_types,
7471 			      uint16_t *curr_off, uint16_t *cnt_off)
7472 {
7473 	int ret;
7474 	const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
7475 	enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
7476 	const union {
7477 		struct mlx5_indlst_legacy *legacy;
7478 		struct rte_flow_action_list_handle *handle;
7479 	} indlst_obj = { .handle = indlst_conf->handle };
7480 	enum mlx5dr_action_type type;
7481 
7482 	switch (list_type) {
7483 	case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
7484 		ret = flow_hw_dr_actions_template_handle_shared
7485 			(indlst_obj.legacy->legacy_type, action_src,
7486 			 action_types, curr_off, cnt_off, at);
7487 		if (ret)
7488 			return ret;
7489 		break;
7490 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
7491 		action_template_set_type(at, action_types, action_src, curr_off,
7492 					 MLX5DR_ACTION_TYP_DEST_ARRAY);
7493 		break;
7494 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
7495 		type = ((struct mlx5_hw_encap_decap_action *)
7496 			(indlst_conf->handle))->action_type;
7497 		action_template_set_type(at, action_types, action_src, curr_off, type);
7498 		break;
7499 	default:
7500 		DRV_LOG(ERR, "Unsupported indirect list type");
7501 		return -EINVAL;
7502 	}
7503 	return 0;
7504 }
7505 
7506 /**
7507  * Create DR action template based on a provided sequence of flow actions.
7508  *
7509  * @param[in] dev
7510  *   Pointer to the rte_eth_dev structure.
7511  * @param[in] at
7512  *   Pointer to flow actions template to be updated.
7513  * @param[out] action_types
7514  *   Action types array to be filled.
7515  * @param[out] tmpl_flags
7516  *   Template DR flags to be filled.
7517  *
7518  * @return
7519  *   0 on success, a negative errno value otherwise and rte_errno is set.
7520  */
7521 static int
7522 flow_hw_parse_flow_actions_to_dr_actions(struct rte_eth_dev *dev,
7523 					struct rte_flow_actions_template *at,
7524 					enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS],
7525 					uint32_t *tmpl_flags __rte_unused)
7526 {
7527 	unsigned int i;
7528 	uint16_t curr_off;
7529 	enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
7530 	uint16_t reformat_off = UINT16_MAX;
7531 	uint16_t mhdr_off = UINT16_MAX;
7532 	uint16_t recom_off = UINT16_MAX;
7533 	uint16_t cnt_off = UINT16_MAX;
7534 	enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
7535 	int ret;
7536 
7537 	for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
7538 		const struct rte_flow_action_raw_encap *raw_encap_data;
7539 		size_t data_size;
7540 		enum mlx5dr_action_type type;
7541 
7542 		if (curr_off >= MLX5_HW_MAX_ACTS)
7543 			goto err_actions_num;
7544 		switch ((int)at->actions[i].type) {
7545 		case RTE_FLOW_ACTION_TYPE_VOID:
7546 			break;
7547 		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
7548 			ret = flow_hw_template_actions_list(at, i, action_types,
7549 							    &curr_off, &cnt_off);
7550 			if (ret)
7551 				return ret;
7552 			break;
7553 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
7554 			ret = flow_hw_dr_actions_template_handle_shared
7555 				(at->masks[i].type, i, action_types,
7556 				 &curr_off, &cnt_off, at);
7557 			if (ret)
7558 				return ret;
7559 			break;
7560 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7561 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7562 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7563 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7564 			MLX5_ASSERT(reformat_off == UINT16_MAX);
7565 			reformat_off = curr_off++;
7566 			reformat_act_type = mlx5_hw_dr_action_types[at->actions[i].type];
7567 			break;
7568 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
7569 			MLX5_ASSERT(recom_off == UINT16_MAX);
7570 			recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
7571 			recom_off = curr_off++;
7572 			break;
7573 		case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
7574 			MLX5_ASSERT(recom_off == UINT16_MAX);
7575 			recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
7576 			recom_off = curr_off++;
7577 			break;
7578 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7579 			raw_encap_data = at->actions[i].conf;
7580 			data_size = raw_encap_data->size;
7581 			if (reformat_off != UINT16_MAX) {
7582 				reformat_act_type = data_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
7583 					MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
7584 					MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
7585 			} else {
7586 				reformat_off = curr_off++;
7587 				reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
7588 			}
7589 			break;
7590 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7591 			reformat_off = curr_off++;
7592 			reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
7593 			break;
7594 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7595 			if (mhdr_off == UINT16_MAX) {
7596 				mhdr_off = curr_off++;
7597 				type = mlx5_hw_dr_action_types[at->actions[i].type];
7598 				action_types[mhdr_off] = type;
7599 			}
7600 			break;
7601 		case RTE_FLOW_ACTION_TYPE_METER:
7602 			at->dr_off[i] = curr_off;
7603 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
7604 			if (curr_off >= MLX5_HW_MAX_ACTS)
7605 				goto err_actions_num;
7606 			action_types[curr_off++] = MLX5DR_ACTION_TYP_TBL;
7607 			break;
7608 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7609 			type = mlx5_hw_dr_action_types[at->actions[i].type];
7610 			at->dr_off[i] = curr_off;
7611 			action_types[curr_off++] = type;
7612 			i += is_of_vlan_pcp_present(at->actions + i) ?
7613 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7614 				MLX5_HW_VLAN_PUSH_VID_IDX;
7615 			break;
7616 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
7617 			at->dr_off[i] = curr_off;
7618 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
7619 			if (curr_off >= MLX5_HW_MAX_ACTS)
7620 				goto err_actions_num;
7621 			break;
7622 		case RTE_FLOW_ACTION_TYPE_AGE:
7623 		case RTE_FLOW_ACTION_TYPE_COUNT:
7624 			/*
7625 			 * Both AGE and COUNT actions need a counter: the first
7626 			 * one fills the action_types array and the second one
7627 			 * only saves the offset.
7628 			 */
7629 			if (cnt_off == UINT16_MAX) {
7630 				cnt_off = curr_off++;
7631 				action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
7632 			}
7633 			at->dr_off[i] = cnt_off;
7634 			break;
7635 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7636 			at->dr_off[i] = curr_off;
7637 			action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS;
7638 			break;
7639 		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
7640 			*tmpl_flags |= MLX5DR_ACTION_TEMPLATE_FLAG_RELAXED_ORDER;
7641 			at->dr_off[i] = curr_off;
7642 			action_types[curr_off++] = MLX5DR_ACTION_TYP_JUMP_TO_MATCHER;
7643 			break;
7644 		default:
7645 			type = mlx5_hw_dr_action_types[at->actions[i].type];
7646 			at->dr_off[i] = curr_off;
7647 			action_types[curr_off++] = type;
7648 			break;
7649 		}
7650 	}
7651 	if (curr_off >= MLX5_HW_MAX_ACTS)
7652 		goto err_actions_num;
7653 	if (mhdr_off != UINT16_MAX)
7654 		at->mhdr_off = mhdr_off;
7655 	if (reformat_off != UINT16_MAX) {
7656 		at->reformat_off = reformat_off;
7657 		action_types[reformat_off] = reformat_act_type;
7658 	}
7659 	if (recom_off != UINT16_MAX) {
7660 		at->recom_off = recom_off;
7661 		action_types[recom_off] = recom_type;
7662 	}
7663 	at->dr_actions_num = curr_off;
7664 
7665 	/* Create the SRH flex parser needed for the push/remove anchor. */
7666 	if ((recom_type == MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT ||
7667 	     recom_type == MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) &&
7668 	    (ret = mlx5_alloc_srh_flex_parser(dev))) {
7669 		DRV_LOG(ERR, "Failed to create srv6 flex parser");
7670 		return ret;
7671 	}
7672 	return 0;
7673 err_actions_num:
7674 	DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed in template",
7675 		curr_off, MLX5_HW_MAX_ACTS);
7676 	return -EINVAL;
7677 }
7678 
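/**
 * Replace the OF_SET_VLAN_VID action at index set_vlan_vid_ix in the template
 * action and mask arrays with an equivalent MODIFY_FIELD action writing
 * RTE_FLOW_FIELD_VLAN_ID. When the template value is masked, the VLAN ID is
 * embedded into the spec; otherwise it is filled per rule at construction time
 * by flow_hw_set_vlan_vid_construct().
 */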
7679 static void
7680 flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
7681 		     struct rte_flow_action *ra,
7682 		     struct rte_flow_action *rm,
7683 		     struct rte_flow_action_modify_field *spec,
7684 		     struct rte_flow_action_modify_field *mask,
7685 		     int set_vlan_vid_ix)
7686 {
7687 	struct rte_flow_error error;
7688 	const bool masked = rm[set_vlan_vid_ix].conf &&
7689 		(((const struct rte_flow_action_of_set_vlan_vid *)
7690 			rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
7691 	const struct rte_flow_action_of_set_vlan_vid *conf =
7692 		ra[set_vlan_vid_ix].conf;
7693 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
7694 					       NULL, &error);
7695 	*spec = (typeof(*spec)) {
7696 		.operation = RTE_FLOW_MODIFY_SET,
7697 		.dst = {
7698 			.field = RTE_FLOW_FIELD_VLAN_ID,
7699 			.level = 0, .offset = 0,
7700 		},
7701 		.src = {
7702 			.field = RTE_FLOW_FIELD_VALUE,
7703 		},
7704 		.width = width,
7705 	};
7706 	*mask = (typeof(*mask)) {
7707 		.operation = RTE_FLOW_MODIFY_SET,
7708 		.dst = {
7709 			.field = RTE_FLOW_FIELD_VLAN_ID,
7710 			.level = 0xff, .offset = 0xffffffff,
7711 		},
7712 		.src = {
7713 			.field = RTE_FLOW_FIELD_VALUE,
7714 		},
7715 		.width = 0xffffffff,
7716 	};
7717 	if (masked) {
7718 		uint32_t mask_val = 0xffffffff;
7719 
7720 		rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid));
7721 		rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val));
7722 	}
7723 	ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
7724 	ra[set_vlan_vid_ix].conf = spec;
7725 	rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
7726 	rm[set_vlan_vid_ix].conf = mask;
7727 }
7728 
7729 static __rte_always_inline int
7730 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
7731 			       struct mlx5_modification_cmd *mhdr_cmd,
7732 			       struct mlx5_action_construct_data *act_data,
7733 			       const struct mlx5_hw_actions *hw_acts,
7734 			       const struct rte_flow_action *action)
7735 {
7736 	struct rte_flow_error error;
7737 	rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
7738 			   action->conf)->vlan_vid;
7739 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
7740 					       NULL, &error);
7741 	struct rte_flow_action_modify_field conf = {
7742 		.operation = RTE_FLOW_MODIFY_SET,
7743 		.dst = {
7744 			.field = RTE_FLOW_FIELD_VLAN_ID,
7745 			.level = 0, .offset = 0,
7746 		},
7747 		.src = {
7748 			.field = RTE_FLOW_FIELD_VALUE,
7749 		},
7750 		.width = width,
7751 	};
7752 	struct rte_flow_action modify_action = {
7753 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7754 		.conf = &conf
7755 	};
7756 
7757 	rte_memcpy(conf.src.value, &vid, sizeof(vid));
7758 	return flow_hw_modify_field_construct(mhdr_cmd, act_data, hw_acts, &modify_action);
7759 }
7760 
7761 static int
7762 flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
7763 			  struct rte_flow_item_flex_handle *handle,
7764 			  uint8_t *flex_item)
7765 {
7766 	int index = mlx5_flex_acquire_index(dev, handle, false);
7767 
7768 	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
7769 	if (index < 0)
7770 		return -1;
7771 	if (!(*flex_item & RTE_BIT32(index))) {
7772 		/* Don't count the same flex item again. */
7773 		if (mlx5_flex_acquire_index(dev, handle, true) != index)
7774 			MLX5_ASSERT(false);
7775 		*flex_item |= (uint8_t)RTE_BIT32(index);
7776 	}
7777 	return 0;
7778 }
7779 
7780 static void
7781 flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
7782 {
7783 	while (*flex_item) {
7784 		int index = rte_bsf32(*flex_item);
7785 
7786 		mlx5_flex_release_index(dev, index);
7787 		*flex_item &= ~(uint8_t)RTE_BIT32(index);
7788 	}
7789 }

7790 static __rte_always_inline void
7791 flow_hw_actions_template_replace_container(const
7792 					   struct rte_flow_action *actions,
7793 					   const
7794 					   struct rte_flow_action *masks,
7795 					   struct rte_flow_action *new_actions,
7796 					   struct rte_flow_action *new_masks,
7797 					   struct rte_flow_action **ra,
7798 					   struct rte_flow_action **rm,
7799 					   uint32_t act_num)
7800 {
7801 	memcpy(new_actions, actions, sizeof(actions[0]) * act_num);
7802 	memcpy(new_masks, masks, sizeof(masks[0]) * act_num);
7803 	*ra = (void *)(uintptr_t)new_actions;
7804 	*rm = (void *)(uintptr_t)new_masks;
7805 }
7806 
7807 /* These actions are copied into the action template by rte_flow_conv(). */
7808 
7809 static const struct rte_flow_action rx_meta_copy_action =  {
7810 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7811 	.conf = &(struct rte_flow_action_modify_field){
7812 		.operation = RTE_FLOW_MODIFY_SET,
7813 		.dst = {
7814 			.field = (enum rte_flow_field_id)
7815 				MLX5_RTE_FLOW_FIELD_META_REG,
7816 			.tag_index = REG_B,
7817 		},
7818 		.src = {
7819 			.field = (enum rte_flow_field_id)
7820 				MLX5_RTE_FLOW_FIELD_META_REG,
7821 			.tag_index = REG_C_1,
7822 		},
7823 		.width = 32,
7824 	}
7825 };
7826 
7827 static const struct rte_flow_action rx_meta_copy_mask = {
7828 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7829 	.conf = &(struct rte_flow_action_modify_field){
7830 		.operation = RTE_FLOW_MODIFY_SET,
7831 		.dst = {
7832 			.field = (enum rte_flow_field_id)
7833 				MLX5_RTE_FLOW_FIELD_META_REG,
7834 			.level = UINT8_MAX,
7835 			.tag_index = UINT8_MAX,
7836 			.offset = UINT32_MAX,
7837 		},
7838 		.src = {
7839 			.field = (enum rte_flow_field_id)
7840 				MLX5_RTE_FLOW_FIELD_META_REG,
7841 			.level = UINT8_MAX,
7842 			.tag_index = UINT8_MAX,
7843 			.offset = UINT32_MAX,
7844 		},
7845 		.width = UINT32_MAX,
7846 	}
7847 };
7848 
7849 static const struct rte_flow_action quota_color_inc_action = {
7850 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7851 	.conf = &(struct rte_flow_action_modify_field) {
7852 		.operation = RTE_FLOW_MODIFY_ADD,
7853 		.dst = {
7854 			.field = RTE_FLOW_FIELD_METER_COLOR,
7855 			.level = 0, .offset = 0
7856 		},
7857 		.src = {
7858 			.field = RTE_FLOW_FIELD_VALUE,
7859 			.level = 1,
7860 			.offset = 0,
7861 		},
7862 		.width = 2
7863 	}
7864 };
7865 
7866 static const struct rte_flow_action quota_color_inc_mask = {
7867 	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7868 	.conf = &(struct rte_flow_action_modify_field) {
7869 		.operation = RTE_FLOW_MODIFY_ADD,
7870 		.dst = {
7871 			.field = RTE_FLOW_FIELD_METER_COLOR,
7872 			.level = UINT8_MAX,
7873 			.tag_index = UINT8_MAX,
7874 			.offset = UINT32_MAX,
7875 		},
7876 		.src = {
7877 			.field = RTE_FLOW_FIELD_VALUE,
7878 			.level = 3,
7879 			.offset = 0
7880 		},
7881 		.width = UINT32_MAX
7882 	}
7883 };
7884 
7885 /**
7886  * Create flow action template.
7887  *
7888  * @param[in] dev
7889  *   Pointer to the rte_eth_dev structure.
7890  * @param[in] attr
7891  *   Pointer to the action template attributes.
7892  * @param[in] actions
7893  *   Associated actions (list terminated by the END action).
7894  * @param[in] masks
7895  *   List of actions that marks which of the action's members are constant.
7896  * @param[in] nt_mode
7897  *   Non-template mode flag.
7898  * @param[out] error
7899  *   Pointer to error structure.
7900  *
7901  * @return
7902  *   Action template pointer on success, NULL otherwise and rte_errno is set.
7903  */
7904 static struct rte_flow_actions_template *
7905 __flow_hw_actions_template_create(struct rte_eth_dev *dev,
7906 			const struct rte_flow_actions_template_attr *attr,
7907 			const struct rte_flow_action actions[],
7908 			const struct rte_flow_action masks[],
7909 			bool nt_mode,
7910 			struct rte_flow_error *error)
7911 {
7912 	struct mlx5_priv *priv = dev->data->dev_private;
7913 	int len, act_len, mask_len;
7914 	int orig_act_len;
7915 	unsigned int act_num;
7916 	unsigned int i;
7917 	struct rte_flow_actions_template *at = NULL;
7918 	uint16_t pos;
7919 	uint64_t action_flags = 0;
7920 	struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
7921 	struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
7922 	struct rte_flow_action *ra = (void *)(uintptr_t)actions;
7923 	struct rte_flow_action *rm = (void *)(uintptr_t)masks;
7924 	int set_vlan_vid_ix = -1;
7925 	struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
7926 	struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
7927 	struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
7928 	struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
7929 	uint32_t expand_mf_num = 0;
7930 	uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
7931 	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { MLX5DR_ACTION_TYP_LAST };
7932 	uint32_t tmpl_flags = 0;
7933 	int ret;
7934 
7935 	if (!nt_mode && mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
7936 						      &action_flags, error))
7937 		return NULL;
7938 	for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
7939 		switch (ra[i].type) {
7940 		/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
7941 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7942 			i += is_of_vlan_pcp_present(ra + i) ?
7943 				MLX5_HW_VLAN_PUSH_PCP_IDX :
7944 				MLX5_HW_VLAN_PUSH_VID_IDX;
7945 			break;
7946 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7947 			set_vlan_vid_ix = i;
7948 			break;
7949 		default:
7950 			break;
7951 		}
7952 	}
7953 	/*
7954 	 * Count flow actions to allocate required space for storing DR offsets and to check
7955 	 * that the temporary buffer will not be overrun.
7956 	 */
7957 	act_num = i + 1;
7958 	if (act_num >= MLX5_HW_MAX_ACTS) {
7959 		rte_flow_error_set(error, EINVAL,
7960 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
7961 		return NULL;
7962 	}
7963 	if (set_vlan_vid_ix != -1) {
7964 		/* If the temporary action buffer was not used yet, copy template actions into it. */
7965 		if (ra == actions)
7966 			flow_hw_actions_template_replace_container(actions,
7967 								   masks,
7968 								   tmp_action,
7969 								   tmp_mask,
7970 								   &ra, &rm,
7971 								   act_num);
7972 		flow_hw_set_vlan_vid(dev, ra, rm,
7973 				     &set_vlan_vid_spec, &set_vlan_vid_mask,
7974 				     set_vlan_vid_ix);
7975 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7976 	}
7977 	if (action_flags & MLX5_FLOW_ACTION_QUOTA) {
7978 		mf_actions[expand_mf_num] = quota_color_inc_action;
7979 		mf_masks[expand_mf_num] = quota_color_inc_mask;
7980 		expand_mf_num++;
7981 	}
7982 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
7983 	    priv->sh->config.dv_esw_en &&
7984 	    (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
7985 		/* Insert META copy */
7986 		mf_actions[expand_mf_num] = rx_meta_copy_action;
7987 		mf_masks[expand_mf_num] = rx_meta_copy_mask;
7988 		expand_mf_num++;
7989 	}
7990 	if (expand_mf_num) {
7991 		if (act_num + expand_mf_num > MLX5_HW_MAX_ACTS) {
7992 			rte_flow_error_set(error, E2BIG,
7993 					   RTE_FLOW_ERROR_TYPE_ACTION,
7994 					   NULL, "cannot expand: too many actions");
7995 			return NULL;
7996 		}
7997 		if (ra == actions)
7998 			flow_hw_actions_template_replace_container(actions,
7999 								   masks,
8000 								   tmp_action,
8001 								   tmp_mask,
8002 								   &ra, &rm,
8003 								   act_num);
8004 		/* Application should make sure only one Q/RSS exists in one rule. */
8005 		pos = flow_hw_template_expand_modify_field(ra, rm,
8006 							   mf_actions,
8007 							   mf_masks,
8008 							   action_flags,
8009 							   act_num,
8010 							   expand_mf_num);
8011 		if (pos == MLX5_HW_EXPAND_MH_FAILED) {
8012 			rte_flow_error_set(error, ENOMEM,
8013 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8014 					   NULL, "modify header expansion failed");
8015 			return NULL;
8016 		}
8017 		act_num += expand_mf_num;
8018 		for (i = pos + expand_mf_num; i < act_num; i++)
8019 			src_off[i] += expand_mf_num;
8020 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
8021 	}
8022 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
8023 	if (act_len <= 0)
8024 		return NULL;
8025 	len = RTE_ALIGN(act_len, 16);
8026 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);
8027 	if (mask_len <= 0)
8028 		return NULL;
8029 	len += RTE_ALIGN(mask_len, 16);
8030 	len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
8031 	len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
8032 	orig_act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, error);
8033 	if (orig_act_len <= 0)
8034 		return NULL;
8035 	len += RTE_ALIGN(orig_act_len, 16);
8036 	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
8037 			 RTE_CACHE_LINE_SIZE, rte_socket_id());
8038 	if (!at) {
8039 		rte_flow_error_set(error, ENOMEM,
8040 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8041 				   NULL,
8042 				   "cannot allocate action template");
8043 		return NULL;
8044 	}
8045 	/* Actions are stored in the first part. */
8046 	at->attr = *attr;
8047 	at->actions = (struct rte_flow_action *)(at + 1);
8048 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,
8049 				len, ra, error);
8050 	if (act_len <= 0)
8051 		goto error;
8052 	/* Masks are stored in the second part. */
8053 	at->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);
8054 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
8055 				 len - act_len, rm, error);
8056 	if (mask_len <= 0)
8057 		goto error;
8058 	/* DR action offsets are stored in the third part. */
8059 	at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
8060 	at->src_off = RTE_PTR_ADD(at->dr_off,
8061 				  RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
8062 	memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
8063 	at->orig_actions = RTE_PTR_ADD(at->src_off,
8064 				       RTE_ALIGN(act_num * sizeof(*at->src_off), 16));
8065 	orig_act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->orig_actions, orig_act_len,
8066 				     actions, error);
8067 	if (orig_act_len <= 0)
8068 		goto error;
8069 	at->actions_num = act_num;
8070 	for (i = 0; i < at->actions_num; ++i)
8071 		at->dr_off[i] = UINT16_MAX;
8072 	at->reformat_off = UINT16_MAX;
8073 	at->mhdr_off = UINT16_MAX;
8074 	at->recom_off = UINT16_MAX;
8075 	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
8076 	     actions++, masks++, i++) {
8077 		const struct rte_flow_action_modify_field *info;
8078 
8079 		switch (actions->type) {
8080 		/*
8081 		 * The mlx5 PMD stores the indirect action index directly in the action conf pointer,
8082 		 * but rte_flow_conv() copies the data that the conf pointer references.
8083 		 * Restore the indirect action index from the original action conf here.
8084 		 */
8085 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
8086 			at->actions[i].conf = ra[i].conf;
8087 			at->masks[i].conf = rm[i].conf;
8088 			break;
8089 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
8090 			info = actions->conf;
8091 			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
8092 			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
8093 						       &at->flex_item)) ||
8094 			    (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
8095 			     flow_hw_flex_item_acquire(dev, info->src.flex_handle,
8096 						       &at->flex_item)))
8097 				goto error;
8098 			break;
8099 		default:
8100 			break;
8101 		}
8102 	}
8103 	ret = flow_hw_parse_flow_actions_to_dr_actions(dev, at, action_types, &tmpl_flags);
8104 	if (ret)
8105 		goto error;
8106 	at->action_flags = action_flags;
8107 	/* In non-template mode there is no need to create the DR template. */
8108 	if (nt_mode)
8109 		return at;
8110 	at->tmpl = mlx5dr_action_template_create(action_types, tmpl_flags);
8111 	if (!at->tmpl) {
8112 		DRV_LOG(ERR, "Failed to create DR action template: %d", rte_errno);
8113 		goto error;
8114 	}
8115 	rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
8116 	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
8117 	return at;
8118 error:
8119 	if (at) {
8120 		mlx5_free(at);
8121 	}
8122 	rte_flow_error_set(error, rte_errno,
8123 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8124 			   "Failed to create action template");
8125 	return NULL;
8126 }
8127 
8128 /**
8129  * Create flow action template.
8130  *
8131  * @param[in] dev
8132  *   Pointer to the rte_eth_dev structure.
8133  * @param[in] attr
8134  *   Pointer to the action template attributes.
8135  * @param[in] actions
8136  *   Associated actions (list terminated by the END action).
8137  * @param[in] masks
8138  *   List of actions that marks which of the action's members are constant.
8139  * @param[out] error
8140  *   Pointer to error structure.
8141  *
8142  * @return
8143  *   Action template pointer on success, NULL otherwise and rte_errno is set.
8144  */
8145 static struct rte_flow_actions_template *
8146 flow_hw_actions_template_create(struct rte_eth_dev *dev,
8147 			const struct rte_flow_actions_template_attr *attr,
8148 			const struct rte_flow_action actions[],
8149 			const struct rte_flow_action masks[],
8150 			struct rte_flow_error *error)
8151 {
8152 	return __flow_hw_actions_template_create(dev, attr, actions, masks, false, error);
8153 }
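
/*
 * Usage sketch from the application side (illustration only, not part of the
 * driver): the callback above implements rte_flow_actions_template_create().
 * The masks array marks which action fields are fixed in the template and
 * which are provided per rule at enqueue time, e.g. a QUEUE action whose
 * queue index is given per rule:
 *
 * @code{.c}
 * const struct rte_flow_actions_template_attr attr = { .ingress = 1 };
 * const struct rte_flow_action actions[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * const struct rte_flow_action masks[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_QUEUE }, // conf == NULL: queue index set per rule
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow_actions_template *at =
 *     rte_flow_actions_template_create(port_id, &attr, actions, masks, &err);
 * @endcode
 */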
8154 
8155 /**
8156  * Destroy flow action template.
8157  *
8158  * @param[in] dev
8159  *   Pointer to the rte_eth_dev structure.
8160  * @param[in] template
8161  *   Pointer to the action template to be destroyed.
8162  * @param[out] error
8163  *   Pointer to error structure.
8164  *
8165  * @return
8166  *   0 on success, a negative errno value otherwise and rte_errno is set.
8167  */
8168 static int
8169 flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
8170 				 struct rte_flow_actions_template *template,
8171 				 struct rte_flow_error *error __rte_unused)
8172 				 struct rte_flow_error *error)
8173 	uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
8174 			MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
8175 
8176 	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
8177 		DRV_LOG(WARNING, "Action template %p is still in use.",
8178 			(void *)template);
8179 		return rte_flow_error_set(error, EBUSY,
8180 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8181 				   NULL,
8182 				   "action template is in use");
8183 	}
8184 	if (template->action_flags & flag)
8185 		mlx5_free_srh_flex_parser(dev);
8186 	LIST_REMOVE(template, next);
8187 	flow_hw_flex_item_release(dev, &template->flex_item);
8188 	if (template->tmpl)
8189 		mlx5dr_action_template_destroy(template->tmpl);
8190 	mlx5_free(template);
8191 	return 0;
8192 }
8193 
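
/*
 * Teardown note (illustration, assuming the usual async flow API call order):
 * the reference count checked above is taken by every template table that uses
 * this actions template, so the tables must be destroyed before the template:
 *
 * @code{.c}
 * rte_flow_template_table_destroy(port_id, tbl, &err);
 * rte_flow_actions_template_destroy(port_id, at, &err); // refcnt back to 1, succeeds
 * @endcode
 */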
8194 static struct rte_flow_item *
8195 flow_hw_prepend_item(const struct rte_flow_item *items,
8196 		     const uint32_t nb_items,
8197 		     const struct rte_flow_item *new_item,
8198 		     struct rte_flow_error *error)
8199 {
8200 	struct rte_flow_item *copied_items;
8201 	size_t size;
8202 
8203 	/* Allocate new array of items. */
8204 	size = sizeof(*copied_items) * (nb_items + 1);
8205 	copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());
8206 	if (!copied_items) {
8207 		rte_flow_error_set(error, ENOMEM,
8208 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8209 				   NULL,
8210 				   "cannot allocate item template");
8211 		return NULL;
8212 	}
8213 	/* Put new item at the beginning and copy the rest. */
8214 	copied_items[0] = *new_item;
8215 	rte_memcpy(&copied_items[1], items, sizeof(*items) * nb_items);
8216 	return copied_items;
8217 }
8218 
8219 static int
8220 flow_hw_item_compare_field_validate(enum rte_flow_field_id arg_field,
8221 				    enum rte_flow_field_id base_field,
8222 				    struct rte_flow_error *error)
8223 {
8224 	switch (arg_field) {
8225 	case RTE_FLOW_FIELD_TAG:
8226 	case RTE_FLOW_FIELD_META:
8227 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8228 		break;
8229 	case RTE_FLOW_FIELD_RANDOM:
8230 		if (base_field == RTE_FLOW_FIELD_VALUE)
8231 			return 0;
8232 		return rte_flow_error_set(error, EINVAL,
8233 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8234 					  NULL,
8235 					  "compare random is supported only with immediate value");
8236 	default:
8237 		return rte_flow_error_set(error, ENOTSUP,
8238 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8239 					  NULL,
8240 					  "compare item argument field is not supported");
8241 	}
8242 	switch (base_field) {
8243 	case RTE_FLOW_FIELD_TAG:
8244 	case RTE_FLOW_FIELD_META:
8245 	case RTE_FLOW_FIELD_VALUE:
8246 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8247 		break;
8248 	default:
8249 		return rte_flow_error_set(error, ENOTSUP,
8250 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8251 					  NULL,
8252 					  "compare item base field is not supported");
8253 	}
8254 	return 0;
8255 }
8256 
8257 static inline uint32_t
8258 flow_hw_item_compare_width_supported(enum rte_flow_field_id field)
8259 {
8260 	switch (field) {
8261 	case RTE_FLOW_FIELD_TAG:
8262 	case RTE_FLOW_FIELD_META:
8263 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8264 		return 32;
8265 	case RTE_FLOW_FIELD_RANDOM:
8266 		return 16;
8267 	default:
8268 		break;
8269 	}
8270 	return 0;
8271 }
8272 
8273 static int
8274 flow_hw_validate_item_compare(const struct rte_flow_item *item,
8275 			      struct rte_flow_error *error)
8276 {
8277 	const struct rte_flow_item_compare *comp_m = item->mask;
8278 	const struct rte_flow_item_compare *comp_v = item->spec;
8279 	int ret;
8280 
8281 	if (unlikely(!comp_m))
8282 		return rte_flow_error_set(error, EINVAL,
8283 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8284 				   NULL,
8285 				   "compare item mask is missing");
8286 	if (comp_m->width != UINT32_MAX)
8287 		return rte_flow_error_set(error, EINVAL,
8288 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8289 				   NULL,
8290 				   "compare item only supports full mask");
8291 	ret = flow_hw_item_compare_field_validate(comp_m->a.field,
8292 						  comp_m->b.field, error);
8293 	if (ret < 0)
8294 		return ret;
8295 	if (comp_v) {
8296 		uint32_t width;
8297 
8298 		if (comp_v->operation != comp_m->operation ||
8299 		    comp_v->a.field != comp_m->a.field ||
8300 		    comp_v->b.field != comp_m->b.field)
8301 			return rte_flow_error_set(error, EINVAL,
8302 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8303 					   NULL,
8304 					   "compare item spec/mask not matching");
8305 		width = flow_hw_item_compare_width_supported(comp_v->a.field);
8306 		MLX5_ASSERT(width > 0);
8307 		if ((comp_v->width & comp_m->width) != width)
8308 			return rte_flow_error_set(error, EINVAL,
8309 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8310 					   NULL,
8311 					   "compare item only supports full mask");
8312 	}
8313 	return 0;
8314 }
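
/*
 * Example of a COMPARE item accepted by the checks above (illustrative
 * assumption about the application-side encoding): spec and mask must use the
 * same operation and fields, the mask width must be fully set, and the spec
 * width must equal the supported width of the argument field (32 bits for TAG):
 *
 * @code{.c}
 * struct rte_flow_item_compare spec = {
 *     .operation = RTE_FLOW_ITEM_COMPARE_EQ,
 *     .a = { .field = RTE_FLOW_FIELD_TAG, .tag_index = 0 },
 *     .b = { .field = RTE_FLOW_FIELD_META },
 *     .width = 32,
 * };
 * struct rte_flow_item_compare mask = {
 *     .operation = RTE_FLOW_ITEM_COMPARE_EQ,
 *     .a = { .field = RTE_FLOW_FIELD_TAG, .tag_index = UINT8_MAX },
 *     .b = { .field = RTE_FLOW_FIELD_META },
 *     .width = UINT32_MAX,
 * };
 * @endcode
 */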
8315 
8316 static inline int
8317 mlx5_hw_validate_item_nsh(struct rte_eth_dev *dev,
8318 			  const struct rte_flow_item *item,
8319 			  struct rte_flow_error *error)
8320 {
8321 	return mlx5_flow_validate_item_nsh(dev, item, error);
8322 }
8323 
8324 static bool
8325 mlx5_hw_flow_tunnel_ip_check(uint64_t last_item, uint64_t *item_flags)
8326 {
8327 	bool tunnel;
8328 
8329 	if (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
8330 		tunnel = true;
8331 		*item_flags |= MLX5_FLOW_LAYER_IPIP;
8332 	} else if (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV6 ||
8333 		   last_item == MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) {
8334 		tunnel = true;
8335 		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
8336 	} else {
8337 		tunnel = false;
8338 	}
8339 	return tunnel;
8340 }
8341 
8342 const struct rte_flow_item_ipv4 hws_nic_ipv4_mask = {
8343 	.hdr = {
8344 		.version = 0xf,
8345 		.ihl = 0xf,
8346 		.type_of_service = 0xff,
8347 		.total_length = RTE_BE16(0xffff),
8348 		.packet_id = RTE_BE16(0xffff),
8349 		.fragment_offset = RTE_BE16(0xffff),
8350 		.time_to_live = 0xff,
8351 		.next_proto_id = 0xff,
8352 		.src_addr = RTE_BE32(0xffffffff),
8353 		.dst_addr = RTE_BE32(0xffffffff),
8354 	},
8355 };
8356 
8357 const struct rte_flow_item_ipv6 hws_nic_ipv6_mask = {
8358 	.hdr = {
8359 		.vtc_flow = RTE_BE32(0xffffffff),
8360 		.payload_len = RTE_BE16(0xffff),
8361 		.proto = 0xff,
8362 		.hop_limits = 0xff,
8363 		.src_addr = RTE_IPV6_MASK_FULL,
8364 		.dst_addr = RTE_IPV6_MASK_FULL,
8365 	},
8366 	.has_frag_ext = 1,
8367 };
8368 
8369 static int
8370 flow_hw_validate_item_ptype(const struct rte_flow_item *item,
8371 			    struct rte_flow_error *error)
8372 {
8373 	const struct rte_flow_item_ptype *ptype = item->mask;
8374 
8375 	/* HWS does not allow empty PTYPE mask */
8376 	if (!ptype)
8377 		return rte_flow_error_set(error, EINVAL,
8378 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8379 					  NULL, "empty ptype mask");
8380 	if (!(ptype->packet_type &
8381 	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
8382 	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK |
8383 	       RTE_PTYPE_INNER_L4_MASK)))
8384 		return rte_flow_error_set(error, ENOTSUP,
8385 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8386 					  NULL, "ptype mask not supported");
8387 	return 0;
8388 }
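
/*
 * Example of a PTYPE item that passes the check above (illustration only):
 * the mask must select at least one of the outer/inner L2, L3 or L4 ptype
 * groups, e.g. matching UDP packets by the outer L4 type:
 *
 * @code{.c}
 * struct rte_flow_item_ptype ptype_spec = { .packet_type = RTE_PTYPE_L4_UDP };
 * struct rte_flow_item_ptype ptype_mask = { .packet_type = RTE_PTYPE_L4_MASK };
 * @endcode
 */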
8389 
8390 struct mlx5_hw_pattern_validation_ctx {
8391 	const struct rte_flow_item *geneve_item;
8392 	const struct rte_flow_item *flex_item;
8393 };
8394 
8395 static int
8396 flow_hw_pattern_validate(struct rte_eth_dev *dev,
8397 			 const struct rte_flow_pattern_template_attr *attr,
8398 			 const struct rte_flow_item items[],
8399 			 uint64_t *item_flags,
8400 			 struct rte_flow_error *error)
8401 {
8402 	struct mlx5_priv *priv = dev->data->dev_private;
8403 	const struct rte_flow_item *item;
8404 	const struct rte_flow_item *gtp_item = NULL;
8405 	const struct rte_flow_item *gre_item = NULL;
8406 	const struct rte_flow_attr flow_attr = {
8407 		.ingress = attr->ingress,
8408 		.egress = attr->egress,
8409 		.transfer = attr->transfer
8410 	};
8411 	int ret, tag_idx;
8412 	uint32_t tag_bitmap = 0;
8413 	uint64_t last_item = 0;
8414 
8415 	if (!mlx5_hw_ctx_validate(dev, error))
8416 		return -rte_errno;
8417 	if (!attr->ingress && !attr->egress && !attr->transfer)
8418 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8419 					  "at least one of the direction attributes"
8420 					  " must be specified");
8421 	if (priv->sh->config.dv_esw_en) {
8422 		MLX5_ASSERT(priv->master || priv->representor);
8423 		if (priv->master) {
8424 			if ((attr->ingress && attr->egress) ||
8425 			    (attr->ingress && attr->transfer) ||
8426 			    (attr->egress && attr->transfer))
8427 				return rte_flow_error_set(error, EINVAL,
8428 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8429 							  "only one direction attribute at once"
8430 							  " can be used on transfer proxy port");
8431 		} else {
8432 			if (attr->transfer)
8433 				return rte_flow_error_set(error, EINVAL,
8434 							  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
8435 							  "transfer attribute cannot be used with"
8436 							  " port representors");
8437 			if (attr->ingress && attr->egress)
8438 				return rte_flow_error_set(error, EINVAL,
8439 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8440 							  "ingress and egress direction attributes"
8441 							  " cannot be used at the same time on"
8442 							  " port representors");
8443 		}
8444 	} else {
8445 		if (attr->transfer)
8446 			return rte_flow_error_set(error, EINVAL,
8447 						  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
8448 						  "transfer attribute cannot be used when"
8449 						  " E-Switch is disabled");
8450 	}
8451 	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
8452 		bool tunnel = *item_flags & MLX5_FLOW_LAYER_TUNNEL;
8453 
8454 		switch ((int)item->type) {
8455 		case RTE_FLOW_ITEM_TYPE_PTYPE:
8456 			ret = flow_hw_validate_item_ptype(item, error);
8457 			if (ret)
8458 				return ret;
8459 			last_item = MLX5_FLOW_ITEM_PTYPE;
8460 			break;
8461 		case RTE_FLOW_ITEM_TYPE_TAG:
8462 		{
8463 			const struct rte_flow_item_tag *tag =
8464 				(const struct rte_flow_item_tag *)item->spec;
8465 
8466 			if (tag == NULL)
8467 				return rte_flow_error_set(error, EINVAL,
8468 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8469 							  NULL,
8470 							  "Tag spec is NULL");
8471 			if (tag->index >= MLX5_FLOW_HW_TAGS_MAX &&
8472 			    tag->index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
8473 				return rte_flow_error_set(error, EINVAL,
8474 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8475 							  NULL,
8476 							  "Invalid tag index");
8477 			tag_idx = flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, tag->index);
8478 			if (tag_idx == REG_NON)
8479 				return rte_flow_error_set(error, EINVAL,
8480 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8481 							  NULL,
8482 							  "Unsupported tag index");
8483 			if (tag_bitmap & (1 << tag_idx))
8484 				return rte_flow_error_set(error, EINVAL,
8485 							  RTE_FLOW_ERROR_TYPE_ITEM,
8486 							  NULL,
8487 							  "Duplicated tag index");
8488 			tag_bitmap |= 1 << tag_idx;
8489 			last_item = MLX5_FLOW_ITEM_TAG;
8490 			break;
8491 		}
8492 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8493 		{
8494 			const struct rte_flow_item_tag *tag =
8495 				(const struct rte_flow_item_tag *)item->spec;
8496 			uint16_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
8497 
8498 			if (!((1 << (tag->index - REG_C_0)) & regcs))
8499 				return rte_flow_error_set(error, EINVAL,
8500 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8501 							  NULL,
8502 							  "Unsupported internal tag index");
8503 			if (tag_bitmap & (1 << tag->index))
8504 				return rte_flow_error_set(error, EINVAL,
8505 							  RTE_FLOW_ERROR_TYPE_ITEM,
8506 							  NULL,
8507 							  "Duplicated tag index");
8508 			tag_bitmap |= 1 << tag->index;
8509 			break;
8510 		}
8511 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
8512 			if (attr->ingress && priv->sh->config.repr_matching)
8513 				return rte_flow_error_set(error, EINVAL,
8514 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8515 						  "represented port item cannot be used"
8516 						  " when ingress attribute is set");
8517 			if (attr->egress)
8518 				return rte_flow_error_set(error, EINVAL,
8519 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8520 						  "represented port item cannot be used"
8521 						  " when egress attribute is set");
8522 			last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
8523 			break;
8524 		case RTE_FLOW_ITEM_TYPE_META:
8525 			/* ingress + group 0 is not supported */
8526 			*item_flags |= MLX5_FLOW_ITEM_METADATA;
8527 			break;
8528 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
8529 		{
8530 			int reg = flow_hw_get_reg_id(dev,
8531 						     RTE_FLOW_ITEM_TYPE_METER_COLOR,
8532 						     0);
8533 			if (reg == REG_NON)
8534 				return rte_flow_error_set(error, EINVAL,
8535 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8536 							  NULL,
8537 							  "Unsupported meter color register");
8538 			if (*item_flags &
8539 			    (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
8540 				return rte_flow_error_set
8541 					(error, EINVAL,
8542 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8543 			last_item = MLX5_FLOW_ITEM_METER_COLOR;
8544 			break;
8545 		}
8546 		case RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY:
8547 		{
8548 			if (!priv->sh->lag_rx_port_affinity_en)
8549 				return rte_flow_error_set(error, EINVAL,
8550 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8551 							  "Unsupported aggregated affinity with older FW");
8552 			if ((attr->transfer && priv->fdb_def_rule) || attr->egress)
8553 				return rte_flow_error_set(error, EINVAL,
8554 							  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8555 							  "Aggregated affinity item not supported"
8556 							  " with egress or transfer"
8557 							  " attribute");
8558 			last_item = MLX5_FLOW_ITEM_AGGR_AFFINITY;
8559 			break;
8560 		}
8561 		case RTE_FLOW_ITEM_TYPE_GENEVE:
8562 			last_item = MLX5_FLOW_LAYER_GENEVE;
8563 			break;
8564 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
8565 		{
8566 			last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
8567 			ret = mlx5_flow_geneve_tlv_option_validate(priv, item,
8568 								   error);
8569 			if (ret < 0)
8570 				return ret;
8571 			break;
8572 		}
8573 		case RTE_FLOW_ITEM_TYPE_COMPARE:
8574 		{
8575 			last_item = MLX5_FLOW_ITEM_COMPARE;
8576 			ret = flow_hw_validate_item_compare(item, error);
8577 			if (ret)
8578 				return ret;
8579 			break;
8580 		}
8581 		case RTE_FLOW_ITEM_TYPE_ETH:
8582 			ret = mlx5_flow_validate_item_eth(dev, item,
8583 							  *item_flags,
8584 							  true, error);
8585 			if (ret < 0)
8586 				return ret;
8587 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8588 				    MLX5_FLOW_LAYER_OUTER_L2;
8589 			break;
8590 		case RTE_FLOW_ITEM_TYPE_VLAN:
8591 			ret = mlx5_flow_dv_validate_item_vlan(item, *item_flags,
8592 							      dev, error);
8593 			if (ret < 0)
8594 				return ret;
8595 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
8596 				    MLX5_FLOW_LAYER_OUTER_VLAN;
8597 			break;
8598 		case RTE_FLOW_ITEM_TYPE_IPV4:
8599 			tunnel |= mlx5_hw_flow_tunnel_ip_check(last_item,
8600 							       item_flags);
8601 			ret = mlx5_flow_dv_validate_item_ipv4(dev, item,
8602 							      *item_flags,
8603 							      last_item, 0,
8604 							      &hws_nic_ipv4_mask,
8605 							      error);
8606 			if (ret)
8607 				return ret;
8608 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8609 				    MLX5_FLOW_LAYER_OUTER_L3_IPV4;
8610 			break;
8611 		case RTE_FLOW_ITEM_TYPE_IPV6:
8612 			tunnel |= mlx5_hw_flow_tunnel_ip_check(last_item,
8613 							       item_flags);
8614 			ret = mlx5_flow_validate_item_ipv6(dev, item,
8615 							   *item_flags,
8616 							   last_item, 0,
8617 							   &hws_nic_ipv6_mask,
8618 							   error);
8619 			if (ret < 0)
8620 				return ret;
8621 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8622 				    MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8623 			break;
8624 		case RTE_FLOW_ITEM_TYPE_UDP:
8625 			ret = mlx5_flow_validate_item_udp(dev, item,
8626 							  *item_flags,
8627 							  0xff, error);
8628 			if (ret)
8629 				return ret;
8630 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8631 				    MLX5_FLOW_LAYER_OUTER_L4_UDP;
8632 			break;
8633 		case RTE_FLOW_ITEM_TYPE_TCP:
8634 			ret = mlx5_flow_validate_item_tcp
8635 				(dev, item, *item_flags,
8636 				 0xff, &nic_tcp_mask, error);
8637 			if (ret < 0)
8638 				return ret;
8639 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8640 				    MLX5_FLOW_LAYER_OUTER_L4_TCP;
8641 			break;
8642 		case RTE_FLOW_ITEM_TYPE_GTP:
8643 			gtp_item = item;
8644 			ret = mlx5_flow_dv_validate_item_gtp(dev, gtp_item,
8645 							     *item_flags, error);
8646 			if (ret < 0)
8647 				return ret;
8648 			last_item = MLX5_FLOW_LAYER_GTP;
8649 			break;
8650 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
8651 			ret = mlx5_flow_dv_validate_item_gtp_psc(dev, item,
8652 								 last_item,
8653 								 gtp_item,
8654 								 false, error);
8655 			if (ret < 0)
8656 				return ret;
8657 			last_item = MLX5_FLOW_LAYER_GTP_PSC;
8658 			break;
8659 		case RTE_FLOW_ITEM_TYPE_VXLAN:
8660 			ret = mlx5_flow_validate_item_vxlan(dev, 0, item,
8661 							    *item_flags,
8662 							    false, error);
8663 			if (ret < 0)
8664 				return ret;
8665 			last_item = MLX5_FLOW_LAYER_VXLAN;
8666 			break;
8667 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8668 			ret = mlx5_flow_validate_item_vxlan_gpe(item,
8669 								*item_flags,
8670 								dev, error);
8671 			if (ret < 0)
8672 				return ret;
8673 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
8674 			break;
8675 		case RTE_FLOW_ITEM_TYPE_MPLS:
8676 			ret = mlx5_flow_validate_item_mpls(dev, item,
8677 							   *item_flags,
8678 							   last_item, error);
8679 			if (ret < 0)
8680 				return ret;
8681 			last_item = MLX5_FLOW_LAYER_MPLS;
8682 			break;
8683 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
8684 		case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
8685 			last_item = MLX5_FLOW_ITEM_SQ;
8686 			break;
8687 		case RTE_FLOW_ITEM_TYPE_GRE:
8688 			ret = mlx5_flow_validate_item_gre(dev, item,
8689 							  *item_flags,
8690 							  0xff, error);
8691 			if (ret < 0)
8692 				return ret;
8693 			gre_item = item;
8694 			last_item = MLX5_FLOW_LAYER_GRE;
8695 			break;
8696 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
8697 			if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
8698 				return rte_flow_error_set
8699 					(error, EINVAL,
8700 					 RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
8701 			ret = mlx5_flow_validate_item_gre_key
8702 				(dev, item, *item_flags, gre_item, error);
8703 			if (ret < 0)
8704 				return ret;
8705 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
8706 			break;
8707 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
8708 			if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
8709 				return rte_flow_error_set
8710 					(error, EINVAL,
8711 					 RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
8712 			ret = mlx5_flow_validate_item_gre_option(dev, item,
8713 								 *item_flags,
8714 								 &flow_attr,
8715 								 gre_item,
8716 								 error);
8717 			if (ret < 0)
8718 				return ret;
8719 			last_item = MLX5_FLOW_LAYER_GRE;
8720 			break;
8721 		case RTE_FLOW_ITEM_TYPE_NVGRE:
8722 			ret = mlx5_flow_validate_item_nvgre(dev, item,
8723 							    *item_flags, 0xff,
8724 							    error);
8725 			if (ret)
8726 				return ret;
8727 			last_item = MLX5_FLOW_LAYER_NVGRE;
8728 			break;
8729 		case RTE_FLOW_ITEM_TYPE_ICMP:
8730 			ret = mlx5_flow_validate_item_icmp(dev, item,
8731 							   *item_flags, 0xff,
8732 							   error);
8733 			if (ret < 0)
8734 				return ret;
8735 			last_item = MLX5_FLOW_LAYER_ICMP;
8736 			break;
8737 		case RTE_FLOW_ITEM_TYPE_ICMP6:
8738 			ret = mlx5_flow_validate_item_icmp6(dev, item,
8739 							    *item_flags, 0xff,
8740 							    error);
8741 			if (ret < 0)
8742 				return ret;
8743 			last_item = MLX5_FLOW_LAYER_ICMP6;
8744 			break;
8745 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
8746 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
8747 			ret = mlx5_flow_validate_item_icmp6_echo(dev, item,
8748 								 *item_flags,
8749 								 0xff, error);
8750 			if (ret < 0)
8751 				return ret;
8752 			last_item = MLX5_FLOW_LAYER_ICMP6;
8753 			break;
8754 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
8755 			if (*item_flags &
8756 			    (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
8757 				return rte_flow_error_set
8758 					(error, EINVAL,
8759 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8760 			ret = mlx5_flow_dv_validate_item_aso_ct(dev, item,
8761 								item_flags,
8762 								error);
8763 			if (ret < 0)
8764 				return ret;
8765 			break;
8766 		case RTE_FLOW_ITEM_TYPE_QUOTA:
8767 			if (*item_flags &
8768 			    (MLX5_FLOW_ITEM_METER_COLOR |
8769 			     MLX5_FLOW_LAYER_ASO_CT))
8770 				return rte_flow_error_set
8771 					(error, EINVAL,
8772 					 RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8773 			last_item = MLX5_FLOW_ITEM_QUOTA;
8774 			break;
8775 		case RTE_FLOW_ITEM_TYPE_ESP:
8776 			ret = mlx5_flow_os_validate_item_esp(dev, item,
8777 							     *item_flags, 0xff,
8778 							     error);
8779 			if (ret < 0)
8780 				return ret;
8781 			last_item = MLX5_FLOW_ITEM_ESP;
8782 			break;
8783 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
8784 			last_item = tunnel ?
8785 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
8786 				    MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
8787 			break;
8788 		case RTE_FLOW_ITEM_TYPE_FLEX:
8789 			/* match mlx5dr_definer_conv_items_to_hl() */
8790 			last_item = tunnel ?
8791 				    MLX5_FLOW_ITEM_INNER_FLEX :
8792 				    MLX5_FLOW_ITEM_OUTER_FLEX;
8793 			break;
8794 		case RTE_FLOW_ITEM_TYPE_RANDOM:
8795 			last_item = MLX5_FLOW_ITEM_RANDOM;
8796 			break;
8797 		case RTE_FLOW_ITEM_TYPE_NSH:
8798 			last_item = MLX5_FLOW_ITEM_NSH;
8799 			ret = mlx5_hw_validate_item_nsh(dev, item, error);
8800 			if (ret < 0)
8801 				return ret;
8802 			break;
8803 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
8804 			/*
8805 			 * Integrity flow item validation requires access to
8806 			 * both item mask and spec.
8807 			 * Current HWS model allows item mask in pattern
8808 			 * template and item spec in flow rule.
8809 			 */
8810 			break;
8811 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
8812 		case RTE_FLOW_ITEM_TYPE_VOID:
8813 		case RTE_FLOW_ITEM_TYPE_END:
8814 			break;
8815 		default:
8816 			return rte_flow_error_set(error, EINVAL,
8817 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8818 						  NULL,
8819 						  "Unsupported item type");
8820 		}
8821 		*item_flags |= last_item;
8822 	}
8823 	return 1 + RTE_PTR_DIFF(item, items) / sizeof(item[0]);
8824 }
8825 
8826 /*
8827  * Verify that the tested flow patterns fit the STE size limit of an HWS group.
8828  *
8830  * Return values:
8831  * 0       : Tested patterns fit STE size limit
8832  * -EINVAL : Invalid parameters detected
8833  * -E2BIG  : Tested patterns exceed STE size limit
8834  */
8835 static int
8836 pattern_template_validate(struct rte_eth_dev *dev,
8837 			  struct rte_flow_pattern_template *pt[],
8838 			  uint32_t pt_num,
8839 			  struct rte_flow_error *error)
8840 {
8841 	struct mlx5_flow_template_table_cfg tbl_cfg = {
8842 		.attr = {
8843 			.nb_flows = 64,
8844 			.insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN,
8845 			.hash_func = RTE_FLOW_TABLE_HASH_FUNC_DEFAULT,
8846 			.flow_attr = {
8847 				.group = 1,
8848 				.ingress = pt[0]->attr.ingress,
8849 				.egress = pt[0]->attr.egress,
8850 				.transfer = pt[0]->attr.transfer
8851 			}
8852 		}
8853 	};
8854 	struct mlx5_priv *priv = dev->data->dev_private;
8855 	struct rte_flow_actions_template *action_template;
8856 	struct rte_flow_template_table *tmpl_tbl;
8857 	int ret;
8858 
8859 	if (pt[0]->attr.ingress) {
8860 		action_template =
8861 			priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
8862 	} else if (pt[0]->attr.egress) {
8863 		action_template =
8864 			priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
8865 	} else if (pt[0]->attr.transfer) {
8866 		action_template =
8867 			priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
8868 	} else {
8869 		ret = EINVAL;
8870 		goto end;
8871 	}
8872 
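	/* A pattern with a COMPARE item is validated against a single-rule table. */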
8873 	if (pt[0]->item_flags & MLX5_FLOW_ITEM_COMPARE)
8874 		tbl_cfg.attr.nb_flows = 1;
8875 	tmpl_tbl = flow_hw_table_create(dev, &tbl_cfg, pt, pt_num,
8876 					&action_template, 1, error);
8877 	if (tmpl_tbl) {
8878 		ret = 0;
8879 		flow_hw_table_destroy(dev, tmpl_tbl, error);
8880 	} else {
8881 		switch (rte_errno) {
8882 		case E2BIG:
8883 			ret = E2BIG;
8884 			break;
8885 		case ENOTSUP:
8886 			ret = EINVAL;
8887 			break;
8888 		default:
8889 			ret = 0;
8890 			break;
8891 		}
8892 	}
8893 end:
8894 	if (ret)
8895 		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8896 				   NULL, "failed to validate pattern template");
8897 	return -ret;
8898 }
8899 
8900 /**
8901  * Create flow item template.
8902  *
8903  * @param[in] dev
8904  *   Pointer to the rte_eth_dev structure.
8905  * @param[in] attr
8906  *   Pointer to the item template attributes.
8907  * @param[in] items
8908  *   The template item pattern.
8909  * @param[out] error
8910  *   Pointer to error structure.
8911  *
8912  * @return
8913  *  Item template pointer on success, NULL otherwise and rte_errno is set.
8914  */
8915 static struct rte_flow_pattern_template *
8916 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
8917 			     const struct rte_flow_pattern_template_attr *attr,
8918 			     const struct rte_flow_item items[],
8919 			     struct rte_flow_error *error)
8920 {
8921 	struct mlx5_priv *priv = dev->data->dev_private;
8922 	struct rte_flow_pattern_template *it;
8923 	struct rte_flow_item *copied_items = NULL;
8924 	const struct rte_flow_item *tmpl_items;
8925 	uint64_t orig_item_nb, item_flags = 0;
8926 	struct rte_flow_item port = {
8927 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
8928 		.mask = &rte_flow_item_ethdev_mask,
8929 	};
8930 	struct rte_flow_item_tag tag_v = {
8931 		.data = 0,
8932 		.index = REG_C_0,
8933 	};
8934 	struct rte_flow_item_tag tag_m = {
8935 		.data = flow_hw_tx_tag_regc_mask(dev),
8936 		.index = 0xff,
8937 	};
8938 	struct rte_flow_item tag = {
8939 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
8940 		.spec = &tag_v,
8941 		.mask = &tag_m,
8942 		.last = NULL
8943 	};
8944 	int it_items_size;
8945 	unsigned int i = 0;
8946 	int rc;
8947 
8948 	/* Validate application items only */
8949 	rc = flow_hw_pattern_validate(dev, attr, items, &item_flags, error);
8950 	if (rc < 0)
8951 		return NULL;
8952 	orig_item_nb = rc;
8953 	if (priv->sh->config.dv_esw_en &&
8954 	    priv->sh->config.repr_matching &&
8955 	    attr->ingress && !attr->egress && !attr->transfer) {
8956 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
8957 		if (!copied_items)
8958 			return NULL;
8959 		tmpl_items = copied_items;
8960 	} else if (priv->sh->config.dv_esw_en &&
8961 		   priv->sh->config.repr_matching &&
8962 		   !attr->ingress && attr->egress && !attr->transfer) {
8963 		if (item_flags & MLX5_FLOW_ITEM_SQ) {
8964 			DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
8965 				       "pattern template", dev->data->port_id);
8966 			tmpl_items = items;
8967 			goto setup_pattern_template;
8968 		}
8969 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
8970 		if (!copied_items)
8971 			return NULL;
8972 		tmpl_items = copied_items;
8973 	} else {
8974 		tmpl_items = items;
8975 	}
8976 setup_pattern_template:
8977 	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
8978 	if (!it) {
8979 		rte_flow_error_set(error, ENOMEM,
8980 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8981 				   NULL,
8982 				   "cannot allocate item template");
8983 		goto error;
8984 	}
8985 	it->attr = *attr;
8986 	it->item_flags = item_flags;
8987 	it->orig_item_nb = orig_item_nb;
8988 	it_items_size = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, tmpl_items, error);
8989 	if (it_items_size <= 0) {
8990 		rte_flow_error_set(error, ENOMEM,
8991 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8992 				   NULL,
8993 				   "Failed to determine buffer size for pattern");
8994 		goto error;
8995 	}
8996 	it_items_size = RTE_ALIGN(it_items_size, 16);
8997 	it->items = mlx5_malloc(MLX5_MEM_ZERO, it_items_size, 0, rte_dev_numa_node(dev->device));
8998 	if (it->items == NULL) {
8999 		rte_flow_error_set(error, ENOMEM,
9000 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9001 				   NULL,
9002 				   "Cannot allocate memory for pattern");
9003 		goto error;
9004 	}
9005 	rc = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, it->items, it_items_size, tmpl_items, error);
9006 	if (rc <= 0) {
9007 		rte_flow_error_set(error, ENOMEM,
9008 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9009 				   NULL,
9010 				   "Failed to store pattern");
9011 		goto error;
9012 	}
9013 	it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
9014 	if (!it->mt) {
9015 		rte_flow_error_set(error, rte_errno,
9016 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9017 				   NULL,
9018 				   "cannot create match template");
9019 		goto error;
9020 	}
9021 	if (copied_items) {
9022 		if (attr->ingress)
9023 			it->implicit_port = true;
9024 		else if (attr->egress)
9025 			it->implicit_tag = true;
9026 		mlx5_free(copied_items);
9027 		copied_items = NULL;
9028 	}
9029 	/* Either inner or outer IPv6 routing extension is allowed, not both. */
9030 	if (it->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
9031 			      MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
9032 		if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
9033 		     (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) ||
9034 		    (mlx5_alloc_srh_flex_parser(dev))) {
9035 			rte_flow_error_set(error, rte_errno,
9036 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9037 					   "cannot create IPv6 routing extension support");
9038 			goto error;
9039 		}
9040 	}
9041 	if (it->item_flags & MLX5_FLOW_ITEM_FLEX) {
9042 		for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
9043 			const struct rte_flow_item_flex *spec = items[i].spec;
9044 			struct rte_flow_item_flex_handle *handle;
9045 
9046 			if (items[i].type != RTE_FLOW_ITEM_TYPE_FLEX)
9047 				continue;
9048 			handle = spec->handle;
9049 			if (flow_hw_flex_item_acquire(dev, handle,
9050 						      &it->flex_item)) {
9051 				rte_flow_error_set(error, EINVAL,
9052 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9053 						   NULL, "cannot create hw FLEX item");
9054 				goto error;
9055 			}
9056 		}
9057 	}
9058 	if (it->item_flags & MLX5_FLOW_LAYER_GENEVE_OPT) {
9059 		for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
9060 			const struct rte_flow_item_geneve_opt *spec =
9061 				items[i].spec;
9062 
9063 			if (items[i].type != RTE_FLOW_ITEM_TYPE_GENEVE_OPT)
9064 				continue;
9065 			if (mlx5_geneve_tlv_option_register(priv, spec,
9066 							    &it->geneve_opt_mng)) {
9067 				rte_flow_error_set(error, EINVAL,
9068 						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9069 						   NULL, "cannot register GENEVE TLV option");
9070 				goto error;
9071 			}
9072 		}
9073 	}
9074 	rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
9075 	rc = pattern_template_validate(dev, &it, 1, error);
9076 	if (rc)
9077 		goto error;
9078 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
9079 	return it;
9080 error:
9081 	if (it) {
9082 		if (it->flex_item)
9083 			flow_hw_flex_item_release(dev, &it->flex_item);
9084 		if (it->geneve_opt_mng.nb_options)
9085 			mlx5_geneve_tlv_options_unregister(priv, &it->geneve_opt_mng);
9086 		if (it->mt)
9087 			claim_zero(mlx5dr_match_template_destroy(it->mt));
9088 		mlx5_free(it->items);
9089 		mlx5_free(it);
9090 	}
9091 	if (copied_items)
9092 		mlx5_free(copied_items);
9093 	return NULL;
9094 }
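
/*
 * Usage sketch from the application side (illustration only, not part of the
 * driver): the callback above implements rte_flow_pattern_template_create().
 * Only item masks matter at template creation time; spec values are supplied
 * later, per flow rule:
 *
 * @code{.c}
 * const struct rte_flow_pattern_template_attr attr = {
 *     .relaxed_matching = 0,
 *     .ingress = 1,
 * };
 * const struct rte_flow_item pattern[] = {
 *     { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *     { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *       .mask = &(struct rte_flow_item_ipv4){ .hdr.dst_addr = RTE_BE32(0xffffffff) } },
 *     { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow_pattern_template *pt =
 *     rte_flow_pattern_template_create(port_id, &attr, pattern, &err);
 * @endcode
 */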
9095 
9096 /**
9097  * Destroy flow item template.
9098  *
9099  * @param[in] dev
9100  *   Pointer to the rte_eth_dev structure.
9101  * @param[in] template
9102  *   Pointer to the item template to be destroyed.
9103  * @param[out] error
9104  *   Pointer to error structure.
9105  *
9106  * @return
9107  *   0 on success, a negative errno value otherwise and rte_errno is set.
9108  */
9109 static int
9110 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
9111 			      struct rte_flow_pattern_template *template,
9112 			      struct rte_flow_error *error __rte_unused)
9113 			      struct rte_flow_error *error)
9114 	struct mlx5_priv *priv = dev->data->dev_private;
9115 
9116 	if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
9117 		DRV_LOG(WARNING, "Item template %p is still in use.",
9118 			(void *)template);
9119 		return rte_flow_error_set(error, EBUSY,
9120 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9121 				   NULL,
9122 				   "item template is in use");
9123 	}
9124 	if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
9125 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
9126 		mlx5_free_srh_flex_parser(dev);
9127 	LIST_REMOVE(template, next);
9128 	flow_hw_flex_item_release(dev, &template->flex_item);
9129 	mlx5_geneve_tlv_options_unregister(priv, &template->geneve_opt_mng);
9130 	claim_zero(mlx5dr_match_template_destroy(template->mt));
9131 	mlx5_free(template->items);
9132 	mlx5_free(template);
9133 	return 0;
9134 }
9135 
9136 /*
9137  * Get information about HWS pre-configurable resources.
9138  *
9139  * @param[in] dev
9140  *   Pointer to the rte_eth_dev structure.
9141  * @param[out] port_info
9142  *   Pointer to port information.
9143  * @param[out] queue_info
9144  *   Pointer to queue information.
9145  * @param[out] error
9146  *   Pointer to error structure.
9147  *
9148  * @return
9149  *   0 on success, a negative errno value otherwise and rte_errno is set.
9150  */
9151 static int
9152 flow_hw_info_get(struct rte_eth_dev *dev,
9153 		 struct rte_flow_port_info *port_info,
9154 		 struct rte_flow_queue_info *queue_info,
9155 		 struct rte_flow_error *error __rte_unused)
9156 {
9157 	struct mlx5_priv *priv = dev->data->dev_private;
9158 	uint16_t port_id = dev->data->port_id;
9159 	struct rte_mtr_capabilities mtr_cap;
9160 	int ret;
9161 
9162 	memset(port_info, 0, sizeof(*port_info));
9163 	/* Queue size is not limited by the low-level layer. */
9164 	port_info->max_nb_queues = UINT32_MAX;
9165 	queue_info->max_size = UINT32_MAX;
9166 
9167 	memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
9168 	ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
9169 	if (!ret)
9170 		port_info->max_nb_meters = mtr_cap.n_max;
9171 	port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
9172 	port_info->max_nb_aging_objects = port_info->max_nb_counters;
9173 	return 0;
9174 }
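
/*
 * Usage sketch (illustration only): this callback is reached through
 * rte_flow_info_get(), typically called before rte_flow_configure() to size
 * the pre-allocated HWS resources:
 *
 * @code{.c}
 * struct rte_flow_port_info port_info;
 * struct rte_flow_queue_info queue_info;
 * struct rte_flow_error err;
 *
 * if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) == 0)
 *     printf("max counters: %u, max meters: %u\n",
 *            port_info.max_nb_counters, port_info.max_nb_meters);
 * @endcode
 */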
9175 
9176 /**
9177  * Create group callback.
9178  *
9179  * @param[in] tool_ctx
9180  *   Pointer to the hash list related context.
9181  * @param[in] cb_ctx
9182  *   Pointer to the group creation context.
9183  *
9184  * @return
9185  *   Group entry on success, NULL otherwise and rte_errno is set.
9186  */
9187 struct mlx5_list_entry *
9188 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
9189 {
9190 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9191 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9192 	struct rte_eth_dev *dev = ctx->dev;
9193 	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
9194 	struct mlx5_priv *priv = dev->data->dev_private;
9195 	struct mlx5dr_table_attr dr_tbl_attr = {0};
9196 	struct rte_flow_error *error = ctx->error;
9197 	struct mlx5_flow_group *grp_data;
9198 	struct mlx5dr_table *tbl = NULL;
9199 	struct mlx5dr_action *jump;
9200 	uint32_t idx = 0;
9201 	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
9202 	      attr->transfer ? "FDB" : "NIC", attr->egress ? "egress" : "ingress",
9203 	      attr->group, idx);
9204 
9205 	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
9206 	if (!grp_data) {
9207 		rte_flow_error_set(error, ENOMEM,
9208 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9209 				   NULL,
9210 				   "cannot allocate flow table data entry");
9211 		return NULL;
9212 	}
9213 	dr_tbl_attr.level = attr->group;
9214 	if (attr->transfer)
9215 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
9216 	else if (attr->egress)
9217 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
9218 	else
9219 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
9220 	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
9221 	if (!tbl)
9222 		goto error;
9223 	grp_data->tbl = tbl;
9224 	if (attr->group) {
9225 		/* Jump action to be used by non-root tables. */
9226 		jump = mlx5dr_action_create_dest_table
9227 			(priv->dr_ctx, tbl,
9228 			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
9229 		if (!jump)
9230 			goto error;
9231 		grp_data->jump.hws_action = jump;
9232 		/* Jump action to be used by the root table. */
9233 		jump = mlx5dr_action_create_dest_table
9234 			(priv->dr_ctx, tbl,
9235 			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
9236 					 [dr_tbl_attr.type]);
9237 		if (!jump)
9238 			goto error;
9239 		grp_data->jump.root_action = jump;
9240 	}
9241 
9242 	grp_data->matchers = mlx5_list_create(matcher_name, sh, true,
9243 					      flow_matcher_create_cb,
9244 					      flow_matcher_match_cb,
9245 					      flow_matcher_remove_cb,
9246 					      flow_matcher_clone_cb,
9247 					      flow_matcher_clone_free_cb);
9248 	grp_data->dev = dev;
9249 	grp_data->idx = idx;
9250 	grp_data->group_id = attr->group;
9251 	grp_data->type = dr_tbl_attr.type;
9252 	return &grp_data->entry;
9253 error:
9254 	if (grp_data->jump.root_action)
9255 		mlx5dr_action_destroy(grp_data->jump.root_action);
9256 	if (grp_data->jump.hws_action)
9257 		mlx5dr_action_destroy(grp_data->jump.hws_action);
9258 	if (tbl)
9259 		mlx5dr_table_destroy(tbl);
9260 	if (idx)
9261 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
9262 	rte_flow_error_set(error, ENOMEM,
9263 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9264 			   NULL,
9265 			   "cannot allocate flow dr table");
9266 	return NULL;
9267 }
9268 
9269 /**
9270  * Remove group callback.
9271  *
9272  * @param[in] tool_ctx
9273  *   Pointer to the hash list related context.
9274  * @param[in] entry
9275  *   Pointer to the entry to be removed.
9276  */
9277 void
9278 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
9279 {
9280 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9281 	struct mlx5_flow_group *grp_data =
9282 		    container_of(entry, struct mlx5_flow_group, entry);
9283 
9284 	MLX5_ASSERT(entry && sh);
9285 	/* To use the wrapper glue functions instead. */
9286 	if (grp_data->jump.hws_action)
9287 		mlx5dr_action_destroy(grp_data->jump.hws_action);
9288 	if (grp_data->jump.root_action)
9289 		mlx5dr_action_destroy(grp_data->jump.root_action);
9290 	mlx5_list_destroy(grp_data->matchers);
9291 	mlx5dr_table_destroy(grp_data->tbl);
9292 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
9293 }
9294 
9295 /**
9296  * Match group callback.
9297  *
9298  * @param[in] tool_ctx
9299  *   Pointer to the hash list related context.
9300  * @param[in] entry
9301  *   Pointer to the group to be matched.
9302  * @param[in] cb_ctx
9303  *   Pointer to the group matching context.
9304  *
9305  * @return
9306  *   0 on match, non-zero otherwise.
9307  */
9308 int
9309 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
9310 		     void *cb_ctx)
9311 {
9312 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9313 	struct mlx5_flow_group *grp_data =
9314 		container_of(entry, struct mlx5_flow_group, entry);
9315 	struct rte_flow_attr *attr =
9316 			(struct rte_flow_attr *)ctx->data;
9317 
9318 	return (grp_data->dev != ctx->dev) ||
9319 		(grp_data->group_id != attr->group) ||
9320 		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
9321 		attr->transfer) ||
9322 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
9323 		attr->egress) ||
9324 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
9325 		attr->ingress);
9326 }
9327 
9328 /**
9329  * Clone group entry callback.
9330  *
9331  * @param[in] tool_ctx
9332  *   Pointer to the hash list related context.
9333  * @param[in] oentry
9334  *   Pointer to the group entry to be cloned.
9335  * @param[in] cb_ctx
9336  *   Pointer to the group cloning context.
9337  *
9338  * @return
9339  *   Cloned group entry on success, NULL otherwise and rte_errno is set.
9340  */
9341 struct mlx5_list_entry *
9342 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
9343 		     void *cb_ctx)
9344 {
9345 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9346 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9347 	struct mlx5_flow_group *grp_data;
9348 	struct rte_flow_error *error = ctx->error;
9349 	uint32_t idx = 0;
9350 
9351 	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
9352 	if (!grp_data) {
9353 		rte_flow_error_set(error, ENOMEM,
9354 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9355 				   NULL,
9356 				   "cannot allocate flow table data entry");
9357 		return NULL;
9358 	}
9359 	memcpy(grp_data, oentry, sizeof(*grp_data));
9360 	grp_data->idx = idx;
9361 	return &grp_data->entry;
9362 }
9363 
9364 /**
9365  * Free cloned group entry callback.
9366  *
9367  * @param[in] tool_ctx
9368  *   Pointer to the hash list related context.
9369  * @param[in] entry
9370  *   Pointer to the group to be freed.
9371  */
9372 void
9373 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
9374 {
9375 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
9376 	struct mlx5_flow_group *grp_data =
9377 		    container_of(entry, struct mlx5_flow_group, entry);
9378 
9379 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
9380 }
9381 
9382 /**
9383  * Create and cache a vport action for the given @p dev port. The vport actions
9384  * cache is used in HWS with FDB flows.
9385  *
9386  * This function does not create any action if the proxy port for @p dev port
9387  * was not configured for HW Steering.
9388  *
9389  * This function assumes that E-Switch is enabled and PMD is running with
9390  * HW Steering configured.
9391  *
9392  * @param dev
9393  *   Pointer to Ethernet device which will be the action destination.
9394  *
9395  * @return
9396  *   0 on success, a negative value otherwise.
9397  */
9398 int
9399 flow_hw_create_vport_action(struct rte_eth_dev *dev)
9400 {
9401 	struct mlx5_priv *priv = dev->data->dev_private;
9402 	struct rte_eth_dev *proxy_dev;
9403 	struct mlx5_priv *proxy_priv;
9404 	uint16_t port_id = dev->data->port_id;
9405 	uint16_t proxy_port_id = port_id;
9406 	int ret;
9407 
9408 	ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
9409 	if (ret)
9410 		return ret;
9411 	proxy_dev = &rte_eth_devices[proxy_port_id];
9412 	proxy_priv = proxy_dev->data->dev_private;
9413 	if (!proxy_priv->hw_vport)
9414 		return 0;
9415 	if (proxy_priv->hw_vport[port_id]) {
9416 		DRV_LOG(ERR, "port %u HWS vport action already created",
9417 			port_id);
9418 		return -EINVAL;
9419 	}
9420 	proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
9421 			(proxy_priv->dr_ctx, priv->dev_port,
9422 			 MLX5DR_ACTION_FLAG_HWS_FDB);
9423 	if (!proxy_priv->hw_vport[port_id]) {
9424 		DRV_LOG(ERR, "port %u unable to create HWS vport action",
9425 			port_id);
9426 		return -EINVAL;
9427 	}
9428 	return 0;
9429 }
9430 
9431 /**
9432  * Destroys the vport action associated with @p dev device
9433  * from actions' cache.
9434  *
9435  * This function does not destroy any action if there is no action cached
9436  * for @p dev or proxy port was not configured for HW Steering.
9437  *
9438  * This function assumes that E-Switch is enabled and PMD is running with
9439  * HW Steering configured.
9440  *
9441  * @param dev
9442  *   Pointer to Ethernet device which will be the action destination.
9443  */
9444 void
9445 flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
9446 {
9447 	struct rte_eth_dev *proxy_dev;
9448 	struct mlx5_priv *proxy_priv;
9449 	uint16_t port_id = dev->data->port_id;
9450 	uint16_t proxy_port_id = port_id;
9451 
9452 	if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
9453 		return;
9454 	proxy_dev = &rte_eth_devices[proxy_port_id];
9455 	proxy_priv = proxy_dev->data->dev_private;
9456 	if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
9457 		return;
9458 	mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
9459 	proxy_priv->hw_vport[port_id] = NULL;
9460 }
9461 
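/*
 * Editor's note (illustrative sketch, not part of the driver logic): the vport
 * action cache lives on the transfer proxy port and is indexed by the DPDK
 * port id of the destination port. A hypothetical lookup could look like:
 *
 * @code{.c}
 * uint16_t proxy_port_id = dev->data->port_id;
 *
 * if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL) == 0) {
 *	struct mlx5_priv *proxy_priv =
 *		rte_eth_devices[proxy_port_id].data->dev_private;
 *	// NULL when flow_hw_create_vport_action() was not called for this
 *	// port or the proxy port is not configured for HW Steering.
 *	struct mlx5dr_action *act = proxy_priv->hw_vport[dev->data->port_id];
 * }
 * @endcode
 */
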
9462 static int
9463 flow_hw_create_vport_actions(struct mlx5_priv *priv)
9464 {
9465 	uint16_t port_id;
9466 
9467 	MLX5_ASSERT(!priv->hw_vport);
9468 	priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
9469 				     sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
9470 				     0, SOCKET_ID_ANY);
9471 	if (!priv->hw_vport)
9472 		return -ENOMEM;
9473 	DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
9474 	DRV_LOG(DEBUG, "port %u ::    domain_id=%u", priv->dev_data->port_id, priv->domain_id);
9475 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
9476 		struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;
9477 
9478 		if (!port_priv ||
9479 		    port_priv->domain_id != priv->domain_id)
9480 			continue;
9481 		DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
9482 			priv->dev_data->port_id, port_id, port_priv->dev_port);
9483 		priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
9484 				(priv->dr_ctx, port_priv->dev_port,
9485 				 MLX5DR_ACTION_FLAG_HWS_FDB);
9486 		DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
9487 			priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
9488 		if (!priv->hw_vport[port_id])
9489 			return -EINVAL;
9490 	}
9491 	return 0;
9492 }
9493 
9494 static void
9495 flow_hw_free_vport_actions(struct mlx5_priv *priv)
9496 {
9497 	uint16_t port_id;
9498 
9499 	if (!priv->hw_vport)
9500 		return;
9501 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
9502 		if (priv->hw_vport[port_id])
9503 			mlx5dr_action_destroy(priv->hw_vport[port_id]);
9504 	mlx5_free(priv->hw_vport);
9505 	priv->hw_vport = NULL;
9506 }
9507 
9508 static void
9509 flow_hw_create_send_to_kernel_actions(struct mlx5_priv *priv __rte_unused)
9510 {
9511 #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
9512 	int action_flag;
9513 	int i;
9514 	bool is_vf_sf_dev = priv->sh->dev_cap.vf || priv->sh->dev_cap.sf;
9515 
9516 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9517 		if ((!priv->sh->config.dv_esw_en || is_vf_sf_dev) &&
9518 		     i == MLX5DR_TABLE_TYPE_FDB)
9519 			continue;
9520 		action_flag = mlx5_hw_act_flag[1][i];
9521 		priv->hw_send_to_kernel[i] =
9522 				mlx5dr_action_create_dest_root(priv->dr_ctx,
9523 							MLX5_HW_LOWEST_PRIO_ROOT,
9524 							action_flag);
9525 		if (!priv->hw_send_to_kernel[i]) {
9526 			DRV_LOG(WARNING, "Unable to create HWS send to kernel action");
9527 			return;
9528 		}
9529 	}
9530 #endif
9531 }
9532 
9533 static void
9534 flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv)
9535 {
9536 	int i;
9537 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9538 		if (priv->hw_send_to_kernel[i]) {
9539 			mlx5dr_action_destroy(priv->hw_send_to_kernel[i]);
9540 			priv->hw_send_to_kernel[i] = NULL;
9541 		}
9542 	}
9543 }
9544 
9545 static void
9546 flow_hw_destroy_nat64_actions(struct mlx5_priv *priv)
9547 {
9548 	uint32_t i;
9549 
9550 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9551 		if (priv->action_nat64[i][RTE_FLOW_NAT64_6TO4]) {
9552 			(void)mlx5dr_action_destroy(priv->action_nat64[i][RTE_FLOW_NAT64_6TO4]);
9553 			priv->action_nat64[i][RTE_FLOW_NAT64_6TO4] = NULL;
9554 		}
9555 		if (priv->action_nat64[i][RTE_FLOW_NAT64_4TO6]) {
9556 			(void)mlx5dr_action_destroy(priv->action_nat64[i][RTE_FLOW_NAT64_4TO6]);
9557 			priv->action_nat64[i][RTE_FLOW_NAT64_4TO6] = NULL;
9558 		}
9559 	}
9560 }
9561 
9562 static int
9563 flow_hw_create_nat64_actions(struct mlx5_priv *priv, struct rte_flow_error *error)
9564 {
9565 	struct mlx5dr_action_nat64_attr attr;
9566 	uint8_t regs[MLX5_FLOW_NAT64_REGS_MAX];
9567 	uint32_t i;
9568 	const uint32_t flags[MLX5DR_TABLE_TYPE_MAX] = {
9569 		MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED,
9570 		MLX5DR_ACTION_FLAG_HWS_TX | MLX5DR_ACTION_FLAG_SHARED,
9571 		MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED,
9572 	};
9573 	struct mlx5dr_action *act;
9574 
9575 	attr.registers = regs;
9576 	/* Try to use 3 registers by default. */
9577 	attr.num_of_registers = MLX5_FLOW_NAT64_REGS_MAX;
9578 	for (i = 0; i < MLX5_FLOW_NAT64_REGS_MAX; i++) {
9579 		MLX5_ASSERT(priv->sh->registers.nat64_regs[i] != REG_NON);
9580 		regs[i] = mlx5_convert_reg_to_field(priv->sh->registers.nat64_regs[i]);
9581 	}
9582 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9583 		if (i == MLX5DR_TABLE_TYPE_FDB && !priv->sh->config.dv_esw_en)
9584 			continue;
9585 		attr.flags = (enum mlx5dr_action_nat64_flags)
9586 			     (MLX5DR_ACTION_NAT64_V6_TO_V4 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
9587 		act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
9588 		if (!act)
9589 			return rte_flow_error_set(error, rte_errno,
9590 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9591 						  "Failed to create v6 to v4 action.");
9592 		priv->action_nat64[i][RTE_FLOW_NAT64_6TO4] = act;
9593 		attr.flags = (enum mlx5dr_action_nat64_flags)
9594 			     (MLX5DR_ACTION_NAT64_V4_TO_V6 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
9595 		act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
9596 		if (!act)
9597 			return rte_flow_error_set(error, rte_errno,
9598 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9599 						  "Failed to create v4 to v6 action.");
9600 		priv->action_nat64[i][RTE_FLOW_NAT64_4TO6] = act;
9601 	}
9602 	return 0;
9603 }
9604 
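/*
 * Editor's note (illustrative sketch): the actions created above are cached
 * per table type and translation direction, e.g. a hypothetical lookup of the
 * NIC Rx 6-to-4 action would be:
 *
 * @code{.c}
 * struct mlx5dr_action *act =
 *	priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_6TO4];
 * @endcode
 */
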
9605 /**
9606  * Create an egress pattern template matching on source SQ.
9607  *
9608  * @param dev
9609  *   Pointer to Ethernet device.
9610  * @param[out] error
9611  *   Pointer to error structure.
9612  *
9613  * @return
9614  *   Pointer to pattern template on success. NULL otherwise, and rte_errno is set.
9615  */
9616 static struct rte_flow_pattern_template *
9617 flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev, struct rte_flow_error *error)
9618 {
9619 	struct rte_flow_pattern_template_attr attr = {
9620 		.relaxed_matching = 0,
9621 		.egress = 1,
9622 	};
9623 	struct mlx5_rte_flow_item_sq sq_mask = {
9624 		.queue = UINT32_MAX,
9625 	};
9626 	struct rte_flow_item items[] = {
9627 		{
9628 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9629 			.mask = &sq_mask,
9630 		},
9631 		{
9632 			.type = RTE_FLOW_ITEM_TYPE_END,
9633 		},
9634 	};
9635 
9636 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9637 }
9638 
9639 static __rte_always_inline uint32_t
9640 flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
9641 {
9642 	struct mlx5_priv *priv = dev->data->dev_private;
9643 	uint32_t mask = priv->sh->dv_regc0_mask;
9644 
9645 	/* Mask is verified during device initialization. Sanity checking here. */
9646 	MLX5_ASSERT(mask != 0);
9647 	/*
9648 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
9649 	 * Sanity checking here.
9650 	 */
9651 	MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask));
9652 	return mask;
9653 }
9654 
9655 static __rte_always_inline uint32_t
9656 flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
9657 {
9658 	struct mlx5_priv *priv = dev->data->dev_private;
9659 	uint32_t tag;
9660 
9661 	/* Mask is verified during device initialization. Sanity checking here. */
9662 	MLX5_ASSERT(priv->vport_meta_mask != 0);
9663 	tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
9664 	/*
9665 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
9666 	 * Sanity checking here.
9667 	 */
9668 	MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
9669 	return tag;
9670 }
9671 
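/*
 * Editor's note - worked example with an illustrative register layout only:
 * assuming dv_regc0_mask == 0x0000ffff, vport_meta_mask == 0xffff0000 and
 * vport_meta_tag == 0x00030000, then:
 *
 * @code{.c}
 * flow_hw_tx_tag_regc_mask(dev);  // == 0x0000ffff, REG_C_0 bits available
 * flow_hw_tx_tag_regc_value(dev); // == 0x00030000 >> 16 == 0x3
 * @endcode
 *
 * The actions template created below then writes this value into REG_C_0 at
 * offset rte_bsf32(0x0000ffff) == 0 with width rte_popcount32(0x0000ffff) == 16.
 */
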
9672 static void
9673 flow_hw_update_action_mask(struct rte_flow_action *action,
9674 			   struct rte_flow_action *mask,
9675 			   enum rte_flow_action_type type,
9676 			   void *conf_v,
9677 			   void *conf_m)
9678 {
9679 	action->type = type;
9680 	action->conf = conf_v;
9681 	mask->type = type;
9682 	mask->conf = conf_m;
9683 }
9684 
9685 /**
9686  * Create an egress actions template with MODIFY_FIELD action for setting unused REG_C_0 bits
9687  * to vport tag and JUMP action to group 1.
9688  *
9689  * If extended metadata mode is enabled, then MODIFY_FIELD action for copying software metadata
9690  * to REG_C_1 is added as well.
9691  *
9692  * @param dev
9693  *   Pointer to Ethernet device.
9694  * @param[out] error
9695  *   Pointer to error structure.
9696  *
9697  * @return
9698  *   Pointer to actions template on success. NULL otherwise, and rte_errno is set.
9699  */
9700 static struct rte_flow_actions_template *
9701 flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev,
9702 					  struct rte_flow_error *error)
9703 {
9704 	uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev);
9705 	uint32_t tag_value = flow_hw_tx_tag_regc_value(dev);
9706 	struct rte_flow_actions_template_attr attr = {
9707 		.egress = 1,
9708 	};
9709 	struct rte_flow_action_modify_field set_tag_v = {
9710 		.operation = RTE_FLOW_MODIFY_SET,
9711 		.dst = {
9712 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9713 			.tag_index = REG_C_0,
9714 			.offset = rte_bsf32(tag_mask),
9715 		},
9716 		.src = {
9717 			.field = RTE_FLOW_FIELD_VALUE,
9718 		},
9719 		.width = rte_popcount32(tag_mask),
9720 	};
9721 	struct rte_flow_action_modify_field set_tag_m = {
9722 		.operation = RTE_FLOW_MODIFY_SET,
9723 		.dst = {
9724 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9725 			.level = UINT8_MAX,
9726 			.tag_index = UINT8_MAX,
9727 			.offset = UINT32_MAX,
9728 		},
9729 		.src = {
9730 			.field = RTE_FLOW_FIELD_VALUE,
9731 		},
9732 		.width = UINT32_MAX,
9733 	};
9734 	struct rte_flow_action_modify_field copy_metadata_v = {
9735 		.operation = RTE_FLOW_MODIFY_SET,
9736 		.dst = {
9737 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9738 			.tag_index = REG_C_1,
9739 		},
9740 		.src = {
9741 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9742 			.tag_index = REG_A,
9743 		},
9744 		.width = 32,
9745 	};
9746 	struct rte_flow_action_modify_field copy_metadata_m = {
9747 		.operation = RTE_FLOW_MODIFY_SET,
9748 		.dst = {
9749 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9750 			.level = UINT8_MAX,
9751 			.tag_index = UINT8_MAX,
9752 			.offset = UINT32_MAX,
9753 		},
9754 		.src = {
9755 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9756 			.level = UINT8_MAX,
9757 			.tag_index = UINT8_MAX,
9758 			.offset = UINT32_MAX,
9759 		},
9760 		.width = UINT32_MAX,
9761 	};
9762 	struct rte_flow_action_jump jump_v = {
9763 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
9764 	};
9765 	struct rte_flow_action_jump jump_m = {
9766 		.group = UINT32_MAX,
9767 	};
9768 	struct rte_flow_action actions_v[4] = { { 0 } };
9769 	struct rte_flow_action actions_m[4] = { { 0 } };
9770 	unsigned int idx = 0;
9771 
9772 	rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
9773 	rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
9774 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
9775 				   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
9776 				   &set_tag_v, &set_tag_m);
9777 	idx++;
9778 	if (MLX5_SH(dev)->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
9779 		flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
9780 					   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
9781 					   &copy_metadata_v, &copy_metadata_m);
9782 		idx++;
9783 	}
9784 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_JUMP,
9785 				   &jump_v, &jump_m);
9786 	idx++;
9787 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_END,
9788 				   NULL, NULL);
9789 	idx++;
9790 	MLX5_ASSERT(idx <= RTE_DIM(actions_v));
9791 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
9792 }
9793 
9794 static void
9795 flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev)
9796 {
9797 	struct mlx5_priv *priv = dev->data->dev_private;
9798 
9799 	if (priv->hw_tx_repr_tagging_tbl) {
9800 		flow_hw_table_destroy(dev, priv->hw_tx_repr_tagging_tbl, NULL);
9801 		priv->hw_tx_repr_tagging_tbl = NULL;
9802 	}
9803 	if (priv->hw_tx_repr_tagging_at) {
9804 		flow_hw_actions_template_destroy(dev, priv->hw_tx_repr_tagging_at, NULL);
9805 		priv->hw_tx_repr_tagging_at = NULL;
9806 	}
9807 	if (priv->hw_tx_repr_tagging_pt) {
9808 		flow_hw_pattern_template_destroy(dev, priv->hw_tx_repr_tagging_pt, NULL);
9809 		priv->hw_tx_repr_tagging_pt = NULL;
9810 	}
9811 }
9812 
9813 /**
9814  * Setup templates and table used to create default Tx flow rules. These default rules
9815  * allow for matching Tx representor traffic using a vport tag placed in unused bits of
9816  * REG_C_0 register.
9817  *
9818  * @param dev
9819  *   Pointer to Ethernet device.
9820  * @param[out] error
9821  *   Pointer to error structure.
9822  *
9823  * @return
9824  *   0 on success, negative errno value otherwise.
9825  */
9826 static int
9827 flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev, struct rte_flow_error *error)
9828 {
9829 	struct mlx5_priv *priv = dev->data->dev_private;
9830 	struct rte_flow_template_table_attr attr = {
9831 		.flow_attr = {
9832 			.group = 0,
9833 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
9834 			.egress = 1,
9835 		},
9836 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
9837 	};
9838 	struct mlx5_flow_template_table_cfg cfg = {
9839 		.attr = attr,
9840 		.external = false,
9841 	};
9842 
9843 	MLX5_ASSERT(priv->sh->config.dv_esw_en);
9844 	MLX5_ASSERT(priv->sh->config.repr_matching);
9845 	priv->hw_tx_repr_tagging_pt =
9846 		flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error);
9847 	if (!priv->hw_tx_repr_tagging_pt)
9848 		goto err;
9849 	priv->hw_tx_repr_tagging_at =
9850 		flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev, error);
9851 	if (!priv->hw_tx_repr_tagging_at)
9852 		goto err;
9853 	priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg,
9854 							    &priv->hw_tx_repr_tagging_pt, 1,
9855 							    &priv->hw_tx_repr_tagging_at, 1,
9856 							    error);
9857 	if (!priv->hw_tx_repr_tagging_tbl)
9858 		goto err;
9859 	return 0;
9860 err:
9861 	flow_hw_cleanup_tx_repr_tagging(dev);
9862 	return -rte_errno;
9863 }
9864 
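/*
 * Editor's note: based on the templates created above, each default Tx
 * representor tagging rule inserted into this table is expected to have
 * roughly the following shape:
 *
 *   pattern: MLX5_RTE_FLOW_ITEM_TYPE_SQ == <representor Tx SQ number>
 *   actions: MODIFY_FIELD REG_C_0[unused bits] = vport tag,
 *            MODIFY_FIELD REG_C_1 = REG_A (only when dv_xmeta_en is
 *            MLX5_XMETA_MODE_META32_HWS),
 *            JUMP to group 1
 */
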
9865 static uint32_t
9866 flow_hw_esw_mgr_regc_marker_mask(struct rte_eth_dev *dev)
9867 {
9868 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
9869 
9870 	/* Mask is verified during device initialization. */
9871 	MLX5_ASSERT(mask != 0);
9872 	return mask;
9873 }
9874 
9875 static uint32_t
9876 flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev)
9877 {
9878 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
9879 
9880 	/* Mask is verified during device initialization. */
9881 	MLX5_ASSERT(mask != 0);
9882 	return RTE_BIT32(rte_bsf32(mask));
9883 }
9884 
9885 /**
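/*
 * Editor's note, illustrative value only: with dv_regc0_mask == 0x0000ffff,
 * flow_hw_esw_mgr_regc_marker_mask() returns 0x0000ffff and
 * flow_hw_esw_mgr_regc_marker() returns 0x1, i.e. the E-Switch Manager marker
 * is the least significant usable bit of REG_C_0.
 */
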
9886  * Creates a flow pattern template used to match on E-Switch Manager.
9887  * This template is used to set up a table for SQ miss default flow.
9888  *
9889  * @param dev
9890  *   Pointer to Ethernet device.
9891  * @param error
9892  *   Pointer to error structure.
9893  *
9894  * @return
9895  *   Pointer to flow pattern template on success, NULL otherwise.
9896  */
9897 static struct rte_flow_pattern_template *
9898 flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev,
9899 					     struct rte_flow_error *error)
9900 {
9901 	struct rte_flow_pattern_template_attr attr = {
9902 		.relaxed_matching = 0,
9903 		.transfer = 1,
9904 	};
9905 	struct rte_flow_item_ethdev port_spec = {
9906 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
9907 	};
9908 	struct rte_flow_item_ethdev port_mask = {
9909 		.port_id = UINT16_MAX,
9910 	};
9911 	struct mlx5_rte_flow_item_sq sq_mask = {
9912 		.queue = UINT32_MAX,
9913 	};
9914 	struct rte_flow_item items[] = {
9915 		{
9916 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
9917 			.spec = &port_spec,
9918 			.mask = &port_mask,
9919 		},
9920 		{
9921 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9922 			.mask = &sq_mask,
9923 		},
9924 		{
9925 			.type = RTE_FLOW_ITEM_TYPE_END,
9926 		},
9927 	};
9928 
9929 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9930 }
9931 
9932 /**
9933  * Creates a flow pattern template used to match REG_C_0 and a SQ.
9934  * Matching on REG_C_0 is set up to match on all bits usable by user-space.
9935  * If traffic was sent from E-Switch Manager, then all usable bits will be set to 0,
9936  * except the least significant bit, which will be set to 1.
9937  *
9938  * This template is used to set up a table for SQ miss default flow.
9939  *
9940  * @param dev
9941  *   Pointer to Ethernet device.
9942  * @param error
9943  *   Pointer to error structure.
9944  *
9945  * @return
9946  *   Pointer to flow pattern template on success, NULL otherwise.
9947  */
9948 static struct rte_flow_pattern_template *
9949 flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev,
9950 					     struct rte_flow_error *error)
9951 {
9952 	struct rte_flow_pattern_template_attr attr = {
9953 		.relaxed_matching = 0,
9954 		.transfer = 1,
9955 	};
9956 	struct rte_flow_item_tag reg_c0_spec = {
9957 		.index = (uint8_t)REG_C_0,
9958 	};
9959 	struct rte_flow_item_tag reg_c0_mask = {
9960 		.index = 0xff,
9961 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
9962 	};
9963 	struct mlx5_rte_flow_item_sq queue_mask = {
9964 		.queue = UINT32_MAX,
9965 	};
9966 	struct rte_flow_item items[] = {
9967 		{
9968 			.type = (enum rte_flow_item_type)
9969 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
9970 			.spec = &reg_c0_spec,
9971 			.mask = &reg_c0_mask,
9972 		},
9973 		{
9974 			.type = (enum rte_flow_item_type)
9975 				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9976 			.mask = &queue_mask,
9977 		},
9978 		{
9979 			.type = RTE_FLOW_ITEM_TYPE_END,
9980 		},
9981 	};
9982 
9983 	return flow_hw_pattern_template_create(dev, &attr, items, error);
9984 }
9985 
9986 /**
9987  * Creates a flow pattern template with unmasked represented port matching.
9988  * This template is used to set up a table for default transfer flows
9989  * directing packets to group 1.
9990  *
9991  * @param dev
9992  *   Pointer to Ethernet device.
9993  * @param error
9994  *   Pointer to error structure.
9995  *
9996  * @return
9997  *   Pointer to flow pattern template on success, NULL otherwise.
9998  */
9999 static struct rte_flow_pattern_template *
10000 flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev,
10001 					  struct rte_flow_error *error)
10002 {
10003 	struct rte_flow_pattern_template_attr attr = {
10004 		.relaxed_matching = 0,
10005 		.transfer = 1,
10006 	};
10007 	struct rte_flow_item_ethdev port_mask = {
10008 		.port_id = UINT16_MAX,
10009 	};
10010 	struct rte_flow_item items[] = {
10011 		{
10012 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
10013 			.mask = &port_mask,
10014 		},
10015 		{
10016 			.type = RTE_FLOW_ITEM_TYPE_END,
10017 		},
10018 	};
10019 
10020 	return flow_hw_pattern_template_create(dev, &attr, items, error);
10021 }
10022 
10023 /*
10024  * Creating a flow pattern template matching all Ethernet packets.
10025  * This template is used to set up a table for default Tx copy (Tx metadata
10026  * to REG_C_1) flow rule usage.
10027  *
10028  * @param dev
10029  *   Pointer to Ethernet device.
10030  * @param error
10031  *   Pointer to error structure.
10032  *
10033  * @return
10034  *   Pointer to flow pattern template on success, NULL otherwise.
10035  */
10036 static struct rte_flow_pattern_template *
10037 flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev,
10038 						     struct rte_flow_error *error)
10039 {
10040 	struct rte_flow_pattern_template_attr tx_pa_attr = {
10041 		.relaxed_matching = 0,
10042 		.egress = 1,
10043 	};
10044 	struct rte_flow_item_eth promisc = {
10045 		.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10046 		.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10047 		.hdr.ether_type = 0,
10048 	};
10049 	struct rte_flow_item eth_all[] = {
10050 		[0] = {
10051 			.type = RTE_FLOW_ITEM_TYPE_ETH,
10052 			.spec = &promisc,
10053 			.mask = &promisc,
10054 		},
10055 		[1] = {
10056 			.type = RTE_FLOW_ITEM_TYPE_END,
10057 		},
10058 	};
10059 
10060 	return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, error);
10061 }
10062 
10063 /*
10064  * Creating a flow pattern template matching all LACP packets, for the NIC
10065  * ingress domain only.
10066  *
10067  * @param dev
10068  *   Pointer to Ethernet device.
10069  * @param error
10070  *   Pointer to error structure.
10071  *
10072  * @return
10073  *   Pointer to flow pattern template on success, NULL otherwise.
10074  */
10075 static struct rte_flow_pattern_template *
10076 flow_hw_create_lacp_rx_pattern_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
10077 {
10078 	struct rte_flow_pattern_template_attr pa_attr = {
10079 		.relaxed_matching = 0,
10080 		.ingress = 1,
10081 	};
10082 	struct rte_flow_item_eth lacp_mask = {
10083 		.dst.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10084 		.src.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10085 		.type = 0xFFFF,
10086 	};
10087 	struct rte_flow_item eth_all[] = {
10088 		[0] = {
10089 			.type = RTE_FLOW_ITEM_TYPE_ETH,
10090 			.mask = &lacp_mask,
10091 		},
10092 		[1] = {
10093 			.type = RTE_FLOW_ITEM_TYPE_END,
10094 		},
10095 	};
10096 	return flow_hw_pattern_template_create(dev, &pa_attr, eth_all, error);
10097 }
10098 
10099 /**
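/*
 * Editor's note: the template above only fixes the ether type mask; the LACP
 * ether type itself (0x8809, slow protocols) is expected to be supplied in
 * the item spec when a control flow rule is created from this template.
 */
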
10100  * Creates a flow actions template with modify field action and masked jump action.
10101  * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)
10102  * to 1, meaning that packet was originated from E-Switch Manager. Jump action
10103  * transfers steering to group 1.
10104  *
10105  * @param dev
10106  *   Pointer to Ethernet device.
10107  * @param error
10108  *   Pointer to error structure.
10109  *
10110  * @return
10111  *   Pointer to flow actions template on success, NULL otherwise.
10112  */
10113 static struct rte_flow_actions_template *
10114 flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev,
10115 					       struct rte_flow_error *error)
10116 {
10117 	uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev);
10118 	uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev);
10119 	struct rte_flow_actions_template_attr attr = {
10120 		.transfer = 1,
10121 	};
10122 	struct rte_flow_action_modify_field set_reg_v = {
10123 		.operation = RTE_FLOW_MODIFY_SET,
10124 		.dst = {
10125 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10126 			.tag_index = REG_C_0,
10127 		},
10128 		.src = {
10129 			.field = RTE_FLOW_FIELD_VALUE,
10130 		},
10131 		.width = rte_popcount32(marker_mask),
10132 	};
10133 	struct rte_flow_action_modify_field set_reg_m = {
10134 		.operation = RTE_FLOW_MODIFY_SET,
10135 		.dst = {
10136 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10137 			.level = UINT8_MAX,
10138 			.tag_index = UINT8_MAX,
10139 			.offset = UINT32_MAX,
10140 		},
10141 		.src = {
10142 			.field = RTE_FLOW_FIELD_VALUE,
10143 		},
10144 		.width = UINT32_MAX,
10145 	};
10146 	struct rte_flow_action_jump jump_v = {
10147 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
10148 	};
10149 	struct rte_flow_action_jump jump_m = {
10150 		.group = UINT32_MAX,
10151 	};
10152 	struct rte_flow_action actions_v[] = {
10153 		{
10154 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10155 			.conf = &set_reg_v,
10156 		},
10157 		{
10158 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10159 			.conf = &jump_v,
10160 		},
10161 		{
10162 			.type = RTE_FLOW_ACTION_TYPE_END,
10163 		}
10164 	};
10165 	struct rte_flow_action actions_m[] = {
10166 		{
10167 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10168 			.conf = &set_reg_m,
10169 		},
10170 		{
10171 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10172 			.conf = &jump_m,
10173 		},
10174 		{
10175 			.type = RTE_FLOW_ACTION_TYPE_END,
10176 		}
10177 	};
10178 
10179 	set_reg_v.dst.offset = rte_bsf32(marker_mask);
10180 	rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
10181 	rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
10182 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
10183 }
10184 
10185 /**
10186  * Creates a flow actions template with an unmasked JUMP action. Flows
10187  * based on this template will perform a jump to some group. This template
10188  * is used to set up tables for control flows.
10189  *
10190  * @param dev
10191  *   Pointer to Ethernet device.
10192  * @param group
10193  *   Destination group for this action template.
10194  * @param error
10195  *   Pointer to error structure.
10196  *
10197  * @return
10198  *   Pointer to flow actions template on success, NULL otherwise.
10199  */
10200 static struct rte_flow_actions_template *
10201 flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
10202 					  uint32_t group,
10203 					  struct rte_flow_error *error)
10204 {
10205 	struct rte_flow_actions_template_attr attr = {
10206 		.transfer = 1,
10207 	};
10208 	struct rte_flow_action_jump jump_v = {
10209 		.group = group,
10210 	};
10211 	struct rte_flow_action_jump jump_m = {
10212 		.group = UINT32_MAX,
10213 	};
10214 	struct rte_flow_action actions_v[] = {
10215 		{
10216 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10217 			.conf = &jump_v,
10218 		},
10219 		{
10220 			.type = RTE_FLOW_ACTION_TYPE_END,
10221 		}
10222 	};
10223 	struct rte_flow_action actions_m[] = {
10224 		{
10225 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10226 			.conf = &jump_m,
10227 		},
10228 		{
10229 			.type = RTE_FLOW_ACTION_TYPE_END,
10230 		}
10231 	};
10232 
10233 	return flow_hw_actions_template_create(dev, &attr, actions_v,
10234 					       actions_m, error);
10235 }
10236 
10237 /**
10238  * Creates a flow action template with a unmasked REPRESENTED_PORT action.
10239  * It is used to create control flow tables.
10240  *
10241  * @param dev
10242  *   Pointer to Ethernet device.
10243  * @param error
10244  *   Pointer to error structure.
10245  *
10246  * @return
10247  *   Pointer to flow action template on success, NULL otherwise.
10248  */
10249 static struct rte_flow_actions_template *
10250 flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev,
10251 					  struct rte_flow_error *error)
10252 {
10253 	struct rte_flow_actions_template_attr attr = {
10254 		.transfer = 1,
10255 	};
10256 	struct rte_flow_action_ethdev port_v = {
10257 		.port_id = 0,
10258 	};
10259 	struct rte_flow_action actions_v[] = {
10260 		{
10261 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
10262 			.conf = &port_v,
10263 		},
10264 		{
10265 			.type = RTE_FLOW_ACTION_TYPE_END,
10266 		}
10267 	};
10268 	struct rte_flow_action_ethdev port_m = {
10269 		.port_id = 0,
10270 	};
10271 	struct rte_flow_action actions_m[] = {
10272 		{
10273 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
10274 			.conf = &port_m,
10275 		},
10276 		{
10277 			.type = RTE_FLOW_ACTION_TYPE_END,
10278 		}
10279 	};
10280 
10281 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
10282 }
10283 
10284 /*
10285  * Creating an actions template to use header modify action for register
10286  * copying. This template is used to set up a table for copy flow.
10287  *
10288  * @param dev
10289  *   Pointer to Ethernet device.
10290  * @param error
10291  *   Pointer to error structure.
10292  *
10293  * @return
10294  *   Pointer to flow actions template on success, NULL otherwise.
10295  */
10296 static struct rte_flow_actions_template *
10297 flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev,
10298 						     struct rte_flow_error *error)
10299 {
10300 	struct rte_flow_actions_template_attr tx_act_attr = {
10301 		.egress = 1,
10302 	};
10303 	const struct rte_flow_action_modify_field mreg_action = {
10304 		.operation = RTE_FLOW_MODIFY_SET,
10305 		.dst = {
10306 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10307 			.tag_index = REG_C_1,
10308 		},
10309 		.src = {
10310 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10311 			.tag_index = REG_A,
10312 		},
10313 		.width = 32,
10314 	};
10315 	const struct rte_flow_action_modify_field mreg_mask = {
10316 		.operation = RTE_FLOW_MODIFY_SET,
10317 		.dst = {
10318 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10319 			.level = UINT8_MAX,
10320 			.tag_index = UINT8_MAX,
10321 			.offset = UINT32_MAX,
10322 		},
10323 		.src = {
10324 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10325 			.level = UINT8_MAX,
10326 			.tag_index = UINT8_MAX,
10327 			.offset = UINT32_MAX,
10328 		},
10329 		.width = UINT32_MAX,
10330 	};
10331 	const struct rte_flow_action_jump jump_action = {
10332 		.group = 1,
10333 	};
10334 	const struct rte_flow_action_jump jump_mask = {
10335 		.group = UINT32_MAX,
10336 	};
10337 	const struct rte_flow_action actions[] = {
10338 		[0] = {
10339 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10340 			.conf = &mreg_action,
10341 		},
10342 		[1] = {
10343 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10344 			.conf = &jump_action,
10345 		},
10346 		[2] = {
10347 			.type = RTE_FLOW_ACTION_TYPE_END,
10348 		},
10349 	};
10350 	const struct rte_flow_action masks[] = {
10351 		[0] = {
10352 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10353 			.conf = &mreg_mask,
10354 		},
10355 		[1] = {
10356 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
10357 			.conf = &jump_mask,
10358 		},
10359 		[2] = {
10360 			.type = RTE_FLOW_ACTION_TYPE_END,
10361 		},
10362 	};
10363 
10364 	return flow_hw_actions_template_create(dev, &tx_act_attr, actions,
10365 					       masks, error);
10366 }
10367 
10368 /*
10369  * Creating an actions template that uses the default miss action to re-route
10370  * packets to the kernel driver stack.
10371  * On the root table, only the DEFAULT_MISS action can be used.
10372  *
10373  * @param dev
10374  *   Pointer to Ethernet device.
10375  * @param error
10376  *   Pointer to error structure.
10377  *
10378  * @return
10379  *   Pointer to flow actions template on success, NULL otherwise.
10380  */
10381 static struct rte_flow_actions_template *
10382 flow_hw_create_lacp_rx_actions_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
10383 {
10384 	struct rte_flow_actions_template_attr act_attr = {
10385 		.ingress = 1,
10386 	};
10387 	const struct rte_flow_action actions[] = {
10388 		[0] = {
10389 			.type = (enum rte_flow_action_type)
10390 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
10391 		},
10392 		[1] = {
10393 			.type = RTE_FLOW_ACTION_TYPE_END,
10394 		},
10395 	};
10396 
10397 	return flow_hw_actions_template_create(dev, &act_attr, actions, actions, error);
10398 }
10399 
10400 /**
10401  * Creates a control flow table used to transfer traffic from E-Switch Manager
10402  * and TX queues from group 0 to group 1.
10403  *
10404  * @param dev
10405  *   Pointer to Ethernet device.
10406  * @param it
10407  *   Pointer to flow pattern template.
10408  * @param at
10409  *   Pointer to flow actions template.
10410  * @param error
10411  *   Pointer to error structure.
10412  *
10413  * @return
10414  *   Pointer to flow table on success, NULL otherwise.
10415  */
10416 static struct rte_flow_template_table*
10417 flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
10418 				       struct rte_flow_pattern_template *it,
10419 				       struct rte_flow_actions_template *at,
10420 				       struct rte_flow_error *error)
10421 {
10422 	struct rte_flow_template_table_attr attr = {
10423 		.flow_attr = {
10424 			.group = 0,
10425 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
10426 			.ingress = 0,
10427 			.egress = 0,
10428 			.transfer = 1,
10429 		},
10430 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10431 	};
10432 	struct mlx5_flow_template_table_cfg cfg = {
10433 		.attr = attr,
10434 		.external = false,
10435 	};
10436 
10437 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10438 }
10439 
10440 
10441 /**
10442  * Creates a control flow table used to transfer traffic from E-Switch Manager
10443  * and TX queues from group 0 to group 1.
10444  *
10445  * @param dev
10446  *   Pointer to Ethernet device.
10447  * @param it
10448  *   Pointer to flow pattern template.
10449  * @param at
10450  *   Pointer to flow actions template.
10451  * @param error
10452  *   Pointer to error structure.
10453  *
10454  * @return
10455  *   Pointer to flow table on success, NULL otherwise.
10456  */
10457 static struct rte_flow_template_table*
10458 flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
10459 				  struct rte_flow_pattern_template *it,
10460 				  struct rte_flow_actions_template *at,
10461 				  struct rte_flow_error *error)
10462 {
10463 	struct rte_flow_template_table_attr attr = {
10464 		.flow_attr = {
10465 			.group = 1,
10466 			.priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,
10467 			.ingress = 0,
10468 			.egress = 0,
10469 			.transfer = 1,
10470 		},
10471 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10472 	};
10473 	struct mlx5_flow_template_table_cfg cfg = {
10474 		.attr = attr,
10475 		.external = false,
10476 	};
10477 
10478 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10479 }
10480 
10481 /*
10482  * Creating the default Tx metadata copy table on NIC Tx group 0.
10483  *
10484  * @param dev
10485  *   Pointer to Ethernet device.
10486  * @param pt
10487  *   Pointer to flow pattern template.
10488  * @param at
10489  *   Pointer to flow actions template.
10490  * @param error
10491  *   Pointer to error structure.
10492  *
10493  * @return
10494  *   Pointer to flow table on success, NULL otherwise.
10495  */
10496 static struct rte_flow_template_table*
10497 flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
10498 					  struct rte_flow_pattern_template *pt,
10499 					  struct rte_flow_actions_template *at,
10500 					  struct rte_flow_error *error)
10501 {
10502 	struct rte_flow_template_table_attr tx_tbl_attr = {
10503 		.flow_attr = {
10504 			.group = 0, /* Root */
10505 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
10506 			.egress = 1,
10507 		},
10508 		.nb_flows = 1, /* One default flow rule for all. */
10509 	};
10510 	struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
10511 		.attr = tx_tbl_attr,
10512 		.external = false,
10513 	};
10514 
10515 	return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, error);
10516 }
10517 
10518 /**
10519  * Creates a control flow table used to transfer traffic
10520  * from group 0 to group 1.
10521  *
10522  * @param dev
10523  *   Pointer to Ethernet device.
10524  * @param it
10525  *   Pointer to flow pattern template.
10526  * @param at
10527  *   Pointer to flow actions template.
10528  * @param error
10529  *   Pointer to error structure.
10530  *
10531  * @return
10532  *   Pointer to flow table on success, NULL otherwise.
10533  */
10534 static struct rte_flow_template_table *
10535 flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
10536 			       struct rte_flow_pattern_template *it,
10537 			       struct rte_flow_actions_template *at,
10538 			       struct rte_flow_error *error)
10539 {
10540 	struct rte_flow_template_table_attr attr = {
10541 		.flow_attr = {
10542 			.group = 0,
10543 			.priority = 0,
10544 			.ingress = 0,
10545 			.egress = 0,
10546 			.transfer = 1,
10547 		},
10548 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10549 	};
10550 	struct mlx5_flow_template_table_cfg cfg = {
10551 		.attr = attr,
10552 		.external = false,
10553 	};
10554 
10555 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10556 }
10557 
10558 /**
10559  * Cleans up all template tables and the pattern and actions templates used for
10560  * FDB control flow rules.
10561  *
10562  * @param dev
10563  *   Pointer to Ethernet device.
10564  */
10565 static void
10566 flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev)
10567 {
10568 	struct mlx5_priv *priv = dev->data->dev_private;
10569 	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
10570 
10571 	if (!priv->hw_ctrl_fdb)
10572 		return;
10573 	hw_ctrl_fdb = priv->hw_ctrl_fdb;
10574 	/* Clean up templates used for LACP default miss table. */
10575 	if (hw_ctrl_fdb->hw_lacp_rx_tbl)
10576 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL));
10577 	if (hw_ctrl_fdb->lacp_rx_actions_tmpl)
10578 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl,
10579 			   NULL));
10580 	if (hw_ctrl_fdb->lacp_rx_items_tmpl)
10581 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
10582 			   NULL));
10583 	/* Clean up templates used for default Tx metadata copy. */
10584 	if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
10585 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL));
10586 	if (hw_ctrl_fdb->tx_meta_actions_tmpl)
10587 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl,
10588 			   NULL));
10589 	if (hw_ctrl_fdb->tx_meta_items_tmpl)
10590 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
10591 			   NULL));
10592 	/* Clean up templates used for default FDB jump rule. */
10593 	if (hw_ctrl_fdb->hw_esw_zero_tbl)
10594 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL));
10595 	if (hw_ctrl_fdb->jump_one_actions_tmpl)
10596 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl,
10597 			   NULL));
10598 	if (hw_ctrl_fdb->port_items_tmpl)
10599 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl,
10600 			   NULL));
10601 	/* Clean up templates used for default SQ miss flow rules - non-root table. */
10602 	if (hw_ctrl_fdb->hw_esw_sq_miss_tbl)
10603 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL));
10604 	if (hw_ctrl_fdb->regc_sq_items_tmpl)
10605 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
10606 			   NULL));
10607 	if (hw_ctrl_fdb->port_actions_tmpl)
10608 		claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl,
10609 			   NULL));
10610 	/* Clean up templates used for default SQ miss flow rules - root table. */
10611 	if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl)
10612 		claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL));
10613 	if (hw_ctrl_fdb->regc_jump_actions_tmpl)
10614 		claim_zero(flow_hw_actions_template_destroy(dev,
10615 			   hw_ctrl_fdb->regc_jump_actions_tmpl, NULL));
10616 	if (hw_ctrl_fdb->esw_mgr_items_tmpl)
10617 		claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
10618 			   NULL));
10619 	/* Clean up templates structure for FDB control flow rules. */
10620 	mlx5_free(hw_ctrl_fdb);
10621 	priv->hw_ctrl_fdb = NULL;
10622 }
10623 
10624 /*
10625  * Create a table on the root group for redirecting LACP traffic.
10626  *
10627  * @param dev
10628  *   Pointer to Ethernet device.
10629  * @param it
10630  *   Pointer to flow pattern template.
10631  * @param at
10632  *   Pointer to flow actions template.
10633  *
10634  * @return
10635  *   Pointer to flow table on success, NULL otherwise.
10636  */
10637 static struct rte_flow_template_table *
10638 flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev,
10639 			     struct rte_flow_pattern_template *it,
10640 			     struct rte_flow_actions_template *at,
10641 			     struct rte_flow_error *error)
10642 {
10643 	struct rte_flow_template_table_attr attr = {
10644 		.flow_attr = {
10645 			.group = 0,
10646 			.priority = 0,
10647 			.ingress = 1,
10648 			.egress = 0,
10649 			.transfer = 0,
10650 		},
10651 		.nb_flows = 1,
10652 	};
10653 	struct mlx5_flow_template_table_cfg cfg = {
10654 		.attr = attr,
10655 		.external = false,
10656 	};
10657 
10658 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10659 }
10660 
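/*
 * Editor's note - summary of the control flow tables set up by
 * flow_hw_create_ctrl_tables() below:
 *
 *   - SQ miss root table (group 0): mark REG_C_0 and jump to group 1
 *     (only when fdb_def_rule is enabled).
 *   - SQ miss table (group 1): forward marked SQ traffic to a represented
 *     port (only when fdb_def_rule is enabled).
 *   - Default jump table (group 0): jump to group 1 for represented port
 *     traffic (only when fdb_def_rule is enabled).
 *   - Tx metadata copy table: copy REG_A to REG_C_1 and jump to group 1
 *     (only without representor matching and with META32_HWS metadata mode).
 *   - LACP Rx table: default miss to the kernel for LACP traffic (only for
 *     the bonding master when lacp_by_user is not set).
 */
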
10661 /**
10662  * Creates a set of flow tables used to create control flows
10663  * when E-Switch is engaged.
10664  *
10665  * @param dev
10666  *   Pointer to Ethernet device.
10667  * @param error
10668  *   Pointer to error structure.
10669  *
10670  * @return
10671  *   0 on success, negative value otherwise
10672  */
10673 static int
10674 flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
10675 {
10676 	struct mlx5_priv *priv = dev->data->dev_private;
10677 	struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
10678 	uint32_t xmeta = priv->sh->config.dv_xmeta_en;
10679 	uint32_t repr_matching = priv->sh->config.repr_matching;
10680 	uint32_t fdb_def_rule = priv->sh->config.fdb_def_rule;
10681 
10682 	MLX5_ASSERT(priv->hw_ctrl_fdb == NULL);
10683 	hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY);
10684 	if (!hw_ctrl_fdb) {
10685 		DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates",
10686 			dev->data->port_id);
10687 		rte_errno = ENOMEM;
10688 		goto err;
10689 	}
10690 	priv->hw_ctrl_fdb = hw_ctrl_fdb;
10691 	if (fdb_def_rule) {
10692 		/* Create templates and table for default SQ miss flow rules - root table. */
10693 		hw_ctrl_fdb->esw_mgr_items_tmpl =
10694 				flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
10695 		if (!hw_ctrl_fdb->esw_mgr_items_tmpl) {
10696 			DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
10697 				" template for control flows", dev->data->port_id);
10698 			goto err;
10699 		}
10700 		hw_ctrl_fdb->regc_jump_actions_tmpl =
10701 				flow_hw_create_ctrl_regc_jump_actions_template(dev, error);
10702 		if (!hw_ctrl_fdb->regc_jump_actions_tmpl) {
10703 			DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
10704 				" for control flows", dev->data->port_id);
10705 			goto err;
10706 		}
10707 		hw_ctrl_fdb->hw_esw_sq_miss_root_tbl =
10708 				flow_hw_create_ctrl_sq_miss_root_table
10709 					(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
10710 					 hw_ctrl_fdb->regc_jump_actions_tmpl, error);
10711 		if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) {
10712 			DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
10713 				" for control flows", dev->data->port_id);
10714 			goto err;
10715 		}
10716 		/* Create templates and table for default SQ miss flow rules - non-root table. */
10717 		hw_ctrl_fdb->regc_sq_items_tmpl =
10718 				flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
10719 		if (!hw_ctrl_fdb->regc_sq_items_tmpl) {
10720 			DRV_LOG(ERR, "port %u failed to create SQ item template for"
10721 				" control flows", dev->data->port_id);
10722 			goto err;
10723 		}
10724 		hw_ctrl_fdb->port_actions_tmpl =
10725 				flow_hw_create_ctrl_port_actions_template(dev, error);
10726 		if (!hw_ctrl_fdb->port_actions_tmpl) {
10727 			DRV_LOG(ERR, "port %u failed to create port action template"
10728 				" for control flows", dev->data->port_id);
10729 			goto err;
10730 		}
10731 		hw_ctrl_fdb->hw_esw_sq_miss_tbl =
10732 				flow_hw_create_ctrl_sq_miss_table
10733 					(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
10734 					 hw_ctrl_fdb->port_actions_tmpl, error);
10735 		if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
10736 			DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
10737 				" for control flows", dev->data->port_id);
10738 			goto err;
10739 		}
10740 		/* Create templates and table for default FDB jump flow rules. */
10741 		hw_ctrl_fdb->port_items_tmpl =
10742 				flow_hw_create_ctrl_port_pattern_template(dev, error);
10743 		if (!hw_ctrl_fdb->port_items_tmpl) {
10744 			DRV_LOG(ERR, "port %u failed to create represented port item template for"
10745 				" control flows", dev->data->port_id);
10746 			goto err;
10747 		}
10748 		hw_ctrl_fdb->jump_one_actions_tmpl =
10749 				flow_hw_create_ctrl_jump_actions_template
10750 					(dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
10751 		if (!hw_ctrl_fdb->jump_one_actions_tmpl) {
10752 			DRV_LOG(ERR, "port %u failed to create jump action template"
10753 				" for control flows", dev->data->port_id);
10754 			goto err;
10755 		}
10756 		hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table
10757 				(dev, hw_ctrl_fdb->port_items_tmpl,
10758 				 hw_ctrl_fdb->jump_one_actions_tmpl, error);
10759 		if (!hw_ctrl_fdb->hw_esw_zero_tbl) {
10760 			DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
10761 				" for control flows", dev->data->port_id);
10762 			goto err;
10763 		}
10764 	}
10765 	/* Create templates and table for default Tx metadata copy flow rule. */
10766 	if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) {
10767 		hw_ctrl_fdb->tx_meta_items_tmpl =
10768 			flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error);
10769 		if (!hw_ctrl_fdb->tx_meta_items_tmpl) {
10770 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy pattern"
10771 				" template for control flows", dev->data->port_id);
10772 			goto err;
10773 		}
10774 		hw_ctrl_fdb->tx_meta_actions_tmpl =
10775 			flow_hw_create_tx_default_mreg_copy_actions_template(dev, error);
10776 		if (!hw_ctrl_fdb->tx_meta_actions_tmpl) {
10777 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy actions"
10778 				" template for control flows", dev->data->port_id);
10779 			goto err;
10780 		}
10781 		hw_ctrl_fdb->hw_tx_meta_cpy_tbl =
10782 			flow_hw_create_tx_default_mreg_copy_table
10783 				(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
10784 				 hw_ctrl_fdb->tx_meta_actions_tmpl, error);
10785 		if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) {
10786 			DRV_LOG(ERR, "port %u failed to create table for default"
10787 				" Tx metadata copy flow rule", dev->data->port_id);
10788 			goto err;
10789 		}
10790 	}
10791 	/* Create LACP default miss table. */
10792 	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) {
10793 		hw_ctrl_fdb->lacp_rx_items_tmpl =
10794 				flow_hw_create_lacp_rx_pattern_template(dev, error);
10795 		if (!hw_ctrl_fdb->lacp_rx_items_tmpl) {
10796 			DRV_LOG(ERR, "port %u failed to create pattern template"
10797 				" for LACP Rx traffic", dev->data->port_id);
10798 			goto err;
10799 		}
10800 		hw_ctrl_fdb->lacp_rx_actions_tmpl =
10801 				flow_hw_create_lacp_rx_actions_template(dev, error);
10802 		if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) {
10803 			DRV_LOG(ERR, "port %u failed to create actions template"
10804 				" for LACP Rx traffic", dev->data->port_id);
10805 			goto err;
10806 		}
10807 		hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table
10808 				(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
10809 				 hw_ctrl_fdb->lacp_rx_actions_tmpl, error);
10810 		if (!hw_ctrl_fdb->hw_lacp_rx_tbl) {
10811 			DRV_LOG(ERR, "port %u failed to create template table"
10812 				" for LACP Rx traffic", dev->data->port_id);
10813 			goto err;
10814 		}
10815 	}
10816 	return 0;
10817 
10818 err:
10819 	flow_hw_cleanup_ctrl_fdb_tables(dev);
10820 	return -EINVAL;
10821 }
10822 
10823 static void
10824 flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,
10825 		       struct mlx5_aso_ct_pools_mng *ct_mng)
10826 {
10827 	struct mlx5_priv *priv = dev->data->dev_private;
10828 
10829 	mlx5_aso_ct_queue_uninit(priv->sh, ct_mng);
10830 	mlx5_free(ct_mng);
10831 }
10832 
10833 static void
10834 flow_hw_ct_pool_destroy(struct rte_eth_dev *dev,
10835 			struct mlx5_aso_ct_pool *pool)
10836 {
10837 	struct mlx5_priv *priv = dev->data->dev_private;
10838 
10839 	if (pool->dr_action)
10840 		mlx5dr_action_destroy(pool->dr_action);
10841 	if (!priv->shared_host) {
10842 		if (pool->devx_obj)
10843 			claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
10844 		if (pool->cts)
10845 			mlx5_ipool_destroy(pool->cts);
10846 	}
10847 	mlx5_free(pool);
10848 }
10849 
10850 static struct mlx5_aso_ct_pool *
10851 flow_hw_ct_pool_create(struct rte_eth_dev *dev,
10852 		       uint32_t nb_conn_tracks)
10853 {
10854 	struct mlx5_priv *priv = dev->data->dev_private;
10855 	struct mlx5_aso_ct_pool *pool;
10856 	struct mlx5_devx_obj *obj;
10857 	uint32_t nb_cts = rte_align32pow2(nb_conn_tracks);
10858 	uint32_t log_obj_size = rte_log2_u32(nb_cts);
10859 	struct mlx5_indexed_pool_config cfg = {
10860 		.size = sizeof(struct mlx5_aso_ct_action),
10861 		.trunk_size = 1 << 12,
10862 		.per_core_cache = 1 << 13,
10863 		.need_lock = 1,
10864 		.release_mem_en = !!priv->sh->config.reclaim_mode,
10865 		.malloc = mlx5_malloc,
10866 		.free = mlx5_free,
10867 		.type = "mlx5_hw_ct_action",
10868 	};
10869 	int reg_id;
10870 	uint32_t flags = 0;
10871 
10872 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
10873 	if (!pool) {
10874 		rte_errno = ENOMEM;
10875 		return NULL;
10876 	}
10877 	if (!priv->shared_host) {
10878 		/*
10879 		 * No need for a local cache if the number of CT objects is small, since
10880 		 * the flow insertion rate will be very limited in that case. Shrink the
10881 		 * trunk size to that number, below the default trunk size of 4K.
10882 		 */
10883 		if (nb_cts <= cfg.trunk_size) {
10884 			cfg.per_core_cache = 0;
10885 			cfg.trunk_size = nb_cts;
10886 		} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
10887 			cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
10888 		}
10889 		cfg.max_idx = nb_cts;
10890 		pool->cts = mlx5_ipool_create(&cfg);
10891 		if (!pool->cts)
10892 			goto err;
10893 		obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
10894 								  priv->sh->cdev->pdn,
10895 								  log_obj_size);
10896 		if (!obj) {
10897 			rte_errno = ENODATA;
10898 			DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
10899 			goto err;
10900 		}
10901 		pool->devx_obj = obj;
10902 	} else {
10903 		struct rte_eth_dev *host_dev = priv->shared_host;
10904 		struct mlx5_priv *host_priv = host_dev->data->dev_private;
10905 
10906 		pool->devx_obj = host_priv->hws_ctpool->devx_obj;
10907 		pool->cts = host_priv->hws_ctpool->cts;
10908 		MLX5_ASSERT(pool->cts);
10909 		MLX5_ASSERT(!nb_conn_tracks);
10910 	}
10911 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
10912 	flags |= MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
10913 	if (priv->sh->config.dv_esw_en && priv->master)
10914 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
10915 	pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
10916 						      (struct mlx5dr_devx_obj *)pool->devx_obj,
10917 						      reg_id - REG_C_0, flags);
10918 	if (!pool->dr_action)
10919 		goto err;
10920 	pool->sq = priv->ct_mng->aso_sqs;
10921 	/* Assign the last extra ASO SQ as public SQ. */
10922 	pool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];
10923 	return pool;
10924 err:
10925 	flow_hw_ct_pool_destroy(dev, pool);
10926 	return NULL;
10927 }
10928 
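/*
 * Editor's note - illustrative sizing of the CT index pool created above
 * (nb_cts is nb_conn_tracks aligned up to a power of two):
 *
 *   nb_cts <= 4096 (trunk size)            -> trunk_size = nb_cts, no cache
 *   nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD -> per-core cache of 512 entries
 *   larger                                 -> default per-core cache of 8192
 */
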
10929 static int
10930 mlx5_flow_ct_init(struct rte_eth_dev *dev,
10931 		  uint32_t nb_conn_tracks,
10932 		  uint16_t nb_queue)
10933 {
10934 	struct mlx5_priv *priv = dev->data->dev_private;
10935 	uint32_t mem_size;
10936 	int ret = -ENOMEM;
10937 
10938 	if (!priv->shared_host) {
10939 		mem_size = sizeof(struct mlx5_aso_sq) * nb_queue +
10940 				sizeof(*priv->ct_mng);
10941 		priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
10942 						RTE_CACHE_LINE_SIZE,
10943 						SOCKET_ID_ANY);
10944 		if (!priv->ct_mng)
10945 			goto err;
10946 		ret = mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng,
10947 						nb_queue);
10948 		if (ret)
10949 			goto err;
10950 	}
10951 	priv->hws_ctpool = flow_hw_ct_pool_create(dev, nb_conn_tracks);
10952 	if (!priv->hws_ctpool)
10953 		goto err;
10954 	priv->sh->ct_aso_en = 1;
10955 	return 0;
10956 
10957 err:
10958 	if (priv->hws_ctpool) {
10959 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
10960 		priv->hws_ctpool = NULL;
10961 	}
10962 	if (priv->ct_mng) {
10963 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
10964 		priv->ct_mng = NULL;
10965 	}
10966 	return ret;
10967 }
10968 
10969 static void
10970 flow_hw_destroy_vlan(struct rte_eth_dev *dev)
10971 {
10972 	struct mlx5_priv *priv = dev->data->dev_private;
10973 	enum mlx5dr_table_type i;
10974 
10975 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
10976 		if (priv->hw_pop_vlan[i]) {
10977 			mlx5dr_action_destroy(priv->hw_pop_vlan[i]);
10978 			priv->hw_pop_vlan[i] = NULL;
10979 		}
10980 		if (priv->hw_push_vlan[i]) {
10981 			mlx5dr_action_destroy(priv->hw_push_vlan[i]);
10982 			priv->hw_push_vlan[i] = NULL;
10983 		}
10984 	}
10985 }
10986 
10987 static int
10988 flow_hw_create_vlan(struct rte_eth_dev *dev)
10989 {
10990 	struct mlx5_priv *priv = dev->data->dev_private;
10991 	enum mlx5dr_table_type i;
10992 	const enum mlx5dr_action_flags flags[MLX5DR_TABLE_TYPE_MAX] = {
10993 		MLX5DR_ACTION_FLAG_HWS_RX,
10994 		MLX5DR_ACTION_FLAG_HWS_TX,
10995 		MLX5DR_ACTION_FLAG_HWS_FDB
10996 	};
10997 
10998 	/* rte_errno is set in the mlx5dr_action* functions. */
10999 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++) {
11000 		priv->hw_pop_vlan[i] =
11001 			mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]);
11002 		if (!priv->hw_pop_vlan[i])
11003 			return -rte_errno;
11004 		priv->hw_push_vlan[i] =
11005 			mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]);
11006 		if (!priv->hw_push_vlan[i])
11007 			return -rte_errno;
11008 	}
11009 	if (priv->sh->config.dv_esw_en && priv->master) {
11010 		priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB] =
11011 			mlx5dr_action_create_pop_vlan
11012 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
11013 		if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB])
11014 			return -rte_errno;
11015 		priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB] =
11016 			mlx5dr_action_create_push_vlan
11017 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
11018 		if (!priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB])
11019 			return -rte_errno;
11020 	}
11021 	return 0;
11022 }
11023 
11024 static void
11025 flow_hw_cleanup_ctrl_rx_tables(struct rte_eth_dev *dev)
11026 {
11027 	struct mlx5_priv *priv = dev->data->dev_private;
11028 	unsigned int i;
11029 	unsigned int j;
11030 
11031 	if (!priv->hw_ctrl_rx)
11032 		return;
11033 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11034 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11035 			struct rte_flow_template_table *tbl = priv->hw_ctrl_rx->tables[i][j].tbl;
11036 			struct rte_flow_pattern_template *pt = priv->hw_ctrl_rx->tables[i][j].pt;
11037 
11038 			if (tbl)
11039 				claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11040 			if (pt)
11041 				claim_zero(flow_hw_pattern_template_destroy(dev, pt, NULL));
11042 		}
11043 	}
11044 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++i) {
11045 		struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[i];
11046 
11047 		if (at)
11048 			claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
11049 	}
11050 	mlx5_free(priv->hw_ctrl_rx);
11051 	priv->hw_ctrl_rx = NULL;
11052 }
11053 
11054 static uint64_t
11055 flow_hw_ctrl_rx_rss_type_hash_types(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11056 {
11057 	switch (rss_type) {
11058 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP:
11059 		return 0;
11060 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
11061 		return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
11062 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11063 		return RTE_ETH_RSS_NONFRAG_IPV4_UDP;
11064 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11065 		return RTE_ETH_RSS_NONFRAG_IPV4_TCP;
11066 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
11067 		return RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
11068 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11069 		return RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX;
11070 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11071 		return RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX;
11072 	default:
11073 		/* Should not reach here. */
11074 		MLX5_ASSERT(false);
11075 		return 0;
11076 	}
11077 }
11078 
11079 static struct rte_flow_actions_template *
11080 flow_hw_create_ctrl_rx_rss_template(struct rte_eth_dev *dev,
11081 				    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11082 {
11083 	struct mlx5_priv *priv = dev->data->dev_private;
11084 	struct rte_flow_actions_template_attr attr = {
11085 		.ingress = 1,
11086 	};
11087 	uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
11088 	struct rte_flow_action_rss rss_conf = {
11089 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
11090 		.level = 0,
11091 		.types = 0,
11092 		.key_len = priv->rss_conf.rss_key_len,
11093 		.key = priv->rss_conf.rss_key,
11094 		.queue_num = priv->reta_idx_n,
11095 		.queue = queue,
11096 	};
11097 	struct rte_flow_action actions[] = {
11098 		{
11099 			.type = RTE_FLOW_ACTION_TYPE_RSS,
11100 			.conf = &rss_conf,
11101 		},
11102 		{
11103 			.type = RTE_FLOW_ACTION_TYPE_END,
11104 		}
11105 	};
11106 	struct rte_flow_action masks[] = {
11107 		{
11108 			.type = RTE_FLOW_ACTION_TYPE_RSS,
11109 			.conf = &rss_conf,
11110 		},
11111 		{
11112 			.type = RTE_FLOW_ACTION_TYPE_END,
11113 		}
11114 	};
11115 	struct rte_flow_actions_template *at;
11116 	struct rte_flow_error error;
11117 	unsigned int i;
11118 
11119 	MLX5_ASSERT(priv->reta_idx_n > 0 && priv->reta_idx);
	/* Select the proper RSS hash types and, based on them, configure the actions template. */
11121 	rss_conf.types = flow_hw_ctrl_rx_rss_type_hash_types(rss_type);
11122 	if (rss_conf.types) {
11123 		for (i = 0; i < priv->reta_idx_n; ++i)
11124 			queue[i] = (*priv->reta_idx)[i];
11125 	} else {
11126 		rss_conf.queue_num = 1;
11127 		queue[0] = (*priv->reta_idx)[0];
11128 	}
11129 	at = flow_hw_actions_template_create(dev, &attr, actions, masks, &error);
11130 	if (!at)
11131 		DRV_LOG(ERR,
11132 			"Failed to create ctrl flow actions template: rte_errno(%d), type(%d): %s",
11133 			rte_errno, error.type,
11134 			error.message ? error.message : "(no stated reason)");
11135 	return at;
11136 }
11137 
11138 static uint32_t ctrl_rx_rss_priority_map[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX] = {
11139 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP] = MLX5_HW_CTRL_RX_PRIO_L2,
11140 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4] = MLX5_HW_CTRL_RX_PRIO_L3,
11141 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
11142 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
11143 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6] = MLX5_HW_CTRL_RX_PRIO_L3,
11144 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
11145 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
11146 };
11147 
11148 static uint32_t ctrl_rx_nb_flows_map[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] = {
11149 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL] = 1,
11150 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST] = 1,
11151 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST] = 1,
11152 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11153 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST] = 1,
11154 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11155 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST] = 1,
11156 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
11157 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC] = MLX5_MAX_UC_MAC_ADDRESSES,
11158 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN] =
11159 			MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
11160 };
11161 
11162 static struct rte_flow_template_table_attr
11163 flow_hw_get_ctrl_rx_table_attr(enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
11164 			       const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11165 {
11166 	return (struct rte_flow_template_table_attr){
11167 		.flow_attr = {
11168 			.group = 0,
11169 			.priority = ctrl_rx_rss_priority_map[rss_type],
11170 			.ingress = 1,
11171 		},
11172 		.nb_flows = ctrl_rx_nb_flows_map[eth_pattern_type],
11173 	};
11174 }
11175 
11176 static struct rte_flow_item
11177 flow_hw_get_ctrl_rx_eth_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
11178 {
11179 	struct rte_flow_item item = {
11180 		.type = RTE_FLOW_ITEM_TYPE_ETH,
11181 		.mask = NULL,
11182 	};
11183 
11184 	switch (eth_pattern_type) {
11185 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
11186 		item.mask = &ctrl_rx_eth_promisc_mask;
11187 		break;
11188 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
11189 		item.mask = &ctrl_rx_eth_mcast_mask;
11190 		break;
11191 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
11192 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
11193 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
11194 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
11195 		item.mask = &ctrl_rx_eth_dmac_mask;
11196 		break;
11197 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
11198 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
11199 		item.mask = &ctrl_rx_eth_ipv4_mcast_mask;
11200 		break;
11201 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
11202 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
11203 		item.mask = &ctrl_rx_eth_ipv6_mcast_mask;
11204 		break;
11205 	default:
11206 		/* Should not reach here - ETH mask must be present. */
11207 		item.type = RTE_FLOW_ITEM_TYPE_END;
11208 		MLX5_ASSERT(false);
11209 		break;
11210 	}
11211 	return item;
11212 }
11213 
11214 static struct rte_flow_item
11215 flow_hw_get_ctrl_rx_vlan_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
11216 {
11217 	struct rte_flow_item item = {
11218 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11219 		.mask = NULL,
11220 	};
11221 
11222 	switch (eth_pattern_type) {
11223 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
11224 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
11225 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
11226 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
11227 		item.type = RTE_FLOW_ITEM_TYPE_VLAN;
11228 		item.mask = &rte_flow_item_vlan_mask;
11229 		break;
11230 	default:
11231 		/* Nothing to update. */
11232 		break;
11233 	}
11234 	return item;
11235 }
11236 
11237 static struct rte_flow_item
11238 flow_hw_get_ctrl_rx_l3_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11239 {
11240 	struct rte_flow_item item = {
11241 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11242 		.mask = NULL,
11243 	};
11244 
11245 	switch (rss_type) {
11246 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
11247 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11248 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11249 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
11250 		break;
11251 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
11252 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11253 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11254 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
11255 		break;
11256 	default:
11257 		/* Nothing to update. */
11258 		break;
11259 	}
11260 	return item;
11261 }
11262 
11263 static struct rte_flow_item
11264 flow_hw_get_ctrl_rx_l4_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11265 {
11266 	struct rte_flow_item item = {
11267 		.type = RTE_FLOW_ITEM_TYPE_VOID,
11268 		.mask = NULL,
11269 	};
11270 
11271 	switch (rss_type) {
11272 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11273 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11274 		item.type = RTE_FLOW_ITEM_TYPE_UDP;
11275 		break;
11276 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11277 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11278 		item.type = RTE_FLOW_ITEM_TYPE_TCP;
11279 		break;
11280 	default:
11281 		/* Nothing to update. */
11282 		break;
11283 	}
11284 	return item;
11285 }
11286 
11287 static struct rte_flow_pattern_template *
11288 flow_hw_create_ctrl_rx_pattern_template
11289 		(struct rte_eth_dev *dev,
11290 		 const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
11291 		 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11292 {
11293 	const struct rte_flow_pattern_template_attr attr = {
11294 		.relaxed_matching = 0,
11295 		.ingress = 1,
11296 	};
11297 	struct rte_flow_item items[] = {
11298 		/* Matching patterns */
11299 		flow_hw_get_ctrl_rx_eth_item(eth_pattern_type),
11300 		flow_hw_get_ctrl_rx_vlan_item(eth_pattern_type),
11301 		flow_hw_get_ctrl_rx_l3_item(rss_type),
11302 		flow_hw_get_ctrl_rx_l4_item(rss_type),
11303 		/* Terminate pattern */
11304 		{ .type = RTE_FLOW_ITEM_TYPE_END }
11305 	};
11306 
11307 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
11308 }
11309 
11310 static int
11311 flow_hw_create_ctrl_rx_tables(struct rte_eth_dev *dev)
11312 {
11313 	struct mlx5_priv *priv = dev->data->dev_private;
11314 	unsigned int i;
11315 	unsigned int j;
11316 	int ret;
11317 
11318 	MLX5_ASSERT(!priv->hw_ctrl_rx);
11319 	priv->hw_ctrl_rx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*priv->hw_ctrl_rx),
11320 				       RTE_CACHE_LINE_SIZE, rte_socket_id());
11321 	if (!priv->hw_ctrl_rx) {
11322 		DRV_LOG(ERR, "Failed to allocate memory for Rx control flow tables");
11323 		rte_errno = ENOMEM;
11324 		return -rte_errno;
11325 	}
11326 	/* Create all pattern template variants. */
11327 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11328 		enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
11329 
11330 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11331 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
11332 			struct rte_flow_template_table_attr attr;
11333 			struct rte_flow_pattern_template *pt;
11334 
11335 			attr = flow_hw_get_ctrl_rx_table_attr(eth_pattern_type, rss_type);
11336 			pt = flow_hw_create_ctrl_rx_pattern_template(dev, eth_pattern_type,
11337 								     rss_type);
11338 			if (!pt)
11339 				goto err;
11340 			priv->hw_ctrl_rx->tables[i][j].attr = attr;
11341 			priv->hw_ctrl_rx->tables[i][j].pt = pt;
11342 		}
11343 	}
11344 	return 0;
11345 err:
11346 	ret = rte_errno;
11347 	flow_hw_cleanup_ctrl_rx_tables(dev);
11348 	rte_errno = ret;
11349 	return -ret;
11350 }
11351 
11352 void
11353 mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev)
11354 {
11355 	struct mlx5_priv *priv = dev->data->dev_private;
11356 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
11357 	unsigned int i;
11358 	unsigned int j;
11359 
11360 	if (!priv->dr_ctx)
11361 		return;
11362 	if (!priv->hw_ctrl_rx)
11363 		return;
11364 	hw_ctrl_rx = priv->hw_ctrl_rx;
11365 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11366 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11367 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
11368 
11369 			if (tmpls->tbl) {
11370 				claim_zero(flow_hw_table_destroy(dev, tmpls->tbl, NULL));
11371 				tmpls->tbl = NULL;
11372 			}
11373 		}
11374 	}
11375 	for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11376 		if (hw_ctrl_rx->rss[j]) {
11377 			claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_rx->rss[j], NULL));
11378 			hw_ctrl_rx->rss[j] = NULL;
11379 		}
11380 	}
11381 }
11382 
11383 /**
11384  * Copy the provided HWS configuration to a newly allocated buffer.
11385  *
11386  * @param[in] port_attr
11387  *   Port configuration attributes.
11388  * @param[in] nb_queue
 *   Number of queues.
11390  * @param[in] queue_attr
11391  *   Array that holds attributes for each flow queue.
11392  * @param[in] nt_mode
 *   Non-template mode.
11394  *
11395  * @return
11396  *   Pointer to copied HWS configuration is returned on success.
11397  *   Otherwise, NULL is returned and rte_errno is set.
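 *
 * The copy is placed in a single allocation, with the queue attributes
 * stored right after the main structure. A sketch of the resulting layout
 * (names below only illustrate the arrangement used by this function):
 *
 * @code{.c}
 * struct {
 *     struct mlx5_flow_hw_attr hw_attr;
 *     struct rte_flow_queue_attr queue_attr[nb_queue];
 * };
 * @endcode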
11398  */
11399 static struct mlx5_flow_hw_attr *
11400 flow_hw_alloc_copy_config(const struct rte_flow_port_attr *port_attr,
11401 			  const uint16_t nb_queue,
11402 			  const struct rte_flow_queue_attr *queue_attr[],
11403 			  bool nt_mode,
11404 			  struct rte_flow_error *error)
11405 {
11406 	struct mlx5_flow_hw_attr *hw_attr;
11407 	size_t hw_attr_size;
11408 	unsigned int i;
11409 
11410 	hw_attr_size = sizeof(*hw_attr) + nb_queue * sizeof(*hw_attr->queue_attr);
11411 	hw_attr = mlx5_malloc(MLX5_MEM_ZERO, hw_attr_size, 0, SOCKET_ID_ANY);
11412 	if (!hw_attr) {
11413 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11414 				   "Not enough memory to store configuration");
11415 		return NULL;
11416 	}
11417 	memcpy(&hw_attr->port_attr, port_attr, sizeof(*port_attr));
11418 	hw_attr->nb_queue = nb_queue;
11419 	/* Queue attributes are placed after the mlx5_flow_hw_attr. */
11420 	hw_attr->queue_attr = (struct rte_flow_queue_attr *)(hw_attr + 1);
11421 	for (i = 0; i < nb_queue; ++i)
11422 		memcpy(&hw_attr->queue_attr[i], queue_attr[i], sizeof(hw_attr->queue_attr[i]));
11423 	hw_attr->nt_mode = nt_mode;
11424 	return hw_attr;
11425 }
11426 
11427 /**
11428  * Compares the preserved HWS configuration with the provided one.
11429  *
11430  * @param[in] hw_attr
11431  *   Pointer to preserved HWS configuration.
11432  * @param[in] new_pa
11433  *   Port configuration attributes to compare.
11434  * @param[in] new_nbq
11435  *   Number of queues to compare.
11436  * @param[in] new_qa
11437  *   Array that holds attributes for each flow queue.
11438  *
11439  * @return
11440  *   True if configurations are the same, false otherwise.
11441  */
11442 static bool
11443 flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
11444 		       const struct rte_flow_port_attr *new_pa,
11445 		       const uint16_t new_nbq,
11446 		       const struct rte_flow_queue_attr *new_qa[])
11447 {
11448 	const struct rte_flow_port_attr *old_pa = &hw_attr->port_attr;
11449 	const uint16_t old_nbq = hw_attr->nb_queue;
11450 	const struct rte_flow_queue_attr *old_qa = hw_attr->queue_attr;
11451 	unsigned int i;
11452 
11453 	if (old_pa->nb_counters != new_pa->nb_counters ||
11454 	    old_pa->nb_aging_objects != new_pa->nb_aging_objects ||
11455 	    old_pa->nb_meters != new_pa->nb_meters ||
11456 	    old_pa->nb_conn_tracks != new_pa->nb_conn_tracks ||
11457 	    old_pa->flags != new_pa->flags)
11458 		return false;
11459 	if (old_nbq != new_nbq)
11460 		return false;
11461 	for (i = 0; i < old_nbq; ++i)
11462 		if (old_qa[i].size != new_qa[i]->size)
11463 			return false;
11464 	return true;
11465 }
11466 
11467 /*
11468  * No need to explicitly release drop action templates on port stop.
 * Drop action templates are released together with the other action templates during
 * mlx5_dev_close -> flow_hw_resource_release -> flow_hw_actions_template_destroy.
11471  */
11472 static void
11473 flow_hw_action_template_drop_release(struct rte_eth_dev *dev)
11474 {
11475 	int i;
11476 	struct mlx5_priv *priv = dev->data->dev_private;
11477 
11478 	for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
11479 		if (!priv->action_template_drop[i])
11480 			continue;
11481 		flow_hw_actions_template_destroy(dev,
11482 						 priv->action_template_drop[i],
11483 						 NULL);
11484 		priv->action_template_drop[i] = NULL;
11485 	}
11486 }
11487 
11488 static int
11489 flow_hw_action_template_drop_init(struct rte_eth_dev *dev,
11490 			  struct rte_flow_error *error)
11491 {
11492 	const struct rte_flow_action drop[2] = {
11493 		[0] = { .type = RTE_FLOW_ACTION_TYPE_DROP },
11494 		[1] = { .type = RTE_FLOW_ACTION_TYPE_END },
11495 	};
11496 	const struct rte_flow_action *actions = drop;
11497 	const struct rte_flow_action *masks = drop;
11498 	const struct rte_flow_actions_template_attr attr[MLX5DR_TABLE_TYPE_MAX] = {
11499 		[MLX5DR_TABLE_TYPE_NIC_RX] = { .ingress = 1 },
11500 		[MLX5DR_TABLE_TYPE_NIC_TX] = { .egress = 1 },
11501 		[MLX5DR_TABLE_TYPE_FDB] = { .transfer = 1 }
11502 	};
11503 	struct mlx5_priv *priv = dev->data->dev_private;
11504 
11505 	priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX] =
11506 		flow_hw_actions_template_create(dev,
11507 						&attr[MLX5DR_TABLE_TYPE_NIC_RX],
11508 						actions, masks, error);
11509 	if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX])
11510 		return -1;
11511 	priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX] =
11512 		flow_hw_actions_template_create(dev,
11513 						&attr[MLX5DR_TABLE_TYPE_NIC_TX],
11514 						actions, masks, error);
11515 	if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX])
11516 		return -1;
11517 	if (priv->sh->config.dv_esw_en && priv->master) {
11518 		priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB] =
11519 			flow_hw_actions_template_create(dev,
11520 							&attr[MLX5DR_TABLE_TYPE_FDB],
11521 							actions, masks, error);
11522 		if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB])
11523 			return -1;
11524 	}
11525 	return 0;
11526 }
11527 
11528 static void
11529 __flow_hw_resource_release(struct rte_eth_dev *dev, bool ctx_close)
11530 {
11531 	struct mlx5_priv *priv = dev->data->dev_private;
11532 	struct rte_flow_template_table *tbl, *temp_tbl;
11533 	struct rte_flow_pattern_template *it, *temp_it;
11534 	struct rte_flow_actions_template *at, *temp_at;
11535 	struct mlx5_flow_group *grp, *temp_grp;
11536 	uint32_t i;
11537 
11538 	flow_hw_rxq_flag_set(dev, false);
11539 	flow_hw_flush_all_ctrl_flows(dev);
11540 	flow_hw_cleanup_ctrl_fdb_tables(dev);
11541 	flow_hw_cleanup_tx_repr_tagging(dev);
11542 	flow_hw_cleanup_ctrl_rx_tables(dev);
11543 	flow_hw_action_template_drop_release(dev);
11544 	grp = LIST_FIRST(&priv->flow_hw_grp);
11545 	while (grp) {
11546 		temp_grp = LIST_NEXT(grp, next);
11547 		claim_zero(flow_hw_group_unset_miss_group(dev, grp, NULL));
11548 		grp = temp_grp;
11549 	}
11550 	tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
11551 	while (tbl) {
11552 		temp_tbl = LIST_NEXT(tbl, next);
11553 		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11554 		tbl = temp_tbl;
11555 	}
11556 	tbl = LIST_FIRST(&priv->flow_hw_tbl);
11557 	while (tbl) {
11558 		temp_tbl = LIST_NEXT(tbl, next);
11559 		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11560 		tbl = temp_tbl;
11561 	}
11562 	it = LIST_FIRST(&priv->flow_hw_itt);
11563 	while (it) {
11564 		temp_it = LIST_NEXT(it, next);
11565 		claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL));
11566 		it = temp_it;
11567 	}
11568 	at = LIST_FIRST(&priv->flow_hw_at);
11569 	while (at) {
11570 		temp_at = LIST_NEXT(at, next);
11571 		claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
11572 		at = temp_at;
11573 	}
11574 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
11575 		if (priv->hw_drop[i])
11576 			mlx5dr_action_destroy(priv->hw_drop[i]);
11577 		if (priv->hw_tag[i])
11578 			mlx5dr_action_destroy(priv->hw_tag[i]);
11579 	}
11580 	if (priv->hw_def_miss)
11581 		mlx5dr_action_destroy(priv->hw_def_miss);
11582 	flow_hw_destroy_nat64_actions(priv);
11583 	flow_hw_destroy_vlan(dev);
11584 	flow_hw_destroy_send_to_kernel_action(priv);
11585 	flow_hw_free_vport_actions(priv);
11586 	if (priv->acts_ipool) {
11587 		mlx5_ipool_destroy(priv->acts_ipool);
11588 		priv->acts_ipool = NULL;
11589 	}
11590 	if (priv->hws_age_req)
11591 		mlx5_hws_age_pool_destroy(priv);
11592 	if (!priv->shared_host && priv->hws_cpool) {
11593 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
11594 		priv->hws_cpool = NULL;
11595 	}
11596 	if (priv->hws_ctpool) {
11597 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
11598 		priv->hws_ctpool = NULL;
11599 	}
11600 	if (priv->ct_mng) {
11601 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
11602 		priv->ct_mng = NULL;
11603 	}
11604 	mlx5_flow_quota_destroy(dev);
11605 	if (priv->hw_q) {
11606 		for (i = 0; i < priv->nb_queue; i++) {
11607 			struct mlx5_hw_q *hwq = &priv->hw_q[i];
11608 			rte_ring_free(hwq->indir_iq);
11609 			rte_ring_free(hwq->indir_cq);
11610 			rte_ring_free(hwq->flow_transfer_pending);
11611 			rte_ring_free(hwq->flow_transfer_completed);
11612 		}
11613 		mlx5_free(priv->hw_q);
11614 		priv->hw_q = NULL;
11615 	}
11616 	if (ctx_close) {
11617 		if (priv->dr_ctx) {
11618 			claim_zero(mlx5dr_context_close(priv->dr_ctx));
11619 			priv->dr_ctx = NULL;
11620 		}
11621 	}
11622 	if (priv->shared_host) {
11623 		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
11624 		rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
11625 				rte_memory_order_relaxed);
11626 		priv->shared_host = NULL;
11627 	}
11628 	if (priv->hw_attr) {
11629 		mlx5_free(priv->hw_attr);
11630 		priv->hw_attr = NULL;
11631 	}
11632 	priv->nb_queue = 0;
11633 }
11634 
11635 static __rte_always_inline struct rte_ring *
11636 mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char *str)
11637 {
11638 	char mz_name[RTE_MEMZONE_NAMESIZE];
11639 
11640 	snprintf(mz_name, sizeof(mz_name), "port_%u_%s_%u", port_id, str, queue);
11641 	return rte_ring_create(mz_name, size, SOCKET_ID_ANY,
11642 			       RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
11643 }
11644 
11645 static int
11646 flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr,
11647 			    uint16_t nb_queue,
11648 			    const struct rte_flow_queue_attr *queue_attr[],
11649 			    bool nt_mode, struct rte_flow_error *error)
11650 {
11651 	uint32_t size;
11652 	unsigned int i;
11653 
11654 	if (port_attr == NULL)
11655 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11656 					  "Port attributes must be non-NULL");
11657 
11658 	if (nb_queue == 0 && !nt_mode)
11659 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11660 					  "At least one flow queue is required");
11661 
11662 	if (queue_attr == NULL)
11663 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11664 					  "Queue attributes must be non-NULL");
11665 
11666 	size = queue_attr[0]->size;
11667 	for (i = 1; i < nb_queue; ++i) {
11668 		if (queue_attr[i]->size != size)
11669 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11670 						  NULL,
11671 						  "All flow queues must have the same size");
11672 	}
11673 
11674 	return 0;
11675 }
11676 
11677 /**
11678  * Configure port HWS resources.
11679  *
11680  * @param[in] dev
11681  *   Pointer to the rte_eth_dev structure.
11682  * @param[in] port_attr
11683  *   Port configuration attributes.
11684  * @param[in] nb_queue
 *   Number of queues.
11686  * @param[in] queue_attr
11687  *   Array that holds attributes for each flow queue.
11688  * @param[in] nt_mode
11689  *   Non-template mode.
11690  * @param[out] error
11691  *   Pointer to error structure.
11692  *
11693  * @return
11694  *   0 on success, a negative errno value otherwise and rte_errno is set.
11695  */
11696 static int
11697 __flow_hw_configure(struct rte_eth_dev *dev,
11698 		  const struct rte_flow_port_attr *port_attr,
11699 		  uint16_t nb_queue,
11700 		  const struct rte_flow_queue_attr *queue_attr[],
11701 		  bool nt_mode,
11702 		  struct rte_flow_error *error)
11703 {
11704 	struct mlx5_priv *priv = dev->data->dev_private;
11705 	struct mlx5_priv *host_priv = NULL;
11706 	struct mlx5dr_context_attr dr_ctx_attr = {0};
11707 	struct mlx5_hw_q *hw_q;
11708 	struct mlx5_hw_q_job *job = NULL;
11709 	uint32_t mem_size, i, j;
11710 	struct mlx5_indexed_pool_config cfg = {
11711 		.size = sizeof(struct mlx5_action_construct_data),
11712 		.trunk_size = 4096,
11713 		.need_lock = 1,
11714 		.release_mem_en = !!priv->sh->config.reclaim_mode,
11715 		.malloc = mlx5_malloc,
11716 		.free = mlx5_free,
11717 		.type = "mlx5_hw_action_construct_data",
11718 	};
11719 	/*
	 * One extra queue is added for internal PMD use;
	 * the last queue is reserved for the PMD.
11722 	 */
11723 	uint16_t nb_q_updated = 0;
11724 	struct rte_flow_queue_attr **_queue_attr = NULL;
11725 	struct rte_flow_queue_attr ctrl_queue_attr = {0};
11726 	bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
11727 	int ret = 0;
11728 	uint32_t action_flags;
11729 	bool strict_queue = false;
11730 
11731 	error->type = RTE_FLOW_ERROR_TYPE_NONE;
11732 	if (mlx5dr_rule_get_handle_size() != MLX5_DR_RULE_SIZE) {
11733 		rte_errno = EINVAL;
11734 		goto err;
11735 	}
11736 	if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, nt_mode, error))
11737 		return -rte_errno;
11738 	/*
11739 	 * Calling rte_flow_configure() again is allowed if
11740 	 * provided configuration matches the initially provided one,
	 * or the previous configuration was the default non-template one.
11742 	 */
11743 	if (priv->dr_ctx) {
11744 		MLX5_ASSERT(priv->hw_attr != NULL);
11745 		for (i = 0; i < priv->nb_queue; i++) {
11746 			hw_q = &priv->hw_q[i];
11747 			/* Make sure all queues are empty. */
11748 			if (hw_q->size != hw_q->job_idx) {
11749 				rte_errno = EBUSY;
11750 				goto err;
11751 			}
11752 		}
		/* If the previous configuration was not the default non-template mode config. */
11754 		if (!priv->hw_attr->nt_mode) {
11755 			if (flow_hw_compare_config(priv->hw_attr, port_attr, nb_queue, queue_attr))
11756 				return 0;
11757 			else
11758 				return rte_flow_error_set(error, ENOTSUP,
11759 							RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11760 							"Changing HWS configuration attributes "
11761 							"is not supported");
11762 		}
11763 		/* Reconfiguration, need to release all resources from previous allocation. */
11764 		__flow_hw_resource_release(dev, true);
11765 	}
11766 	priv->hw_attr = flow_hw_alloc_copy_config(port_attr, nb_queue, queue_attr, nt_mode, error);
11767 	if (!priv->hw_attr) {
11768 		ret = -rte_errno;
11769 		goto err;
11770 	}
11771 	ctrl_queue_attr.size = queue_attr[0]->size;
11772 	nb_q_updated = nb_queue + 1;
11773 	_queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
11774 				  nb_q_updated *
11775 				  sizeof(struct rte_flow_queue_attr *),
11776 				  64, SOCKET_ID_ANY);
11777 	if (!_queue_attr) {
11778 		rte_errno = ENOMEM;
11779 		goto err;
11780 	}
11781 
11782 	memcpy(_queue_attr, queue_attr, sizeof(void *) * nb_queue);
11783 	_queue_attr[nb_queue] = &ctrl_queue_attr;
11784 	priv->acts_ipool = mlx5_ipool_create(&cfg);
11785 	if (!priv->acts_ipool)
11786 		goto err;
11787 	/* Allocate the queue job descriptor LIFO. */
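	/*
	 * The single allocation below is laid out as sketched here, following
	 * the pointer arithmetic used in the loop (per-queue job pointer
	 * arrays are interleaved with the job descriptors themselves):
	 *
	 *   struct mlx5_hw_q      hw_q[nb_q_updated];
	 *   struct mlx5_hw_q_job *q0_job_ptrs[_queue_attr[0]->size];
	 *   struct mlx5_hw_q_job  q0_jobs[_queue_attr[0]->size];
	 *   struct mlx5_hw_q_job *q1_job_ptrs[_queue_attr[1]->size];
	 *   struct mlx5_hw_q_job  q1_jobs[_queue_attr[1]->size];
	 *   ...
	 */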
11788 	mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
11789 	for (i = 0; i < nb_q_updated; i++) {
11790 		mem_size += (sizeof(struct mlx5_hw_q_job *) +
11791 			     sizeof(struct mlx5_hw_q_job)) * _queue_attr[i]->size;
11792 	}
11793 	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
11794 				 64, SOCKET_ID_ANY);
11795 	if (!priv->hw_q) {
11796 		rte_errno = ENOMEM;
11797 		goto err;
11798 	}
11799 	for (i = 0; i < nb_q_updated; i++) {
11800 		priv->hw_q[i].job_idx = _queue_attr[i]->size;
11801 		priv->hw_q[i].size = _queue_attr[i]->size;
11802 		priv->hw_q[i].ongoing_flow_ops = 0;
11803 		if (i == 0)
11804 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
11805 					    &priv->hw_q[nb_q_updated];
11806 		else
11807 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)&job[_queue_attr[i - 1]->size];
11808 		job = (struct mlx5_hw_q_job *)
11809 		      &priv->hw_q[i].job[_queue_attr[i]->size];
11810 		for (j = 0; j < _queue_attr[i]->size; j++)
11811 			priv->hw_q[i].job[j] = &job[j];
		/* Note that the ring name length is limited. */
11813 		priv->hw_q[i].indir_cq = mlx5_hwq_ring_create
11814 			(dev->data->port_id, i, _queue_attr[i]->size, "indir_act_cq");
11815 		if (!priv->hw_q[i].indir_cq)
11816 			goto err;
11817 		priv->hw_q[i].indir_iq = mlx5_hwq_ring_create
11818 			(dev->data->port_id, i, _queue_attr[i]->size, "indir_act_iq");
11819 		if (!priv->hw_q[i].indir_iq)
11820 			goto err;
11821 		priv->hw_q[i].flow_transfer_pending = mlx5_hwq_ring_create
11822 			(dev->data->port_id, i, _queue_attr[i]->size, "tx_pending");
11823 		if (!priv->hw_q[i].flow_transfer_pending)
11824 			goto err;
11825 		priv->hw_q[i].flow_transfer_completed = mlx5_hwq_ring_create
11826 			(dev->data->port_id, i, _queue_attr[i]->size, "tx_done");
11827 		if (!priv->hw_q[i].flow_transfer_completed)
11828 			goto err;
11829 	}
11830 	dr_ctx_attr.pd = priv->sh->cdev->pd;
11831 	dr_ctx_attr.queues = nb_q_updated;
	/* Assign the initial number of STCs for representors. */
11833 	if (priv->representor)
11834 		dr_ctx_attr.initial_log_stc_memory = MLX5_REPR_STC_MEMORY_LOG;
	/* Queue sizes should all be the same; take the first one. */
11836 	dr_ctx_attr.queue_size = _queue_attr[0]->size;
11837 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
11838 		struct rte_eth_dev *host_dev = NULL;
11839 		uint16_t port_id;
11840 
11841 		MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
11842 		if (is_proxy) {
11843 			DRV_LOG(ERR, "cross vHCA shared mode not supported "
11844 				"for E-Switch confgiurations");
11845 			rte_errno = ENOTSUP;
11846 			goto err;
11847 		}
11848 		MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
11849 			if (port_id == port_attr->host_port_id) {
11850 				host_dev = &rte_eth_devices[port_id];
11851 				break;
11852 			}
11853 		}
11854 		if (!host_dev || host_dev == dev ||
11855 		    !host_dev->data || !host_dev->data->dev_private) {
11856 			DRV_LOG(ERR, "Invalid cross vHCA host port %u",
11857 				port_attr->host_port_id);
11858 			rte_errno = EINVAL;
11859 			goto err;
11860 		}
11861 		host_priv = host_dev->data->dev_private;
11862 		if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
11863 			DRV_LOG(ERR, "Sibling ports %u and %u do not "
11864 				     "require cross vHCA sharing mode",
11865 				dev->data->port_id, port_attr->host_port_id);
11866 			rte_errno = EINVAL;
11867 			goto err;
11868 		}
11869 		if (host_priv->shared_host) {
11870 			DRV_LOG(ERR, "Host port %u is not the sharing base",
11871 				port_attr->host_port_id);
11872 			rte_errno = EINVAL;
11873 			goto err;
11874 		}
11875 		if (port_attr->nb_counters ||
11876 		    port_attr->nb_aging_objects ||
11877 		    port_attr->nb_meters ||
11878 		    port_attr->nb_conn_tracks) {
11879 			DRV_LOG(ERR,
11880 				"Object numbers on guest port must be zeros");
11881 			rte_errno = EINVAL;
11882 			goto err;
11883 		}
11884 		dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
11885 		priv->shared_host = host_dev;
11886 		rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
11887 				rte_memory_order_relaxed);
11888 	}
	/* Set backward compatible mode to support the non-template RTE flow API. */
11890 	dr_ctx_attr.bwc = true;
11891 	priv->dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
11892 	/* rte_errno has been updated by HWS layer. */
11893 	if (!priv->dr_ctx)
11894 		goto err;
11895 	priv->nb_queue = nb_q_updated;
11896 	ret = flow_hw_action_template_drop_init(dev, error);
11897 	if (ret)
11898 		goto err;
11899 	ret = flow_hw_create_ctrl_rx_tables(dev);
11900 	if (ret) {
11901 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11902 				   "Failed to set up Rx control flow templates");
11903 		goto err;
11904 	}
11905 	/* Initialize quotas */
11906 	if (port_attr->nb_quotas || (host_priv && host_priv->quota_ctx.devx_obj)) {
11907 		ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas);
11908 		if (ret) {
11909 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11910 					   "Failed to initialize quota.");
11911 			goto err;
11912 		}
11913 	}
11914 	/* Initialize meter library*/
11915 	if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
11916 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 0, 0, nb_q_updated))
11917 			goto err;
11918 	/* Add global actions. */
11919 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
11920 		uint32_t act_flags = 0;
11921 
11922 		act_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];
11923 		if (is_proxy)
11924 			act_flags |= mlx5_hw_act_flag[i][2];
11925 		priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
11926 		if (!priv->hw_drop[i])
11927 			goto err;
11928 		priv->hw_tag[i] = mlx5dr_action_create_tag
11929 			(priv->dr_ctx, mlx5_hw_act_flag[i][0]);
11930 		if (!priv->hw_tag[i])
11931 			goto err;
11932 	}
11933 	if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) {
11934 		ret = flow_hw_setup_tx_repr_tagging(dev, error);
11935 		if (ret)
11936 			goto err;
11937 	}
11938 	/*
	 * The DEFAULT_MISS action has different behaviors in different domains.
	 * In FDB, it steers the packets to the E-switch manager.
	 * In the NIC Rx root table, it steers the packets to the kernel driver stack.
11942 	 * An action with all bits set in the flag can be created and the HWS
11943 	 * layer will translate it properly when being used in different rules.
11944 	 */
11945 	action_flags = MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX |
11946 		       MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX;
11947 	if (is_proxy)
11948 		action_flags |= (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB);
11949 	priv->hw_def_miss = mlx5dr_action_create_default_miss(priv->dr_ctx, action_flags);
11950 	if (!priv->hw_def_miss)
11951 		goto err;
11952 	if (is_proxy) {
11953 		ret = flow_hw_create_vport_actions(priv);
11954 		if (ret) {
11955 			rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11956 					   NULL, "Failed to create vport actions.");
11957 			goto err;
11958 		}
11959 		ret = flow_hw_create_ctrl_tables(dev, error);
11960 		if (ret) {
11961 			rte_errno = -ret;
11962 			goto err;
11963 		}
11964 	}
11965 	if (!priv->shared_host)
11966 		flow_hw_create_send_to_kernel_actions(priv);
11967 	if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
11968 		if (mlx5_flow_ct_init(dev, port_attr->nb_conn_tracks, nb_q_updated))
11969 			goto err;
11970 	}
11971 	if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
11972 		struct mlx5_hws_cnt_pool *hws_cpool = host_priv ? host_priv->hws_cpool : NULL;
11973 
11974 		ret = mlx5_hws_cnt_pool_create(dev, port_attr->nb_counters,
11975 					       nb_queue, hws_cpool, error);
11976 		if (ret)
11977 			goto err;
11978 	}
11979 	if (port_attr->nb_aging_objects) {
11980 		if (port_attr->nb_counters == 0) {
11981 			/*
			 * Aging management uses counters. The number of counters
			 * requested should account for one counter for each flow
			 * rule containing an AGE action without a counter.
11985 			 */
11986 			DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
11987 				"without counters requesting.",
11988 				dev->data->port_id,
11989 				port_attr->nb_aging_objects);
11990 			rte_errno = EINVAL;
11991 			goto err;
11992 		}
11993 		if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
11994 			DRV_LOG(ERR, "Aging is not supported "
11995 				"in cross vHCA sharing mode");
11996 			ret = -ENOTSUP;
11997 			goto err;
11998 		}
11999 		strict_queue = !!(port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
12000 		ret = mlx5_hws_age_pool_init(dev, port_attr->nb_aging_objects,
12001 						nb_queue, strict_queue);
12002 		if (ret < 0)
12003 			goto err;
12004 	}
12005 	ret = flow_hw_create_vlan(dev);
12006 	if (ret) {
12007 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12008 				   NULL, "Failed to VLAN actions.");
12009 		goto err;
12010 	}
12011 	if (flow_hw_create_nat64_actions(priv, error))
12012 		DRV_LOG(WARNING, "Cannot create NAT64 action on port %u, "
12013 			"please check the FW version", dev->data->port_id);
12014 	if (_queue_attr)
12015 		mlx5_free(_queue_attr);
12016 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
12017 		priv->hws_strict_queue = 1;
12018 	dev->flow_fp_ops = &mlx5_flow_hw_fp_ops;
12019 	return 0;
12020 err:
12021 	__flow_hw_resource_release(dev, true);
12022 	if (_queue_attr)
12023 		mlx5_free(_queue_attr);
12024 	/* Do not overwrite the internal errno information. */
12025 	if (ret && error->type != RTE_FLOW_ERROR_TYPE_NONE)
12026 		return ret;
12027 	return rte_flow_error_set(error, rte_errno,
12028 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12029 				  "fail to configure port");
12030 }
12031 
12032 /**
12033  * Configure port HWS resources.
12034  *
12035  * @param[in] dev
12036  *   Pointer to the rte_eth_dev structure.
12037  * @param[in] port_attr
12038  *   Port configuration attributes.
12039  * @param[in] nb_queue
 *   Number of queues.
12041  * @param[in] queue_attr
12042  *   Array that holds attributes for each flow queue.
12043  * @param[out] error
12044  *   Pointer to error structure.
12045  *
12046  * @return
12047  *   0 on success, a negative errno value otherwise and rte_errno is set.
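 *
 * A minimal application-level usage sketch; port_id and the attribute
 * values below are illustrative placeholders only:
 *
 * @code{.c}
 * struct rte_flow_port_attr port_attr = { .nb_counters = 1 << 10 };
 * struct rte_flow_queue_attr queue_attr = { .size = 64 };
 * const struct rte_flow_queue_attr *queue_attr_list[] = { &queue_attr };
 * struct rte_flow_error error;
 *
 * // One application flow queue of 64 entries is requested here;
 * // the PMD internally appends an extra queue for its control flows.
 * if (rte_flow_configure(port_id, &port_attr, 1, queue_attr_list, &error))
 *     return -rte_errno; // error.message describes the failure
 * @endcode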
12048  */
12049 static int
12050 flow_hw_configure(struct rte_eth_dev *dev,
12051 		  const struct rte_flow_port_attr *port_attr,
12052 		  uint16_t nb_queue,
12053 		  const struct rte_flow_queue_attr *queue_attr[],
12054 		  struct rte_flow_error *error)
12055 {
12056 	struct rte_flow_error shadow_error = {0, };
12057 
12058 	if (!error)
12059 		error = &shadow_error;
12060 	return __flow_hw_configure(dev, port_attr, nb_queue, queue_attr, false, error);
12061 }
12062 
12063 /**
12064  * Release HWS resources.
12065  *
12066  * @param[in] dev
12067  *   Pointer to the rte_eth_dev structure.
12068  */
12069 void
12070 flow_hw_resource_release(struct rte_eth_dev *dev)
12071 {
12072 	struct mlx5_priv *priv = dev->data->dev_private;
12073 
12074 	if (!priv->dr_ctx)
12075 		return;
12076 	__flow_hw_resource_release(dev, false);
12077 }
12078 
12079 /* Sets vport tag and mask, for given port, used in HWS rules. */
12080 void
12081 flow_hw_set_port_info(struct rte_eth_dev *dev)
12082 {
12083 	struct mlx5_priv *priv = dev->data->dev_private;
12084 	uint16_t port_id = dev->data->port_id;
12085 	struct flow_hw_port_info *info;
12086 
12087 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
12088 	info = &mlx5_flow_hw_port_infos[port_id];
12089 	info->regc_mask = priv->vport_meta_mask;
12090 	info->regc_value = priv->vport_meta_tag;
12091 	info->is_wire = mlx5_is_port_on_mpesw_device(priv) ? priv->mpesw_uplink : priv->master;
12092 }
12093 
12094 /* Clears vport tag and mask used for HWS rules. */
12095 void
12096 flow_hw_clear_port_info(struct rte_eth_dev *dev)
12097 {
12098 	uint16_t port_id = dev->data->port_id;
12099 	struct flow_hw_port_info *info;
12100 
12101 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
12102 	info = &mlx5_flow_hw_port_infos[port_id];
12103 	info->regc_mask = 0;
12104 	info->regc_value = 0;
12105 	info->is_wire = 0;
12106 }
12107 
12108 static int
12109 flow_hw_conntrack_destroy(struct rte_eth_dev *dev,
12110 			  uint32_t idx,
12111 			  struct rte_flow_error *error)
12112 {
12113 	struct mlx5_priv *priv = dev->data->dev_private;
12114 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12115 	struct mlx5_aso_ct_action *ct;
12116 
12117 	if (priv->shared_host)
12118 		return rte_flow_error_set(error, ENOTSUP,
12119 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12120 				NULL,
12121 				"CT destruction is not allowed to guest port");
12122 	ct = mlx5_ipool_get(pool->cts, idx);
12123 	if (!ct) {
12124 		return rte_flow_error_set(error, EINVAL,
12125 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12126 				NULL,
12127 				"Invalid CT destruction index");
12128 	}
12129 	rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
12130 				 rte_memory_order_relaxed);
12131 	mlx5_ipool_free(pool->cts, idx);
12132 	return 0;
12133 }
12134 
12135 static int
12136 flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,
12137 			struct rte_flow_action_conntrack *profile,
12138 			void *user_data, bool push,
12139 			struct rte_flow_error *error)
12140 {
12141 	struct mlx5_priv *priv = dev->data->dev_private;
12142 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12143 	struct mlx5_aso_ct_action *ct;
12144 
12145 	if (priv->shared_host)
12146 		return rte_flow_error_set(error, ENOTSUP,
12147 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12148 				NULL,
12149 				"CT query is not allowed to guest port");
12150 	ct = mlx5_ipool_get(pool->cts, idx);
12151 	if (!ct) {
12152 		return rte_flow_error_set(error, EINVAL,
12153 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12154 				NULL,
12155 				"Invalid CT query index");
12156 	}
12157 	profile->peer_port = ct->peer;
12158 	profile->is_original_dir = ct->is_original;
12159 	if (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))
12160 		return rte_flow_error_set(error, EIO,
12161 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12162 				NULL,
12163 				"Failed to query CT context");
12164 	return 0;
12165 }
12166 
12167 
12168 static int
12169 flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
12170 			 const struct rte_flow_modify_conntrack *action_conf,
12171 			 uint32_t idx, void *user_data, bool push,
12172 			 struct rte_flow_error *error)
12173 {
12174 	struct mlx5_priv *priv = dev->data->dev_private;
12175 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12176 	struct mlx5_aso_ct_action *ct;
12177 	const struct rte_flow_action_conntrack *new_prf;
12178 	int ret = 0;
12179 
12180 	if (priv->shared_host)
12181 		return rte_flow_error_set(error, ENOTSUP,
12182 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12183 				NULL,
12184 				"CT update is not allowed to guest port");
12185 	ct = mlx5_ipool_get(pool->cts, idx);
12186 	if (!ct) {
12187 		return rte_flow_error_set(error, EINVAL,
12188 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12189 				NULL,
12190 				"Invalid CT update index");
12191 	}
12192 	new_prf = &action_conf->new_ct;
12193 	if (action_conf->direction)
12194 		ct->is_original = !!new_prf->is_original_dir;
12195 	if (action_conf->state) {
12196 		/* Only validate the profile when it needs to be updated. */
12197 		ret = mlx5_validate_action_ct(dev, new_prf, error);
12198 		if (ret)
12199 			return ret;
12200 		ret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,
12201 						user_data, push);
12202 		if (ret)
12203 			return rte_flow_error_set(error, EIO,
12204 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12205 					NULL,
12206 					"Failed to send CT context update WQE");
12207 		if (queue != MLX5_HW_INV_QUEUE)
12208 			return 0;
12209 		/* Block until ready or a failure in synchronous mode. */
12210 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
12211 		if (ret)
12212 			rte_flow_error_set(error, rte_errno,
12213 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12214 					   NULL,
12215 					   "Timeout to get the CT update");
12216 	}
12217 	return ret;
12218 }
12219 
12220 static struct rte_flow_action_handle *
12221 flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
12222 			 const struct rte_flow_action_conntrack *pro,
12223 			 void *user_data, bool push,
12224 			 struct rte_flow_error *error)
12225 {
12226 	struct mlx5_priv *priv = dev->data->dev_private;
12227 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12228 	struct mlx5_aso_ct_action *ct;
12229 	uint32_t ct_idx = 0;
12230 	int ret;
12231 	bool async = !!(queue != MLX5_HW_INV_QUEUE);
12232 
12233 	if (priv->shared_host) {
12234 		rte_flow_error_set(error, ENOTSUP,
12235 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12236 				NULL,
12237 				"CT create is not allowed to guest port");
12238 		return NULL;
12239 	}
12240 	if (!pool) {
12241 		rte_flow_error_set(error, EINVAL,
12242 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12243 				   "CT is not enabled");
12244 		return 0;
12245 	}
12246 	ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
12247 	if (!ct) {
12248 		rte_flow_error_set(error, rte_errno,
12249 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12250 				   "Failed to allocate CT object");
12251 		return 0;
12252 	}
12253 	ct->offset = ct_idx - 1;
12254 	ct->is_original = !!pro->is_original_dir;
12255 	ct->peer = pro->peer_port;
12256 	ct->pool = pool;
12257 	if (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {
12258 		mlx5_ipool_free(pool->cts, ct_idx);
12259 		rte_flow_error_set(error, EBUSY,
12260 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12261 				   "Failed to update CT");
12262 		return 0;
12263 	}
12264 	if (!async) {
12265 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
12266 		if (ret) {
12267 			mlx5_ipool_free(pool->cts, ct_idx);
12268 			rte_flow_error_set(error, rte_errno,
12269 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12270 					   NULL,
12271 					   "Timeout to get the CT update");
12272 			return 0;
12273 		}
12274 	}
12275 	return MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(ct_idx);
12276 }
12277 
12278 /**
12279  * Validate shared action.
12280  *
12281  * @param[in] dev
12282  *   Pointer to the rte_eth_dev structure.
12283  * @param[in] queue
12284  *   Which queue to be used.
12285  * @param[in] attr
12286  *   Operation attribute.
12287  * @param[in] conf
12288  *   Indirect action configuration.
12289  * @param[in] action
12290  *   rte_flow action detail.
12291  * @param[in] user_data
12292  *   Pointer to the user_data.
12293  * @param[out] error
12294  *   Pointer to error structure.
12295  *
12296  * @return
12297  *   0 on success, otherwise negative errno value.
12298  */
12299 static int
12300 flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
12301 			       const struct rte_flow_op_attr *attr,
12302 			       const struct rte_flow_indir_action_conf *conf,
12303 			       const struct rte_flow_action *action,
12304 			       void *user_data,
12305 			       struct rte_flow_error *error)
12306 {
12307 	struct mlx5_priv *priv = dev->data->dev_private;
12308 
12309 	RTE_SET_USED(attr);
12310 	RTE_SET_USED(queue);
12311 	RTE_SET_USED(user_data);
12312 	switch (action->type) {
12313 	case RTE_FLOW_ACTION_TYPE_AGE:
12314 		if (!priv->hws_age_req) {
12315 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_AGE,
12316 						     error))
12317 				return rte_flow_error_set
12318 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12319 					 NULL, "aging pool not initialized");
12320 		}
12321 		break;
12322 	case RTE_FLOW_ACTION_TYPE_COUNT:
12323 		if (!priv->hws_cpool) {
12324 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_COUNT,
12325 						     error))
12326 				return rte_flow_error_set
12327 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12328 					 NULL, "counters pool not initialized");
12329 		}
12330 		break;
12331 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12332 		if (priv->hws_ctpool == NULL) {
12333 			if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_CT,
12334 						     error))
12335 				return rte_flow_error_set
12336 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12337 					 NULL, "CT pool not initialized");
12338 		}
12339 		return mlx5_validate_action_ct(dev, action->conf, error);
12340 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
12341 		return flow_hw_validate_action_meter_mark(dev, action, true, error);
12342 	case RTE_FLOW_ACTION_TYPE_RSS:
12343 		return flow_dv_action_validate(dev, conf, action, error);
12344 	case RTE_FLOW_ACTION_TYPE_QUOTA:
12345 		return 0;
12346 	default:
12347 		return rte_flow_error_set(error, ENOTSUP,
12348 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12349 					  "action type not supported");
12350 	}
12351 	return 0;
12352 }
12353 
12354 static __rte_always_inline bool
12355 flow_hw_action_push(const struct rte_flow_op_attr *attr)
12356 {
12357 	return attr ? !attr->postpone : true;
12358 }
12359 
12360 static __rte_always_inline struct mlx5_hw_q_job *
12361 flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
12362 			const struct rte_flow_action_handle *handle,
12363 			void *user_data, void *query_data,
12364 			enum mlx5_hw_job_type type,
12365 			enum mlx5_hw_indirect_type indirect_type,
12366 			struct rte_flow_error *error)
12367 {
12368 	struct mlx5_hw_q_job *job;
12369 
12370 	if (queue == MLX5_HW_INV_QUEUE)
12371 		queue = CTRL_QUEUE_ID(priv);
12372 	job = flow_hw_job_get(priv, queue);
12373 	if (!job) {
12374 		rte_flow_error_set(error, ENOMEM,
12375 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
12376 				   "Action destroy failed due to queue full.");
12377 		return NULL;
12378 	}
12379 	job->type = type;
12380 	job->action = handle;
12381 	job->user_data = user_data;
12382 	job->query.user = query_data;
12383 	job->indirect_type = indirect_type;
12384 	return job;
12385 }
12386 
12387 struct mlx5_hw_q_job *
12388 mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
12389 			  const struct rte_flow_action_handle *handle,
12390 			  void *user_data, void *query_data,
12391 			  enum mlx5_hw_job_type type,
12392 			  struct rte_flow_error *error)
12393 {
12394 	return flow_hw_action_job_init(priv, queue, handle, user_data, query_data,
12395 				       type, MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12396 }
12397 
12398 static __rte_always_inline void
12399 flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
12400 			struct mlx5_hw_q_job *job,
12401 			bool push, bool aso, bool status)
12402 {
12403 	struct mlx5_priv *priv = dev->data->dev_private;
12404 
12405 	if (queue == MLX5_HW_INV_QUEUE)
12406 		queue = CTRL_QUEUE_ID(priv);
12407 	if (likely(status)) {
12408 		/* 1. add new job to a queue */
12409 		if (!aso)
12410 			rte_ring_enqueue(push ?
12411 					 priv->hw_q[queue].indir_cq :
12412 					 priv->hw_q[queue].indir_iq,
12413 					 job);
12414 		/* 2. send pending jobs */
12415 		if (push)
12416 			__flow_hw_push_action(dev, queue);
12417 	} else {
12418 		flow_hw_job_put(priv, job, queue);
12419 	}
12420 }
12421 
12422 /**
12423  * Create shared action.
12424  *
12425  * @param[in] dev
12426  *   Pointer to the rte_eth_dev structure.
12427  * @param[in] queue
12428  *   Which queue to be used.
12429  * @param[in] attr
12430  *   Operation attribute.
12431  * @param[in] conf
12432  *   Indirect action configuration.
12433  * @param[in] action
12434  *   rte_flow action detail.
12435  * @param[in] user_data
12436  *   Pointer to the user_data.
12437  * @param[out] error
12438  *   Pointer to error structure.
12439  *
12440  * @return
12441  *   Action handle on success, NULL otherwise and rte_errno is set.
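 *
 * A hedged application-level sketch of asynchronous creation; port_id,
 * queue 0 and the AGE timeout are illustrative placeholders:
 *
 * @code{.c}
 * struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * struct rte_flow_action_age age_conf = { .timeout = 10 };
 * struct rte_flow_action action = {
 *     .type = RTE_FLOW_ACTION_TYPE_AGE,
 *     .conf = &age_conf,
 * };
 * struct rte_flow_error error;
 * struct rte_flow_action_handle *handle;
 *
 * handle = rte_flow_async_action_handle_create(port_id, 0, &op_attr, &conf,
 *                                              &action, NULL, &error);
 * // The completion must be collected later with rte_flow_pull() on queue 0.
 * @endcode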
12442  */
12443 static struct rte_flow_action_handle *
12444 flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
12445 			     const struct rte_flow_op_attr *attr,
12446 			     const struct rte_flow_indir_action_conf *conf,
12447 			     const struct rte_flow_action *action,
12448 			     void *user_data,
12449 			     struct rte_flow_error *error)
12450 {
12451 	struct rte_flow_action_handle *handle = NULL;
12452 	struct mlx5_hw_q_job *job = NULL;
12453 	struct mlx5_priv *priv = dev->data->dev_private;
12454 	const struct rte_flow_action_age *age;
12455 	struct mlx5_aso_mtr *aso_mtr;
12456 	cnt_id_t cnt_id;
12457 	uint32_t age_idx;
12458 	bool push = flow_hw_action_push(attr);
12459 	bool aso = false;
12460 	bool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK;
12461 
12462 	if (!mlx5_hw_ctx_validate(dev, error))
12463 		return NULL;
12464 	if (attr || force_job) {
12465 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
12466 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
12467 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12468 		if (!job)
12469 			return NULL;
12470 	}
12471 	switch (action->type) {
12472 	case RTE_FLOW_ACTION_TYPE_AGE:
12473 		if (priv->hws_strict_queue) {
12474 			struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
12475 
12476 			if (queue >= info->hw_q_age->nb_rings) {
12477 				rte_flow_error_set(error, EINVAL,
12478 						   RTE_FLOW_ERROR_TYPE_ACTION,
12479 						   NULL,
12480 						   "Invalid queue ID for indirect AGE.");
12481 				rte_errno = EINVAL;
12482 				return NULL;
12483 			}
12484 		}
12485 		age = action->conf;
12486 		age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
12487 						     0, error);
12488 		if (age_idx == 0) {
12489 			rte_flow_error_set(error, ENODEV,
12490 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12491 					   "AGE are not configured!");
12492 		} else {
12493 			age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
12494 				   MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
12495 			handle =
12496 			    (struct rte_flow_action_handle *)(uintptr_t)age_idx;
12497 		}
12498 		break;
12499 	case RTE_FLOW_ACTION_TYPE_COUNT:
12500 		if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
12501 			rte_flow_error_set(error, ENODEV,
12502 					RTE_FLOW_ERROR_TYPE_ACTION,
12503 					NULL,
12504 					"counter are not configured!");
12505 		else
12506 			handle = (struct rte_flow_action_handle *)
12507 				 (uintptr_t)cnt_id;
12508 		break;
12509 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12510 		aso = true;
12511 		handle = flow_hw_conntrack_create(dev, queue, action->conf, job,
12512 						  push, error);
12513 		break;
12514 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
12515 		aso = true;
12516 		aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push, error);
12517 		if (!aso_mtr)
12518 			break;
12519 		handle = (void *)(uintptr_t)job->action;
12520 		break;
12521 	case RTE_FLOW_ACTION_TYPE_RSS:
12522 		handle = flow_dv_action_create(dev, conf, action, error);
12523 		break;
12524 	case RTE_FLOW_ACTION_TYPE_QUOTA:
12525 		aso = true;
12526 		handle = mlx5_quota_alloc(dev, queue, action->conf,
12527 					  job, push, error);
12528 		break;
12529 	default:
12530 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12531 				   NULL, "action type not supported");
12532 		break;
12533 	}
12534 	if (job && !force_job) {
12535 		job->action = handle;
12536 		flow_hw_action_finalize(dev, queue, job, push, aso,
12537 					handle != NULL);
12538 	}
12539 	return handle;
12540 }
12541 
12542 static int
12543 mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,
12544 			    const struct rte_flow_update_meter_mark *upd_meter_mark,
12545 			    uint32_t idx, bool push,
12546 			    struct mlx5_hw_q_job *job, struct rte_flow_error *error)
12547 {
12548 	struct mlx5_priv *priv = dev->data->dev_private;
12549 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
12550 	const struct rte_flow_action_meter_mark *meter_mark = &upd_meter_mark->meter_mark;
12551 	struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
12552 	struct mlx5_flow_meter_info *fm;
12553 
12554 	if (!aso_mtr)
12555 		return rte_flow_error_set(error, EINVAL,
12556 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12557 					  NULL, "Invalid meter_mark update index");
12558 	fm = &aso_mtr->fm;
12559 	if (upd_meter_mark->profile_valid)
12560 		fm->profile = (struct mlx5_flow_meter_profile *)
12561 			(meter_mark->profile);
12562 	if (upd_meter_mark->color_mode_valid)
12563 		fm->color_aware = meter_mark->color_mode;
12564 	if (upd_meter_mark->state_valid)
12565 		fm->is_enable = meter_mark->state;
12566 	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
12567 			 ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
12568 	/* Update ASO flow meter by wqe. */
12569 	if (mlx5_aso_meter_update_by_wqe(priv, queue,
12570 					 aso_mtr, &priv->mtr_bulk, job, push))
12571 		return rte_flow_error_set(error, EINVAL,
12572 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12573 					  NULL, "Unable to update ASO meter WQE");
12574 	/* Wait for ASO object completion. */
12575 	if (queue == MLX5_HW_INV_QUEUE &&
12576 	    mlx5_aso_mtr_wait(priv, aso_mtr, true))
12577 		return rte_flow_error_set(error, EINVAL,
12578 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12579 					  NULL, "Unable to wait for ASO meter CQE");
12580 	return 0;
12581 }
12582 
12583 /**
12584  * Update shared action.
12585  *
12586  * @param[in] dev
12587  *   Pointer to the rte_eth_dev structure.
12588  * @param[in] queue
12589  *   Which queue to be used.
12590  * @param[in] attr
12591  *   Operation attribute.
12592  * @param[in] handle
12593  *   Action handle to be updated.
12594  * @param[in] update
12595  *   Update value.
12596  * @param[in] user_data
12597  *   Pointer to the user_data.
12598  * @param[out] error
12599  *   Pointer to error structure.
12600  *
12601  * @return
12602  *   0 on success, negative value otherwise and rte_errno is set.
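 *
 * For example, an indirect AGE action may be updated asynchronously with a
 * struct rte_flow_update_age payload (queue 0 and the timeout value are
 * illustrative placeholders):
 *
 * @code{.c}
 * struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * struct rte_flow_update_age age_upd = {
 *     .timeout_valid = 1,
 *     .timeout = 30,
 * };
 * struct rte_flow_error error;
 *
 * rte_flow_async_action_handle_update(port_id, 0, &op_attr, handle,
 *                                     &age_upd, NULL, &error);
 * @endcode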
12603  */
12604 static int
12605 flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
12606 			     const struct rte_flow_op_attr *attr,
12607 			     struct rte_flow_action_handle *handle,
12608 			     const void *update,
12609 			     void *user_data,
12610 			     struct rte_flow_error *error)
12611 {
12612 	struct mlx5_priv *priv = dev->data->dev_private;
12613 	const struct rte_flow_modify_conntrack *ct_conf =
12614 		(const struct rte_flow_modify_conntrack *)update;
12615 	struct mlx5_hw_q_job *job = NULL;
12616 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12617 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12618 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
12619 	int ret = 0;
12620 	bool push = flow_hw_action_push(attr);
12621 	bool aso = false;
12622 	bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
12623 
12624 	if (attr || force_job) {
12625 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
12626 					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
12627 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12628 		if (!job)
12629 			return -rte_errno;
12630 	}
12631 	switch (type) {
12632 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
12633 		ret = mlx5_hws_age_action_update(priv, idx, update, error);
12634 		break;
12635 	case MLX5_INDIRECT_ACTION_TYPE_CT:
12636 		if (ct_conf->state)
12637 			aso = true;
12638 		ret = flow_hw_conntrack_update(dev, queue, update, idx,
12639 					       job, push, error);
12640 		break;
12641 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
12642 		aso = true;
12643 		ret = mlx5_flow_update_meter_mark(dev, queue, update, idx, push,
12644 						  job, error);
12645 		break;
12646 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
12647 		ret = flow_dv_action_update(dev, handle, update, error);
12648 		break;
12649 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12650 		aso = true;
12651 		ret = mlx5_quota_query_update(dev, queue, handle, update, NULL,
12652 					      job, push, error);
12653 		break;
12654 	default:
12655 		ret = -ENOTSUP;
12656 		rte_flow_error_set(error, ENOTSUP,
12657 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12658 					  "action type not supported");
12659 		break;
12660 	}
12661 	if (job && !force_job)
12662 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12663 	return ret;
12664 }
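
/*
 * Illustrative note: an indirect action handle used by the handlers above is
 * a 32-bit value that packs the action type in the bits at and above
 * MLX5_INDIRECT_ACTION_TYPE_OFFSET and the object index in the bits below it,
 * which is why the handlers decode it as follows (sketch only):
 *
 *   uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 *   uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 *   uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 */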
12665 
12666 /**
12667  * Destroy shared action.
12668  *
12669  * @param[in] dev
12670  *   Pointer to the rte_eth_dev structure.
12671  * @param[in] queue
12672  *   Which queue to be used.
12673  * @param[in] attr
12674  *   Operation attribute.
12675  * @param[in] handle
12676  *   Action handle to be destroyed.
12677  * @param[in] user_data
12678  *   Pointer to the user_data.
12679  * @param[out] error
12680  *   Pointer to error structure.
12681  *
12682  * @return
12683  *   0 on success, negative value otherwise and rte_errno is set.
12684  */
12685 static int
12686 flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
12687 			      const struct rte_flow_op_attr *attr,
12688 			      struct rte_flow_action_handle *handle,
12689 			      void *user_data,
12690 			      struct rte_flow_error *error)
12691 {
12692 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12693 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12694 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
12695 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
12696 	struct mlx5_priv *priv = dev->data->dev_private;
12697 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
12698 	struct mlx5_hw_q_job *job = NULL;
12699 	struct mlx5_aso_mtr *aso_mtr;
12700 	struct mlx5_flow_meter_info *fm;
12701 	bool push = flow_hw_action_push(attr);
12702 	bool aso = false;
12703 	int ret = 0;
12704 	bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
12705 
12706 	if (attr || force_job) {
12707 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
12708 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
12709 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12710 		if (!job)
12711 			return -rte_errno;
12712 	}
12713 	switch (type) {
12714 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
12715 		ret = mlx5_hws_age_action_destroy(priv, age_idx, error);
12716 		break;
12717 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
12718 		age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
12719 		if (age_idx != 0)
12720 			/*
12721 			 * If this counter belongs to an indirect AGE action,
12722 			 * this is the time to update the AGE.
12723 			 */
12724 			mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
12725 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
12726 		break;
12727 	case MLX5_INDIRECT_ACTION_TYPE_CT:
12728 		ret = flow_hw_conntrack_destroy(dev, idx, error);
12729 		break;
12730 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
12731 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
12732 		if (!aso_mtr) {
12733 			ret = -EINVAL;
12734 			rte_flow_error_set(error, EINVAL,
12735 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12736 				NULL, "Invalid meter_mark destroy index");
12737 			break;
12738 		}
12739 		fm = &aso_mtr->fm;
12740 		fm->is_enable = 0;
12741 		/* Update ASO flow meter by WQE. */
12742 		if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
12743 						 &priv->mtr_bulk, job, push)) {
12744 			ret = -EINVAL;
12745 			rte_flow_error_set(error, EINVAL,
12746 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12747 				NULL, "Unable to update ASO meter WQE");
12748 			break;
12749 		}
12750 		/* Wait for ASO object completion. */
12751 		if (queue == MLX5_HW_INV_QUEUE &&
12752 		    mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
12753 			ret = -EINVAL;
12754 			rte_flow_error_set(error, EINVAL,
12755 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12756 				NULL, "Unable to wait for ASO meter CQE");
12757 			break;
12758 		}
12759 		aso = true;
12760 		break;
12761 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
12762 		ret = flow_dv_action_destroy(dev, handle, error);
12763 		break;
12764 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12765 		break;
12766 	default:
12767 		ret = -ENOTSUP;
12768 		rte_flow_error_set(error, ENOTSUP,
12769 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12770 					  "action type not supported");
12771 		break;
12772 	}
12773 	if (job && !force_job)
12774 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12775 	return ret;
12776 }
12777 
12778 static int
12779 flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
12780 		      void *data, struct rte_flow_error *error)
12781 {
12782 	struct mlx5_hws_cnt_pool *hpool;
12783 	struct mlx5_priv *priv = dev->data->dev_private;
12784 	struct mlx5_hws_cnt *cnt;
12785 	struct rte_flow_query_count *qc = data;
12786 	uint32_t iidx;
12787 	uint64_t pkts, bytes;
12788 
12789 	if (!mlx5_hws_cnt_id_valid(counter))
12790 		return rte_flow_error_set(error, EINVAL,
12791 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12792 				"counter is not available");
12793 	hpool = mlx5_hws_cnt_host_pool(priv->hws_cpool);
12794 	iidx = mlx5_hws_cnt_iidx(hpool, counter);
12795 	cnt = &hpool->pool[iidx];
12796 	__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
12797 	qc->hits_set = 1;
12798 	qc->bytes_set = 1;
12799 	qc->hits = pkts - cnt->reset.hits;
12800 	qc->bytes = bytes - cnt->reset.bytes;
12801 	if (qc->reset) {
12802 		cnt->reset.bytes = bytes;
12803 		cnt->reset.hits = pkts;
12804 	}
12805 	return 0;
12806 }
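
/*
 * Illustrative note on the counter query above: the HW counter is never
 * cleared; instead a per-counter reset baseline is kept and subtracted.
 * Sketch with hypothetical numbers:
 *
 *   raw pkts = 1000, cnt->reset.hits = 400
 *   qc->hits = 1000 - 400 = 600
 *   if (qc->reset)               // advance the baseline
 *       cnt->reset.hits = 1000;  // the next query starts counting from here
 */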
12807 
12808 /**
12809  * Query a flow rule AGE action for aging information.
12810  *
12811  * @param[in] dev
12812  *   Pointer to Ethernet device.
12813  * @param[in] age_idx
12814  *   Index of AGE action parameter.
12815  * @param[out] data
12816  *   Data retrieved by the query.
12817  * @param[out] error
12818  *   Perform verbose error reporting if not NULL.
12819  *
12820  * @return
12821  *   0 on success, a negative errno value otherwise and rte_errno is set.
12822  */
12823 static int
12824 flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
12825 		  struct rte_flow_error *error)
12826 {
12827 	struct mlx5_priv *priv = dev->data->dev_private;
12828 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
12829 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
12830 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
12831 	struct rte_flow_query_age *resp = data;
12832 
12833 	if (!param || !param->timeout)
12834 		return rte_flow_error_set(error, EINVAL,
12835 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12836 					  NULL, "age data not available");
12837 	switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
12838 	case HWS_AGE_AGED_OUT_REPORTED:
12839 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
12840 		resp->aged = 1;
12841 		break;
12842 	case HWS_AGE_CANDIDATE:
12843 	case HWS_AGE_CANDIDATE_INSIDE_RING:
12844 		resp->aged = 0;
12845 		break;
12846 	case HWS_AGE_FREE:
12847 		/*
12848 		 * When state is FREE the flow itself should be invalid.
12849 		 * Fall-through.
12850 		 */
12851 	default:
12852 		MLX5_ASSERT(0);
12853 		break;
12854 	}
12855 	resp->sec_since_last_hit_valid = !resp->aged;
12856 	if (resp->sec_since_last_hit_valid)
12857 		resp->sec_since_last_hit = rte_atomic_load_explicit
12858 				 (&param->sec_since_last_hit, rte_memory_order_relaxed);
12859 	return 0;
12860 }
12861 
12862 static int
12863 flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
12864 	      const struct rte_flow_action *actions, void *data,
12865 	      struct rte_flow_error *error)
12866 {
12867 	int ret = -EINVAL;
12868 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
12869 	struct rte_flow_hw_aux *aux;
12870 
12871 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
12872 		switch (actions->type) {
12873 		case RTE_FLOW_ACTION_TYPE_VOID:
12874 			break;
12875 		case RTE_FLOW_ACTION_TYPE_COUNT:
12876 			if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID))
12877 				return rte_flow_error_set(error, EINVAL,
12878 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12879 							  "counter not defined in the rule");
12880 			ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
12881 						    error);
12882 			break;
12883 		case RTE_FLOW_ACTION_TYPE_AGE:
12884 			if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX))
12885 				return rte_flow_error_set(error, EINVAL,
12886 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12887 							  "age data not available");
12888 			aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
12889 			ret = flow_hw_query_age(dev, mlx5_flow_hw_aux_get_age_idx(hw_flow, aux),
12890 						data, error);
12891 			break;
12892 		default:
12893 			return rte_flow_error_set(error, ENOTSUP,
12894 						  RTE_FLOW_ERROR_TYPE_ACTION,
12895 						  actions,
12896 						  "action not supported");
12897 		}
12898 	}
12899 	return ret;
12900 }
12901 
12902 /**
12903  * Validate indirect action.
12904  *
12905  * @param[in] dev
12906  *   Pointer to the Ethernet device structure.
12907  * @param[in] conf
12908  *   Shared action configuration.
12909  * @param[in] action
12910  *   Action specification used to create indirect action.
12911  * @param[out] error
12912  *   Perform verbose error reporting if not NULL. Initialized in case of
12913  *   error only.
12914  *
12915  * @return
12916  *   0 on success, otherwise negative errno value.
12917  */
12918 static int
12919 flow_hw_action_validate(struct rte_eth_dev *dev,
12920 			const struct rte_flow_indir_action_conf *conf,
12921 			const struct rte_flow_action *action,
12922 			struct rte_flow_error *err)
12923 {
12924 	struct rte_flow_error shadow_error = {0, };
12925 
12926 	if (!err)
12927 		err = &shadow_error;
12928 	return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
12929 					      conf, action, NULL, err);
12930 }
12931 
12932 /**
12933  * Create indirect action.
12934  *
12935  * @param[in] dev
12936  *   Pointer to the Ethernet device structure.
12937  * @param[in] conf
12938  *   Shared action configuration.
12939  * @param[in] action
12940  *   Action specification used to create indirect action.
12941  * @param[out] error
12942  *   Perform verbose error reporting if not NULL. Initialized in case of
12943  *   error only.
12944  *
12945  * @return
12946  *   A valid shared action handle in case of success, NULL otherwise and
12947  *   rte_errno is set.
12948  */
12949 static struct rte_flow_action_handle *
12950 flow_hw_action_create(struct rte_eth_dev *dev,
12951 		       const struct rte_flow_indir_action_conf *conf,
12952 		       const struct rte_flow_action *action,
12953 		       struct rte_flow_error *err)
12954 {
12955 	return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
12956 					    NULL, conf, action, NULL, err);
12957 }
12958 
12959 /**
12960  * Destroy the indirect action.
12961  * Release action related resources on the NIC and the memory.
12962  * Lock free, (mutex should be acquired by caller).
12963  * Dispatcher for action type specific call.
12964  *
12965  * @param[in] dev
12966  *   Pointer to the Ethernet device structure.
12967  * @param[in] handle
12968  *   The indirect action object handle to be removed.
12969  * @param[out] error
12970  *   Perform verbose error reporting if not NULL. Initialized in case of
12971  *   error only.
12972  *
12973  * @return
12974  *   0 on success, otherwise negative errno value.
12975  */
12976 static int
12977 flow_hw_action_destroy(struct rte_eth_dev *dev,
12978 		       struct rte_flow_action_handle *handle,
12979 		       struct rte_flow_error *error)
12980 {
12981 	return flow_hw_action_handle_destroy(dev, MLX5_HW_INV_QUEUE,
12982 			NULL, handle, NULL, error);
12983 }
12984 
12985 /**
12986  * Updates in place shared action configuration.
12987  *
12988  * @param[in] dev
12989  *   Pointer to the Ethernet device structure.
12990  * @param[in] handle
12991  *   The indirect action object handle to be updated.
12992  * @param[in] update
12993  *   Action specification used to modify the action pointed by *handle*.
12994  *   *update* could be of same type with the action pointed by the *handle*
12995  *   handle argument, or some other structures like a wrapper, depending on
12996  *   the indirect action type.
12997  * @param[out] error
12998  *   Perform verbose error reporting if not NULL. Initialized in case of
12999  *   error only.
13000  *
13001  * @return
13002  *   0 on success, otherwise negative errno value.
13003  */
13004 static int
13005 flow_hw_action_update(struct rte_eth_dev *dev,
13006 		      struct rte_flow_action_handle *handle,
13007 		      const void *update,
13008 		      struct rte_flow_error *err)
13009 {
13010 	return flow_hw_action_handle_update(dev, MLX5_HW_INV_QUEUE,
13011 			NULL, handle, update, NULL, err);
13012 }
13013 
13014 static int
13015 flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
13016 			    const struct rte_flow_op_attr *attr,
13017 			    const struct rte_flow_action_handle *handle,
13018 			    void *data, void *user_data,
13019 			    struct rte_flow_error *error)
13020 {
13021 	struct mlx5_priv *priv = dev->data->dev_private;
13022 	struct mlx5_hw_q_job *job = NULL;
13023 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13024 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13025 	uint32_t idx = MLX5_INDIRECT_ACTION_IDX_GET(handle);
13026 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
13027 	int ret;
13028 	bool push = flow_hw_action_push(attr);
13029 	bool aso = false;
13030 
13031 	if (attr) {
13032 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
13033 					      data, MLX5_HW_Q_JOB_TYPE_QUERY,
13034 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
13035 		if (!job)
13036 			return -rte_errno;
13037 	}
13038 	switch (type) {
13039 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
13040 		ret = flow_hw_query_age(dev, age_idx, data, error);
13041 		break;
13042 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13043 		ret = flow_hw_query_counter(dev, act_idx, data, error);
13044 		break;
13045 	case MLX5_INDIRECT_ACTION_TYPE_CT:
13046 		aso = true;
13047 		if (job)
13048 			job->query.user = data;
13049 		ret = flow_hw_conntrack_query(dev, queue, idx, data,
13050 					      job, push, error);
13051 		break;
13052 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
13053 		aso = true;
13054 		ret = mlx5_quota_query(dev, queue, handle, data,
13055 				       job, push, error);
13056 		break;
13057 	default:
13058 		ret = -ENOTSUP;
13059 		rte_flow_error_set(error, ENOTSUP,
13060 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13061 					  "action type not supported");
13062 		break;
13063 	}
13064 	if (job)
13065 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
13066 	return ret;
13067 }
13068 
13069 static int
13070 flow_hw_async_action_handle_query_update
13071 			(struct rte_eth_dev *dev, uint32_t queue,
13072 			 const struct rte_flow_op_attr *attr,
13073 			 struct rte_flow_action_handle *handle,
13074 			 const void *update, void *query,
13075 			 enum rte_flow_query_update_mode qu_mode,
13076 			 void *user_data, struct rte_flow_error *error)
13077 {
13078 	struct mlx5_priv *priv = dev->data->dev_private;
13079 	bool push = flow_hw_action_push(attr);
13080 	bool aso = false;
13081 	struct mlx5_hw_q_job *job = NULL;
13082 	int ret = 0;
13083 
13084 	if (attr) {
13085 		job = flow_hw_action_job_init(priv, queue, handle, user_data,
13086 					      query,
13087 					      MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
13088 					      MLX5_HW_INDIRECT_TYPE_LEGACY, error);
13089 		if (!job)
13090 			return -rte_errno;
13091 	}
13092 	switch (MLX5_INDIRECT_ACTION_TYPE_GET(handle)) {
13093 	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
13094 		if (qu_mode != RTE_FLOW_QU_QUERY_FIRST) {
13095 			ret = rte_flow_error_set
13096 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
13097 				 NULL, "quota action must query before update");
13098 			break;
13099 		}
13100 		aso = true;
13101 		ret = mlx5_quota_query_update(dev, queue, handle,
13102 					      update, query, job, push, error);
13103 		break;
13104 	default:
13105 		ret = rte_flow_error_set(error, ENOTSUP,
13106 					 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "update and query not supported");
13107 	}
13108 	if (job)
13109 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
13110 	return ret;
13111 }
13112 
13113 static int
13114 flow_hw_action_query(struct rte_eth_dev *dev,
13115 		     const struct rte_flow_action_handle *handle, void *data,
13116 		     struct rte_flow_error *error)
13117 {
13118 	return flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,
13119 			handle, data, NULL, error);
13120 }
13121 
13122 static int
13123 flow_hw_action_query_update(struct rte_eth_dev *dev,
13124 			    struct rte_flow_action_handle *handle,
13125 			    const void *update, void *query,
13126 			    enum rte_flow_query_update_mode qu_mode,
13127 			    struct rte_flow_error *error)
13128 {
13129 	return flow_hw_async_action_handle_query_update(dev, MLX5_HW_INV_QUEUE,
13130 							NULL, handle, update,
13131 							query, qu_mode, NULL,
13132 							error);
13133 }
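
/*
 * Illustrative note: the flow_hw_action_* wrappers above implement the
 * synchronous indirect action API by forwarding to the asynchronous handlers
 * with MLX5_HW_INV_QUEUE and a NULL operation attribute, so the PMD completes
 * the operation inline. Hypothetical application-level usage (port_id, conf,
 * action, update, err are placeholders):
 *
 *   struct rte_flow_action_handle *h =
 *       rte_flow_action_handle_create(port_id, &conf, &action, &err);
 *   if (h)
 *       rte_flow_action_handle_update(port_id, h, &update, &err);
 */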
13134 
13135 /**
13136  * Get aged-out flows of a given port on the given HWS flow queue.
13137  *
13138  * @param[in] dev
13139  *   Pointer to the Ethernet device structure.
13140  * @param[in] queue_id
13141  *   Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is not set.
13142  * @param[in, out] contexts
13143  *   The address of an array of pointers to the aged-out flows contexts.
13144  * @param[in] nb_contexts
13145  *   The length of context array pointers.
13146  * @param[out] error
13147  *   Perform verbose error reporting if not NULL. Initialized in case of
13148  *   error only.
13149  *
13150  * @return
13151  *   If nb_contexts is 0, return the number of all aged-out contexts.
13152  *   If nb_contexts is not 0, return the number of aged-out flows reported
13153  *   in the context array, otherwise a negative errno value.
13154  */
13155 static int
13156 flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
13157 			 void **contexts, uint32_t nb_contexts,
13158 			 struct rte_flow_error *error)
13159 {
13160 	struct mlx5_priv *priv = dev->data->dev_private;
13161 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
13162 	struct rte_ring *r;
13163 	int nb_flows = 0;
13164 
13165 	if (nb_contexts && !contexts)
13166 		return rte_flow_error_set(error, EINVAL,
13167 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13168 					  NULL, "empty context");
13169 	if (!priv->hws_age_req)
13170 		return rte_flow_error_set(error, ENOENT,
13171 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13172 					  NULL, "No aging initialized");
13173 	if (priv->hws_strict_queue) {
13174 		if (queue_id >= age_info->hw_q_age->nb_rings)
13175 			return rte_flow_error_set(error, EINVAL,
13176 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13177 						NULL, "invalid queue id");
13178 		r = age_info->hw_q_age->aged_lists[queue_id];
13179 	} else {
13180 		r = age_info->hw_age.aged_list;
13181 		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13182 	}
13183 	if (nb_contexts == 0)
13184 		return rte_ring_count(r);
13185 	while ((uint32_t)nb_flows < nb_contexts) {
13186 		uint32_t age_idx;
13187 
13188 		if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
13189 			break;
13190 		/* Get the AGE context if the aged-out index is still valid. */
13191 		contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
13192 		if (!contexts[nb_flows])
13193 			continue;
13194 		nb_flows++;
13195 	}
13196 	return nb_flows;
13197 }
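
/*
 * Illustrative usage sketch of the query above (names are placeholders):
 * calling with nb_contexts == 0 only returns the number of aged-out entries,
 * so a typical caller sizes the array first and then fetches the contexts
 * through the corresponding rte_flow API.
 *
 *   int n = rte_flow_get_q_aged_flows(port_id, queue, NULL, 0, &err);
 *   if (n > 0) {
 *       void **ctx = calloc(n, sizeof(*ctx));
 *       n = rte_flow_get_q_aged_flows(port_id, queue, ctx, n, &err);
 *       // handle the n aged-out contexts, then free(ctx)
 *   }
 */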
13198 
13199 /**
13200  * Get aged-out flows.
13201  *
13202  * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
13203  *
13204  * @param[in] dev
13205  *   Pointer to the Ethernet device structure.
13206  * @param[in] contexts
13207  *   The address of an array of pointers to the aged-out flows contexts.
13208  * @param[in] nb_contexts
13209  *   The length of context array pointers.
13210  * @param[out] error
13211  *   Perform verbose error reporting if not NULL. Initialized in case of
13212  *   error only.
13213  *
13214  * @return
13215  *   The number of contexts retrieved on success, otherwise a negative errno value.
13216  *   If nb_contexts is 0, return the number of all aged-out contexts.
13217  *   If nb_contexts is not 0, return the number of aged-out flows reported
13218  *   in the context array.
13219  */
13220 static int
13221 flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
13222 		       uint32_t nb_contexts, struct rte_flow_error *error)
13223 {
13224 	struct mlx5_priv *priv = dev->data->dev_private;
13225 
13226 	if (priv->hws_strict_queue)
13227 		DRV_LOG(WARNING,
13228 			"port %u get aged flows called in strict queue mode.",
13229 			dev->data->port_id);
13230 	return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
13231 }
13232 /**
13233  * Initialization function for the non-template API, which calls
13234  * flow_hw_configure with default values.
13235  * No user queues are configured; a single queue is configured by default for internal usage.
13236  *
13237  * @param[in] dev
13238  *   Pointer to the Ethernet device structure.
13239  * @param[out] error
13240  *   Pointer to the error structure.
13241  *
13242  * @return
13243  *   0 on success, a negative errno value otherwise and rte_errno is set.
13244  */
13245 int
13246 flow_hw_init(struct rte_eth_dev *dev,
13247 	     struct rte_flow_error *error)
13248 {
13249 	const struct rte_flow_port_attr port_attr = {0};
13250 	const struct rte_flow_queue_attr queue_attr = {.size = MLX5_NT_DEFAULT_QUEUE_SIZE};
13251 	const struct rte_flow_queue_attr *attr_list = &queue_attr;
13252 
13253 	/*
13254 	 * If the user uses both the template and the non-template API:
13255 	 * the user will call flow_hw_configure and the non-template
13256 	 * API will use the allocated actions.
13257 	 * This init function will not call flow_hw_configure.
13258 	 *
13259 	 * If the user uses only the non-template APIs:
13260 	 * this init function will call flow_hw_configure.
13261 	 * It will not allocate memory for actions.
13262 	 * When allocation is needed, it is handled the same way as for SWS today,
13263 	 * meaning bulk allocations that are resized as needed.
13264 	 */
13265 	/* Configure HWS with default values. */
13266 	DRV_LOG(DEBUG, "Apply default configuration, zero number of queues, inner control queue size is %u",
13267 		MLX5_NT_DEFAULT_QUEUE_SIZE);
13268 	return __flow_hw_configure(dev, &port_attr, 0, &attr_list, true, error);
13269 }
13270 
13271 static int flow_hw_prepare(struct rte_eth_dev *dev,
13272 			   const struct rte_flow_action actions[] __rte_unused,
13273 			   enum mlx5_flow_type type,
13274 			   struct rte_flow_hw **flow,
13275 			   struct rte_flow_error *error)
13276 {
13277 	struct mlx5_priv *priv = dev->data->dev_private;
13278 	uint32_t idx = 0;
13279 
13280 	/*
13281 	 * Note that the ipool entry size is (sizeof(struct rte_flow_hw)
13282 	 * + sizeof(struct rte_flow_nt2hws)) in HWS mode.
13283 	 */
13284 	*flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
13285 	if (!(*flow))
13286 		return rte_flow_error_set(error, ENOMEM,
13287 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13288 			"cannot allocate flow memory");
13289 	/* Two structures are allocated in one pool slot; update the nt2hws pointer. */
13290 	(*flow)->nt2hws = (struct rte_flow_nt2hws *)
13291 				((uintptr_t)(*flow) + sizeof(struct rte_flow_hw));
13292 	(*flow)->idx = idx;
13293 	(*flow)->nt2hws->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux),
13294 				    RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
13295 	if (!(*flow)->nt2hws->flow_aux)
13296 		return rte_flow_error_set(error, ENOMEM,
13297 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13298 				"cannot allocate flow aux memory");
13299 	return 0;
13300 }
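
/*
 * Memory layout produced by flow_hw_prepare() (illustration only):
 *
 *   struct {
 *       struct rte_flow_hw flow;       // ipool entry, indexed by flow->idx
 *       struct rte_flow_nt2hws nt2hws; // placed right after rte_flow_hw
 *   };                                 // in the same ipool entry
 *
 * flow->nt2hws->flow_aux is a separate mlx5_malloc() allocation and is
 * released later in flow_hw_destroy().
 */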
13301 
13302 #define FLOW_HW_SET_DV_FIELDS(flow_attr, root, dv_resource) {					\
13303 	typeof(flow_attr) _flow_attr = (flow_attr);						\
13304 	if (_flow_attr->transfer)								\
13305 		dv_resource.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;				\
13306 	else											\
13307 		dv_resource.ft_type = _flow_attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :	\
13308 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;			\
13309 	root = _flow_attr->group ? 0 : 1;							\
13310 	dv_resource.flags =									\
13311 		mlx5_hw_act_flag[!!_flow_attr->group][get_mlx5dr_table_type(_flow_attr)];	\
13312 }
13313 
13314 static int
13315 flow_hw_modify_hdr_resource_register
13316 			(struct rte_eth_dev *dev,
13317 			 struct rte_flow_template_table *table,
13318 			 struct mlx5_hw_actions *hw_acts,
13319 			 struct rte_flow_hw *dev_flow,
13320 			 struct rte_flow_error *error)
13321 {
13322 	struct rte_flow_attr *attr = &table->cfg.attr.flow_attr;
13323 	struct mlx5_flow_dv_modify_hdr_resource *dv_resource_ptr = NULL;
13324 	union {
13325 		struct mlx5_flow_dv_modify_hdr_resource dv_resource;
13326 		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13327 			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
13328 	} dummy;
13329 	int ret;
13330 
13331 	if (hw_acts->mhdr) {
13332 		dummy.dv_resource.actions_num = hw_acts->mhdr->mhdr_cmds_num;
13333 		memcpy(dummy.dv_resource.actions, hw_acts->mhdr->mhdr_cmds,
13334 			sizeof(struct mlx5_modification_cmd) * dummy.dv_resource.actions_num);
13335 	} else {
13336 		return 0;
13337 	}
13338 	FLOW_HW_SET_DV_FIELDS(attr, dummy.dv_resource.root, dummy.dv_resource);
13339 	dummy.dv_resource.flags |= MLX5DR_ACTION_FLAG_SHARED;
13340 	ret = __flow_modify_hdr_resource_register(dev, &dummy.dv_resource,
13341 		&dv_resource_ptr, error);
13342 	if (ret)
13343 		return ret;
13344 	MLX5_ASSERT(dv_resource_ptr);
13345 	dev_flow->nt2hws->modify_hdr = dv_resource_ptr;
13346 	/* keep action for the rule construction. */
13347 	hw_acts->rule_acts[hw_acts->mhdr->pos].action = dv_resource_ptr->action;
13348 	/* Bulk size is 1, so index is 1. */
13349 	dev_flow->res_idx = 1;
13350 	return 0;
13351 }
13352 
13353 static int
13354 flow_hw_encap_decap_resource_register
13355 			(struct rte_eth_dev *dev,
13356 			 struct rte_flow_template_table *table,
13357 			 struct mlx5_hw_actions *hw_acts,
13358 			 struct rte_flow_hw *dev_flow,
13359 			 struct rte_flow_error *error)
13360 {
13361 	struct rte_flow_attr *attr = &table->cfg.attr.flow_attr;
13362 	struct mlx5_flow_dv_encap_decap_resource *dv_resource_ptr = NULL;
13363 	struct mlx5_flow_dv_encap_decap_resource dv_resource;
13364 	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
13365 	int ret;
13366 	bool is_root;
13367 	int ix;
13368 
13369 	if (hw_acts->encap_decap)
13370 		dv_resource.reformat_type = hw_acts->encap_decap->action_type;
13371 	else
13372 		return 0;
13373 	FLOW_HW_SET_DV_FIELDS(attr, is_root, dv_resource);
13374 	ix = mlx5_bwc_multi_pattern_reformat_to_index((enum mlx5dr_action_type)
13375 			dv_resource.reformat_type);
13376 	if (ix < 0)
13377 		return ix;
13378 	if (hw_acts->encap_decap->shared) {
13379 		dv_resource.size = hw_acts->encap_decap->data_size;
13380 		MLX5_ASSERT(dv_resource.size <= MLX5_ENCAP_MAX_LEN);
13381 		memcpy(&dv_resource.buf, hw_acts->encap_decap->data, dv_resource.size);
13382 		dv_resource.flags |= MLX5DR_ACTION_FLAG_SHARED;
13383 	} else {
13384 		typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + ix;
13385 		if (!reformat->elements_num)
13386 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
13387 					NULL, "No reformat action exist in the table.");
13388 		dv_resource.size = reformat->reformat_hdr->sz;
13389 		MLX5_ASSERT(dv_resource.size <= MLX5_ENCAP_MAX_LEN);
13390 		memcpy(&dv_resource.buf, reformat->reformat_hdr->data, dv_resource.size);
13391 	}
13392 	ret = __flow_encap_decap_resource_register(dev, &dv_resource, is_root,
13393 		&dv_resource_ptr, error);
13394 	if (ret)
13395 		return ret;
13396 	MLX5_ASSERT(dv_resource_ptr);
13397 	dev_flow->nt2hws->rix_encap_decap = dv_resource_ptr->idx;
13398 	/* keep action for the rule construction. */
13399 	if (hw_acts->encap_decap->shared)
13400 		hw_acts->rule_acts[hw_acts->encap_decap_pos].action = dv_resource_ptr->action;
13401 	else
13402 		mpctx->segments[0].reformat_action[ix] = dv_resource_ptr->action;
13403 	/* Bulk size is 1, so index is 1. */
13404 	dev_flow->res_idx = 1;
13405 	return 0;
13406 }
13407 
13408 static enum rte_flow_action_type
13409 flow_nta_get_indirect_action_type(const struct rte_flow_action *action)
13410 {
13411 	switch (MLX5_INDIRECT_ACTION_TYPE_GET(action->conf)) {
13412 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
13413 		return RTE_FLOW_ACTION_TYPE_RSS;
13414 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
13415 		return RTE_FLOW_ACTION_TYPE_AGE;
13416 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13417 		return RTE_FLOW_ACTION_TYPE_COUNT;
13418 	case MLX5_INDIRECT_ACTION_TYPE_CT:
13419 		return RTE_FLOW_ACTION_TYPE_CONNTRACK;
13420 	default:
13421 		break;
13422 	}
13423 	return RTE_FLOW_ACTION_TYPE_END;
13424 }
13425 
13426 static void
13427 flow_nta_set_mh_mask_conf(const struct rte_flow_action_modify_field *action_conf,
13428 			  struct rte_flow_action_modify_field *mask_conf)
13429 {
13430 	memset(mask_conf, 0xff, sizeof(*mask_conf));
13431 	mask_conf->operation = action_conf->operation;
13432 	mask_conf->dst.field = action_conf->dst.field;
13433 	mask_conf->src.field = action_conf->src.field;
13434 }
13435 
13436 union actions_conf {
13437 	struct rte_flow_action_modify_field modify_field;
13438 	struct rte_flow_action_raw_encap raw_encap;
13439 	struct rte_flow_action_vxlan_encap vxlan_encap;
13440 	struct rte_flow_action_nvgre_encap nvgre_encap;
13441 };
13442 
13443 static int
13444 flow_nta_build_template_mask(const struct rte_flow_action actions[],
13445 			     struct rte_flow_action masks[MLX5_HW_MAX_ACTS],
13446 			     union actions_conf mask_conf[MLX5_HW_MAX_ACTS])
13447 {
13448 	int i;
13449 
13450 	for (i = 0; i == 0 || actions[i - 1].type != RTE_FLOW_ACTION_TYPE_END; i++) {
13451 		const struct rte_flow_action *action = &actions[i];
13452 		struct rte_flow_action *mask = &masks[i];
13453 		union actions_conf *conf = &mask_conf[i];
13454 
13455 		mask->type = action->type;
13456 		switch (action->type) {
13457 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
13458 			mask->type = flow_nta_get_indirect_action_type(action);
13459 			if (!mask->type)
13460 				return -EINVAL;
13461 			break;
13462 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13463 			flow_nta_set_mh_mask_conf(action->conf, (void *)conf);
13464 			mask->conf = conf;
13465 			break;
13466 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13467 			/* This mask will set this action as shared. */
13468 			memset(conf, 0xff, sizeof(struct rte_flow_action_raw_encap));
13469 			mask->conf = conf;
13470 			break;
13471 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13472 			/* This mask will set this action as shared. */
13473 			conf->vxlan_encap.definition =
13474 				((const struct rte_flow_action_vxlan_encap *)
13475 					action->conf)->definition;
13476 			mask->conf = conf;
13477 			break;
13478 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13479 			/* This mask will set this action as shared. */
13480 			conf->nvgre_encap.definition =
13481 				((const struct rte_flow_action_nvgre_encap *)
13482 					action->conf)->definition;
13483 			mask->conf = conf;
13484 			break;
13485 		default:
13486 			break;
13487 		}
13488 	}
13489 	return 0;
13490 #undef NTA_CHECK_CONF_BUF_SIZE
13491 }
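
/*
 * Illustrative example of the mask array built above (hypothetical input):
 * for a non-template actions list
 *
 *   { MODIFY_FIELD, RAW_ENCAP, QUEUE, END }
 *
 * the generated masks are
 *
 *   { MODIFY_FIELD with a fully set conf (treated as shared),
 *     RAW_ENCAP    with a fully set conf (treated as shared),
 *     QUEUE        with a NULL conf (unmasked, filled at construct time),
 *     END }
 */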
13492 
13493 static int
13494 flow_hw_translate_flow_actions(struct rte_eth_dev *dev,
13495 			  const struct rte_flow_attr *attr,
13496 			  const struct rte_flow_action actions[],
13497 			  struct rte_flow_hw *flow,
13498 			  struct mlx5_flow_hw_action_params *ap,
13499 			  struct mlx5_hw_actions *hw_acts,
13500 			  uint64_t item_flags, uint64_t action_flags,
13501 			  bool external,
13502 			  struct rte_flow_error *error)
13503 {
13504 	int ret = 0;
13505 	uint32_t src_group = 0;
13506 	enum mlx5dr_table_type table_type;
13507 	struct rte_flow_template_table *table = NULL;
13508 	struct mlx5_flow_group grp;
13509 	struct rte_flow_actions_template *at = NULL;
13510 	struct rte_flow_actions_template_attr template_attr = {
13511 		.egress = attr->egress,
13512 		.ingress = attr->ingress,
13513 		.transfer = attr->transfer,
13514 	};
13515 	struct rte_flow_action masks[MLX5_HW_MAX_ACTS];
13516 	union actions_conf mask_conf[MLX5_HW_MAX_ACTS];
13517 
13518 	RTE_SET_USED(action_flags);
13519 	memset(masks, 0, sizeof(masks));
13520 	memset(mask_conf, 0, sizeof(mask_conf));
13521 	/*
13522 	 * Note that all direct actions will be unmasked,
13523 	 * except for modify header and encap,
13524 	 * and therefore will be parsed as part of action construction.
13525 	 * Modify header is always shared in HWS,
13526 	 * encap is masked such that it will be treated as shared.
13527 	 * Shared actions are parsed as part of template translation
13528 	 * and not during action construction.
13529 	 */
13530 	flow_nta_build_template_mask(actions, masks, mask_conf);
13531 	/* Translate the group from the attribute in advance. */
13532 	ret = __translate_group(dev, attr, external, attr->group, &src_group, error);
13533 	if (ret)
13534 		return ret;
13535 	if (attr->transfer)
13536 		table_type = MLX5DR_TABLE_TYPE_FDB;
13537 	else if (attr->egress)
13538 		table_type = MLX5DR_TABLE_TYPE_NIC_TX;
13539 	else
13540 		table_type = MLX5DR_TABLE_TYPE_NIC_RX;
13541 	/* TODO: consider reusing the workspace per thread. */
13542 	table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*table), 0, SOCKET_ID_ANY);
13543 	if (!table)
13544 		return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
13545 				   actions, "Failed to allocate dummy table");
13546 	at = __flow_hw_actions_template_create(dev, &template_attr, actions, masks, true, error);
13547 	if (!at) {
13548 		ret = -rte_errno;
13549 		goto end;
13550 	}
13551 	grp.group_id = src_group;
13552 	table->grp = &grp;
13553 	table->type = table_type;
13554 	table->cfg.external = external;
13555 	table->nb_action_templates = 1;
13556 	memcpy(&table->cfg.attr.flow_attr, attr, sizeof(*attr));
13557 	table->ats[0].action_template = at;
13558 	ret = __flow_hw_translate_actions_template(dev, &table->cfg, hw_acts, at,
13559 		&table->mpctx, true, error);
13560 	if (ret)
13561 		goto end;
13562 	/* Handle bulk actions registration. */
13563 	ret = flow_hw_encap_decap_resource_register(dev, table, hw_acts, flow, error);
13564 	if (ret)
13565 		goto clean_up;
13566 	ret = flow_hw_modify_hdr_resource_register(dev, table, hw_acts, flow, error);
13567 	if (ret)
13568 		goto clean_up;
13569 	table->ats[0].acts = *hw_acts;
13570 	ret = flow_hw_actions_construct(dev, flow, ap,
13571 		&table->ats[0], item_flags, table,
13572 		actions, hw_acts->rule_acts, 0, error);
13573 	if (ret)
13574 		goto clean_up;
13575 	goto end;
13576 clean_up:
13577 	/* Make sure that there is no garbage in the actions. */
13578 	__flow_hw_action_template_destroy(dev, hw_acts);
13579 end:
13580 	if (table)
13581 		mlx5_free(table);
13582 	if (at)
13583 		mlx5_free(at);
13584 	return ret;
13585 }
13586 
13587 static int
13588 flow_hw_unregister_matcher(struct rte_eth_dev *dev,
13589 			   struct mlx5_flow_dv_matcher *matcher)
13590 {
13591 	struct mlx5_priv *priv = dev->data->dev_private;
13592 	struct mlx5_flow_group *group = matcher->group;
13593 	int ret = 0;
13594 
13595 	if (group) {
13596 		if (matcher->matcher_object)
13597 			ret |= mlx5_list_unregister(group->matchers, &matcher->entry);
13598 		ret |= mlx5_hlist_unregister(priv->sh->groups, &group->entry);
13599 	}
13600 	return ret;
13601 }
13602 
13603 static int flow_hw_register_matcher(struct rte_eth_dev *dev,
13604 				    const struct rte_flow_attr *attr,
13605 				    const struct rte_flow_item items[],
13606 				    bool external,
13607 				    struct rte_flow_hw *flow,
13608 				    struct mlx5_flow_dv_matcher *matcher,
13609 				    struct rte_flow_error *error)
13610 {
13611 	struct mlx5_priv *priv = dev->data->dev_private;
13612 	struct rte_flow_error sub_error = {
13613 		.type = RTE_FLOW_ERROR_TYPE_NONE,
13614 		.cause = NULL,
13615 		.message = NULL,
13616 	};
13617 	struct rte_flow_attr flow_attr = *attr;
13618 	uint32_t specialize = 0; /* No unified FDB. */
13619 	struct mlx5_flow_cb_ctx ctx = {
13620 		.dev = dev,
13621 		.error = &sub_error,
13622 		.data = &flow_attr,
13623 		.data2 = &specialize,
13624 	};
13625 	void *items_ptr = &items;
13626 	struct mlx5_flow_cb_ctx matcher_ctx = {
13627 		.error = &sub_error,
13628 		.data = matcher,
13629 		.data2 = items_ptr,
13630 	};
13631 	struct mlx5_list_entry *group_entry = NULL;
13632 	struct mlx5_list_entry *matcher_entry = NULL;
13633 	struct mlx5_flow_dv_matcher *resource;
13634 	struct mlx5_list *matchers_list;
13635 	struct mlx5_flow_group *flow_group;
13636 	int ret;
13637 
13638 
13639 	matcher->crc = rte_raw_cksum((const void *)matcher->mask.buf,
13640 				    matcher->mask.size);
13641 	matcher->priority = attr->priority;
13642 	ret = __translate_group(dev, attr, external, attr->group, &flow_attr.group, error);
13643 	if (ret)
13644 		return ret;
13645 
13646 	/* Register the flow group. */
13647 	group_entry = mlx5_hlist_register(priv->sh->groups, flow_attr.group, &ctx);
13648 	if (!group_entry)
13649 		goto error;
13650 	flow_group = container_of(group_entry, struct mlx5_flow_group, entry);
13651 
13652 	matchers_list = flow_group->matchers;
13653 	matcher->group = flow_group;
13654 	matcher_entry = mlx5_list_register(matchers_list, &matcher_ctx);
13655 	if (!matcher_entry)
13656 		goto error;
13657 	resource = container_of(matcher_entry, typeof(*resource), entry);
13658 	flow->nt2hws->matcher = resource;
13659 	return 0;
13660 
13661 error:
13662 	if (group_entry)
13663 		mlx5_hlist_unregister(priv->sh->groups, group_entry);
13664 	if (error) {
13665 		if (sub_error.type != RTE_FLOW_ERROR_TYPE_NONE)
13666 			rte_memcpy(error, &sub_error, sizeof(sub_error));
13667 	}
13668 	return rte_flow_error_set(error, ENOMEM,
13669 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13670 					NULL, "fail to register matcher");
13671 }
13672 
13673 static int
13674 flow_hw_allocate_actions(struct rte_eth_dev *dev,
13675 			 uint64_t action_flags,
13676 			 struct rte_flow_error *error)
13677 {
13678 	struct mlx5_priv *priv = dev->data->dev_private;
13679 	int ret;
13680 	uint obj_num;
13681 
13682 	error->type = RTE_FLOW_ERROR_TYPE_NONE;
13683 	if (action_flags & MLX5_FLOW_ACTION_AGE) {
13684 		/* If no age objects were previously allocated. */
13685 		if (!priv->hws_age_req) {
13686 			/* If no counters were previously allocated. */
13687 			if (!priv->hws_cpool) {
13688 				obj_num = MLX5_CNT_NT_MAX(priv);
13689 				ret = mlx5_hws_cnt_pool_create(dev, obj_num,
13690 							       priv->nb_queue,
13691 							       NULL, error);
13692 				if (ret)
13693 					goto err;
13694 			}
13695 			/* Allocate same number of counters. */
13696 			ret = mlx5_hws_age_pool_init(dev, priv->hws_cpool->cfg.request_num,
13697 						     priv->nb_queue, false);
13698 			if (ret)
13699 				goto err;
13700 		}
13701 	}
13702 	if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13703 		/* If no counters were previously allocated. */
13704 		if (!priv->hws_cpool) {
13705 			obj_num = MLX5_CNT_NT_MAX(priv);
13706 			ret = mlx5_hws_cnt_pool_create(dev, obj_num,
13707 						       priv->nb_queue, NULL,
13708 						       error);
13709 			if (ret)
13710 				goto err;
13711 		}
13712 	}
13713 	if (action_flags & MLX5_FLOW_ACTION_CT) {
13714 		/* If no CT were previously allocated. */
13715 		if (!priv->hws_ctpool) {
13716 			obj_num = MLX5_CT_NT_MAX(priv);
13717 			ret = mlx5_flow_ct_init(dev, obj_num, priv->nb_queue);
13718 			if (ret)
13719 				goto err;
13720 		}
13721 	}
13722 	if (action_flags & MLX5_FLOW_ACTION_METER) {
13723 		/* If no meters were previously allocated. */
13724 		if (!priv->hws_mpool) {
13725 			obj_num = MLX5_MTR_NT_MAX(priv);
13726 			ret = mlx5_flow_meter_init(dev, obj_num, 0, 0,
13727 						   priv->nb_queue);
13728 			if (ret)
13729 				goto err;
13730 		}
13731 	}
13732 	return 0;
13733 err:
13734 	if (ret && error->type != RTE_FLOW_ERROR_TYPE_NONE)
13735 		return ret;
13736 	return rte_flow_error_set(error, ret,
13737 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13738 				  NULL, "fail to allocate actions");
13739 }
13740 
13741 static int flow_hw_apply(const struct rte_flow_item items[],
13742 			 struct mlx5dr_rule_action rule_actions[],
13743 			 struct rte_flow_hw *flow,
13744 			 struct rte_flow_error *error)
13745 {
13746 	struct mlx5dr_bwc_rule *rule = NULL;
13747 
13748 	rule = mlx5dr_bwc_rule_create((struct mlx5dr_bwc_matcher *)
13749 		flow->nt2hws->matcher->matcher_object,
13750 		items, rule_actions);
13751 	flow->nt2hws->nt_rule = rule;
13752 	if (!rule) {
13753 		return rte_flow_error_set(error, EINVAL,
13754 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13755 			NULL, "fail to create rte flow");
13756 	}
13757 	return 0;
13758 }
13759 
13760 #ifdef HAVE_MLX5_HWS_SUPPORT
13761 /**
13762  * Create a flow.
13763  *
13764  * @param[in] dev
13765  *   Pointer to Ethernet device.
13766  * @param[in] type
13767  *   Flow type.
13768  * @param[in] attr
13769  *   Flow rule attributes.
13770  * @param[in] items
13771  *   Pattern specification (list terminated by the END pattern item).
13772  * @param[in] actions
13773  *   Associated actions (list terminated by the END action).
13774  * @param[in] external
13775  *   This flow rule is created by request external to PMD.
13776  * @param[out] flow
13777  *   Flow pointer
13778  * @param[out] error
13779  *   Perform verbose error reporting if not NULL.
13780  *
13781  * @return
13782  *   0 on success, negative errno value otherwise and rte_errno set.
13783  */
13784 int
13785 flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type,
13786 		    const struct rte_flow_attr *attr,
13787 		    const struct rte_flow_item items[],
13788 		    const struct rte_flow_action actions[],
13789 		    uint64_t item_flags, uint64_t action_flags, bool external,
13790 		    struct rte_flow_hw **flow, struct rte_flow_error *error)
13791 {
13792 	int ret;
13793 	struct mlx5_hw_actions hw_act = { { NULL } };
13794 	struct mlx5_flow_hw_action_params ap;
13795 	struct mlx5_flow_dv_matcher matcher = {
13796 		.mask = {
13797 			.size = sizeof(matcher.mask.buf),
13798 		},
13799 	};
13800 	uint32_t tbl_type;
13801 
13802 	struct mlx5_flow_attr flow_attr = {
13803 		.port_id = dev->data->port_id,
13804 		.group = attr->group,
13805 		.priority = attr->priority,
13806 		.rss_level = 0,
13807 		.act_flags = action_flags,
13808 		.tbl_type = 0,
13809 	};
13810 
13811 	if (attr->transfer)
13812 		tbl_type = MLX5DR_TABLE_TYPE_FDB;
13813 	else if (attr->egress)
13814 		tbl_type = MLX5DR_TABLE_TYPE_NIC_TX;
13815 	else
13816 		tbl_type = MLX5DR_TABLE_TYPE_NIC_RX;
13817 	flow_attr.tbl_type = tbl_type;
13818 
13819 	/* Allocate needed memory. */
13820 	ret = flow_hw_prepare(dev, actions, type, flow, error);
13821 	if (ret)
13822 		goto error;
13823 
13824 	/* TODO TBD flow_hw_handle_tunnel_offload(). */
13825 	(*flow)->nt_rule = true;
13826 	(*flow)->nt2hws->matcher = &matcher;
13827 	ret = flow_dv_translate_items_hws(items, &flow_attr, &matcher.mask.buf,
13828 					MLX5_SET_MATCHER_HS_M, NULL,
13829 					NULL, error);
13830 
13831 	if (ret)
13832 		goto error;
13833 
13834 	ret = flow_hw_register_matcher(dev, attr, items, external, *flow, &matcher, error);
13835 	if (ret)
13836 		goto error;
13837 
13838 	/*
13839 	 * ASO allocation - iterate over the actions list to allocate missing resources.
13840 	 * In the future, when a validate function is added to HWS,
13841 	 * its output actions bit mask can be used instead of
13842 	 * looping over the actions array twice.
13843 	 */
13844 	ret = flow_hw_allocate_actions(dev, action_flags, error);
13845 	if (ret)
13846 		goto error;
13847 
13848 	/* Note: the actions should be saved in the sub-flow rule itself for reference. */
13849 	ret = flow_hw_translate_flow_actions(dev, attr, actions, *flow, &ap, &hw_act,
13850 					item_flags, action_flags, external, error);
13851 	if (ret)
13852 		goto error;
13853 
13854 	/*
13855 	 * If the flow is external (created by the application), or the device is started,
13856 	 * or this is an mreg copy flow created during discovery, then apply it immediately.
13857 	 */
13858 	if (external || dev->data->dev_started ||
13859 	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
13860 	     attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
13861 		ret = flow_hw_apply(items, hw_act.rule_acts, *flow, error);
13862 		if (ret)
13863 			goto error;
13864 	}
13865 	ret = 0;
13866 error:
13867 	/*
13868 	 * Release the allocated memory.
13869 	 * __flow_hw_actions_release(dev, &hw_act) cannot be used here,
13870 	 * since it destroys the actions as well.
13871 	 */
13872 	if (hw_act.encap_decap)
13873 		mlx5_free(hw_act.encap_decap);
13874 	if (hw_act.push_remove)
13875 		mlx5_free(hw_act.push_remove);
13876 	if (hw_act.mhdr)
13877 		mlx5_free(hw_act.mhdr);
13878 	if (ret) {
13879 		/* release after actual error */
13880 		if ((*flow)->nt2hws && (*flow)->nt2hws->matcher)
13881 			flow_hw_unregister_matcher(dev, (*flow)->nt2hws->matcher);
13882 	}
13883 	return ret;
13884 }
13885 #endif
13886 
13887 void
13888 flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow)
13889 {
13890 	int ret;
13891 	struct mlx5_priv *priv = dev->data->dev_private;
13892 
13893 	if (!flow || !flow->nt2hws)
13894 		return;
13895 
13896 	if (flow->nt2hws->nt_rule) {
13897 		ret = mlx5dr_bwc_rule_destroy(flow->nt2hws->nt_rule);
13898 		if (ret)
13899 			DRV_LOG(ERR, "bwc rule destroy failed");
13900 	}
13901 	flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY;
13902 	/* Note that this function does not handle shared/static actions. */
13903 	hw_cmpl_flow_update_or_destroy(dev, flow, 0, NULL);
13904 
13905 	/*
13906 	 * TODO: TBD - release tunnel-related memory allocations (mlx5_flow_tunnel_free),
13907 	 * needed only if tunnel offloads are supported; note that SWS also updates the Rx queue flags.
13908 	 */
13909 
13910 	/*
13911 	 * Note that the matcher is destroyed when the matcher list is destroyed,
13912 	 * same as for DV.
13913 	 */
13914 	if (flow->nt2hws->flow_aux)
13915 		mlx5_free(flow->nt2hws->flow_aux);
13916 
13917 	if (flow->nt2hws->rix_encap_decap)
13918 		flow_encap_decap_resource_release(dev, flow->nt2hws->rix_encap_decap);
13919 	if (flow->nt2hws->modify_hdr) {
13920 		MLX5_ASSERT(flow->nt2hws->modify_hdr->action);
13921 		mlx5_hlist_unregister(priv->sh->modify_cmds,
13922 				      &flow->nt2hws->modify_hdr->entry);
13923 	}
13924 	if (flow->nt2hws->matcher)
13925 		flow_hw_unregister_matcher(dev, flow->nt2hws->matcher);
13926 }
13927 
13928 #ifdef HAVE_MLX5_HWS_SUPPORT
13929 /**
13930  * Destroy a flow.
13931  *
13932  * @param[in] dev
13933  *   Pointer to Ethernet device.
13934  * @param[in] type
13935  *   Flow type.
13936  * @param[in] flow_addr
13937  *   Address of flow to destroy.
13938  */
13939 void
13940 flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
13941 		     uintptr_t flow_addr)
13942 {
13943 	struct mlx5_priv *priv = dev->data->dev_private;
13944 	struct rte_flow_hw *flow = (struct rte_flow_hw *)flow_addr;
13945 	struct mlx5_nta_rss_flow_head head = { .slh_first = flow };
13946 
13947 	if (!flow || !flow->nt2hws || flow->nt2hws->chaned_flow)
13948 		return;
13949 	mlx5_flow_nta_del_copy_action(dev, flow->nt2hws->rix_mreg_copy);
13950 	while (!SLIST_EMPTY(&head)) {
13951 		flow = SLIST_FIRST(&head);
13952 		SLIST_REMOVE_HEAD(&head, nt2hws->next);
13953 		flow_hw_destroy(dev, flow);
13954 		/* Release flow memory by idx */
13955 		mlx5_ipool_free(priv->flows[type], flow->idx);
13956 	}
13957 }
13958 #endif
13959 
13960 /**
13961  * Create a flow.
13962  *
13963  * @param[in] dev
13964  *   Pointer to Ethernet device.
13965  * @param[in] type
13966  *   Flow type.
13967  * @param[in] attr
13968  *   Flow rule attributes.
13969  * @param[in] items
13970  *   Pattern specification (list terminated by the END pattern item).
13971  * @param[in] actions
13972  *   Associated actions (list terminated by the END action).
13973  * @param[in] external
13974  *   This flow rule is created by request external to PMD.
13975  * @param[out] error
13976  *   Perform verbose error reporting if not NULL.
13977  *
13978  * @return
13979  *   A flow address on success, 0 otherwise and rte_errno is set.
13980  */
13981 static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
13982 				     enum mlx5_flow_type type,
13983 				     const struct rte_flow_attr *attr,
13984 				     const struct rte_flow_item items[],
13985 				     const struct rte_flow_action actions[],
13986 				     bool external,
13987 				     struct rte_flow_error *error)
13988 {
13989 	int ret;
13990 	int split;
13991 	int encap_idx;
13992 	uint32_t cpy_idx = 0;
13993 	int actions_n = 0;
13994 	struct rte_flow_hw *flow = NULL;
13995 	struct rte_flow_hw *prfx_flow = NULL;
13996 	const struct rte_flow_action *qrss = NULL;
13997 	const struct rte_flow_action *mark = NULL;
13998 	uint64_t item_flags = flow_hw_matching_item_flags_get(items);
13999 	uint64_t action_flags = flow_hw_action_flags_get(actions, &qrss, &mark,
14000 							 &encap_idx, &actions_n, error);
14001 	struct mlx5_flow_hw_split_resource resource = {
14002 		.suffix = {
14003 			.attr = attr,
14004 			.items = items,
14005 			.actions = actions,
14006 		},
14007 	};
14008 	struct rte_flow_error shadow_error = {0, };
14009 
14010 	/*
14011 	 * TODO: add a call to the flow_hw_validate function once it exists,
14012 	 * and update mlx5_flow_hw_drv_ops accordingly.
14013 	 */
14014 
14015 	RTE_SET_USED(encap_idx);
14016 	if (!error)
14017 		error = &shadow_error;
14018 	split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
14019 					     actions_n, external, &resource, error);
14020 	if (split < 0)
14021 		return split;
14022 
14023 	/* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
14024 	if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
14025 	     attr->transfer) && external) {
14026 		ret = mlx5_flow_nta_update_copy_table(dev, &cpy_idx, mark,
14027 						      action_flags, error);
14028 		if (ret)
14029 			goto free;
14030 	}
14031 
14032 	if (action_flags & MLX5_FLOW_ACTION_RSS) {
14033 		const struct rte_flow_action_rss
14034 			*rss_conf = flow_nta_locate_rss(dev, actions, error);
14035 		flow = flow_nta_handle_rss(dev, attr, items, actions, rss_conf,
14036 					   item_flags, action_flags, external,
14037 					   type, error);
14038 		if (flow) {
14039 			flow->nt2hws->rix_mreg_copy = cpy_idx;
14040 			cpy_idx = 0;
14041 			if (!split)
14042 				return (uintptr_t)flow;
14043 			goto prefix_flow;
14044 		}
14045 		goto free;
14046 	}
14047 	/* Create single flow. */
14048 	ret = flow_hw_create_flow(dev, type, resource.suffix.attr, resource.suffix.items,
14049 				  resource.suffix.actions, item_flags, action_flags,
14050 				  external, &flow, error);
14051 	if (ret)
14052 		goto free;
14053 	if (flow) {
14054 		flow->nt2hws->rix_mreg_copy = cpy_idx;
14055 		cpy_idx = 0;
14056 		if (!split)
14057 			return (uintptr_t)flow;
14058 		/* Fall Through to prefix flow creation. */
14059 	}
14060 prefix_flow:
14061 	ret = flow_hw_create_flow(dev, type, attr, items, resource.prefix.actions,
14062 				  item_flags, action_flags, external, &prfx_flow, error);
14063 	if (ret)
14064 		goto free;
14065 	if (prfx_flow) {
14066 		prfx_flow->nt2hws->rix_mreg_copy = flow->nt2hws->rix_mreg_copy;
14067 		flow->nt2hws->chaned_flow = 1;
14068 		SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
14069 		mlx5_flow_nta_split_resource_free(dev, &resource);
14070 		return (uintptr_t)prfx_flow;
14071 	}
14072 free:
14073 	if (prfx_flow)
14074 		flow_hw_list_destroy(dev, type, (uintptr_t)prfx_flow);
14075 	if (flow)
14076 		flow_hw_list_destroy(dev, type, (uintptr_t)flow);
14077 	if (cpy_idx)
14078 		mlx5_flow_nta_del_copy_action(dev, cpy_idx);
14079 	if (split > 0)
14080 		mlx5_flow_nta_split_resource_free(dev, &resource);
14081 	return 0;
14082 }
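
/*
 * Illustrative sketch of the result when a metadata split occurs above:
 * the suffix flow is created first, then the prefix flow, and both are
 * chained so that a single address is returned to the caller and later
 * destroyed as one list (names follow the local variables above):
 *
 *   prfx_flow (returned to the caller)
 *     '-> nt2hws->next -> flow (suffix, nt2hws->chaned_flow = 1)
 *
 * flow_hw_list_destroy() walks this SLIST and releases every element.
 */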
14083 
14084 static void
14085 mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
14086 			  struct mlx5_mirror_clone *clone)
14087 {
14088 	switch (clone->type) {
14089 	case RTE_FLOW_ACTION_TYPE_RSS:
14090 	case RTE_FLOW_ACTION_TYPE_QUEUE:
14091 		mlx5_hrxq_release(dev,
14092 				  ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
14093 		break;
14094 	case RTE_FLOW_ACTION_TYPE_JUMP:
14095 		flow_hw_jump_release(dev, clone->action_ctx);
14096 		break;
14097 	case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14098 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14099 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14100 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14101 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14102 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14103 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14104 	default:
14105 		break;
14106 	}
14107 }
14108 
14109 void
14110 mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
14111 {
14112 	uint32_t i;
14113 
14114 	mlx5_indirect_list_remove_entry(&mirror->indirect);
14115 	for (i = 0; i < mirror->clones_num; i++)
14116 		mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
14117 	if (mirror->mirror_action)
14118 		mlx5dr_action_destroy(mirror->mirror_action);
14119 	mlx5_free(mirror);
14120 }
14121 
14122 static __rte_always_inline bool
14123 mlx5_mirror_terminal_action(const struct rte_flow_action *action)
14124 {
14125 	switch (action->type) {
14126 	case RTE_FLOW_ACTION_TYPE_JUMP:
14127 	case RTE_FLOW_ACTION_TYPE_RSS:
14128 	case RTE_FLOW_ACTION_TYPE_QUEUE:
14129 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14130 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14131 	case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14132 		return true;
14133 	default:
14134 		break;
14135 	}
14136 	return false;
14137 }
14138 
14139 static bool
14140 mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
14141 				   const struct rte_flow_attr *flow_attr,
14142 				   const struct rte_flow_action *action)
14143 {
14144 	struct mlx5_priv *priv = dev->data->dev_private;
14145 	const struct rte_flow_action_ethdev *port = NULL;
14146 	bool is_proxy = MLX5_HW_PORT_IS_PROXY(priv);
14147 
14148 	if (!action)
14149 		return false;
14150 	switch (action->type) {
14151 	case RTE_FLOW_ACTION_TYPE_QUEUE:
14152 	case RTE_FLOW_ACTION_TYPE_RSS:
14153 		if (flow_attr->transfer)
14154 			return false;
14155 		break;
14156 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14157 		if (!is_proxy || !flow_attr->transfer)
14158 			return false;
14159 		port = action->conf;
14160 		if (!port || port->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
14161 			return false;
14162 		break;
14163 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14164 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14165 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14166 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14167 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14168 		if (!is_proxy || !flow_attr->transfer)
14169 			return false;
14170 		if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
14171 		    action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
14172 			return false;
14173 		break;
14174 	case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14175 		break;
14176 	default:
14177 		return false;
14178 	}
14179 	return true;
14180 }
14181 
14182 /**
14183  * A valid mirror actions list includes one or two SAMPLE actions
14184  * followed by a terminating action, e.g. JUMP.
14185  *
14186  * @return
14187  * The number of actions in the list if the *actions* list is valid,
14188  * -EINVAL otherwise.
14189  */
14190 static int
14191 mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
14192 				     const struct rte_flow_attr *flow_attr,
14193 				     const struct rte_flow_action *actions)
14194 {
14195 	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14196 		int i = 1;
14197 		bool valid;
14198 		const struct rte_flow_action_sample *sample = actions[0].conf;
14199 		valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
14200 							   sample->actions);
14201 		if (!valid)
14202 			return -EINVAL;
14203 		if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14204 			i = 2;
14205 			sample = actions[1].conf;
14206 			valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
14207 								   sample->actions);
14208 			if (!valid)
14209 				return -EINVAL;
14210 		}
14211 		return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
14212 	}
14213 	return -EINVAL;
14214 }
14215 
14216 static int
14217 mirror_format_tir(struct rte_eth_dev *dev,
14218 		  struct mlx5_mirror_clone *clone,
14219 		  const struct mlx5_flow_template_table_cfg *table_cfg,
14220 		  const struct rte_flow_action *action,
14221 		  struct mlx5dr_action_dest_attr *dest_attr,
14222 		  struct rte_flow_error *error)
14223 {
14224 	uint32_t hws_flags;
14225 	enum mlx5dr_table_type table_type;
14226 	struct mlx5_hrxq *tir_ctx;
14227 
14228 	table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
14229 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
14230 	tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
14231 	if (!tir_ctx)
14232 		return rte_flow_error_set(error, EINVAL,
14233 					  RTE_FLOW_ERROR_TYPE_ACTION,
14234 					  action, "failed to create QUEUE action for mirror clone");
14235 	dest_attr->dest = tir_ctx->action;
14236 	clone->action_ctx = tir_ctx;
14237 	return 0;
14238 }
14239 
14240 static int
14241 mirror_format_jump(struct rte_eth_dev *dev,
14242 		   struct mlx5_mirror_clone *clone,
14243 		   const struct mlx5_flow_template_table_cfg *table_cfg,
14244 		   const struct rte_flow_action *action,
14245 		   struct mlx5dr_action_dest_attr *dest_attr,
14246 		   struct rte_flow_error *error)
14247 {
14248 	const struct rte_flow_action_jump *jump_conf = action->conf;
14249 	struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
14250 						(dev, table_cfg,
14251 						 jump_conf->group, error);
14252 
14253 	if (!jump)
14254 		return rte_flow_error_set(error, EINVAL,
14255 					  RTE_FLOW_ERROR_TYPE_ACTION,
14256 					  action, "failed to create JUMP action for mirror clone");
14257 	dest_attr->dest = jump->hws_action;
14258 	clone->action_ctx = jump;
14259 	return 0;
14260 }
14261 
14262 static int
14263 mirror_format_port(struct rte_eth_dev *dev,
14264 		   const struct rte_flow_action *action,
14265 		   struct mlx5dr_action_dest_attr *dest_attr,
14266 		   struct rte_flow_error __rte_unused *error)
14267 {
14268 	struct mlx5_priv *priv = dev->data->dev_private;
14269 	const struct rte_flow_action_ethdev *port_action = action->conf;
14270 
14271 	dest_attr->dest = priv->hw_vport[port_action->port_id];
14272 	return 0;
14273 }
14274 
14275 static int
14276 hw_mirror_clone_reformat(const struct rte_flow_action *actions,
14277 			 struct mlx5dr_action_dest_attr *dest_attr,
14278 			 enum mlx5dr_action_type *action_type,
14279 			 uint8_t *reformat_buf, bool decap)
14280 {
14281 	int ret;
14282 	const struct rte_flow_item *encap_item = NULL;
14283 	const struct rte_flow_action_raw_encap *encap_conf = NULL;
14284 	typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
14285 
14286 	switch (actions[0].type) {
14287 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14288 		encap_conf = actions[0].conf;
14289 		break;
14290 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14291 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
14292 						   actions);
14293 		break;
14294 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14295 		encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
14296 						   actions);
14297 		break;
14298 	default:
14299 		return -EINVAL;
14300 	}
14301 	*action_type = decap ?
14302 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
14303 		       MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
14304 	if (encap_item) {
14305 		ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
14306 						 &reformat->reformat_data_sz, NULL);
14307 		if (ret)
14308 			return -EINVAL;
14309 		reformat->reformat_data = reformat_buf;
14310 	} else {
14311 		reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
14312 		reformat->reformat_data_sz = encap_conf->size;
14313 	}
14314 	return 0;
14315 }
14316 
14317 static int
14318 hw_mirror_format_clone(struct rte_eth_dev *dev,
14319 			struct mlx5_mirror_clone *clone,
14320 			const struct mlx5_flow_template_table_cfg *table_cfg,
14321 			const struct rte_flow_action *actions,
14322 			struct mlx5dr_action_dest_attr *dest_attr,
14323 			uint8_t *reformat_buf, struct rte_flow_error *error)
14324 {
14325 	struct mlx5_priv *priv = dev->data->dev_private;
14326 	int ret;
14327 	uint32_t i;
14328 	bool decap_seen = false;
14329 
14330 	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
14331 		dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
14332 		switch (actions[i].type) {
14333 		case RTE_FLOW_ACTION_TYPE_QUEUE:
14334 		case RTE_FLOW_ACTION_TYPE_RSS:
14335 			ret = mirror_format_tir(dev, clone, table_cfg,
14336 						&actions[i], dest_attr, error);
14337 			if (ret)
14338 				return ret;
14339 			break;
14340 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14341 			ret = mirror_format_port(dev, &actions[i],
14342 						 dest_attr, error);
14343 			if (ret)
14344 				return ret;
14345 			break;
14346 		case RTE_FLOW_ACTION_TYPE_JUMP:
14347 			ret = mirror_format_jump(dev, clone, table_cfg,
14348 						 &actions[i], dest_attr, error);
14349 			if (ret)
14350 				return ret;
14351 			break;
14352 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14353 			dest_attr->dest = priv->hw_def_miss;
14354 			break;
14355 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14356 			decap_seen = true;
14357 			break;
14358 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14359 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14360 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14361 			ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
14362 						       &dest_attr->action_type[i],
14363 						       reformat_buf, decap_seen);
14364 			if (ret < 0)
14365 				return rte_flow_error_set(error, EINVAL,
14366 							  RTE_FLOW_ERROR_TYPE_ACTION,
14367 							  &actions[i],
14368 							  "failed to create reformat action");
14369 			break;
14370 		default:
14371 			return rte_flow_error_set(error, EINVAL,
14372 						  RTE_FLOW_ERROR_TYPE_ACTION,
14373 						  &actions[i], "unsupported sample action");
14374 		}
14375 		clone->type = actions->type;
14376 	}
14377 	dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
14378 	return 0;
14379 }
14380 
14381 static struct rte_flow_action_list_handle *
14382 mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
14383 			     const struct mlx5_flow_template_table_cfg *table_cfg,
14384 			     const struct rte_flow_action *actions,
14385 			     struct rte_flow_error *error)
14386 {
14387 	uint32_t hws_flags;
14388 	int ret = 0, i, clones_num;
14389 	struct mlx5_mirror *mirror;
14390 	enum mlx5dr_table_type table_type;
14391 	struct mlx5_priv *priv = dev->data->dev_private;
14392 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
14393 	uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
14394 	struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
14395 	enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
14396 						  [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
14397 
14398 	memset(mirror_attr, 0, sizeof(mirror_attr));
14399 	memset(array_action_types, 0, sizeof(array_action_types));
14400 	table_type = get_mlx5dr_table_type(flow_attr);
14401 	hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
14402 	clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
14403 							  actions);
14404 	if (clones_num < 0) {
14405 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14406 				   actions, "Invalid mirror list format");
14407 		return NULL;
14408 	}
14409 	mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
14410 			     0, SOCKET_ID_ANY);
14411 	if (!mirror) {
14412 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
14413 				   actions, "Failed to allocate mirror context");
14414 		return NULL;
14415 	}
14416 
14417 	mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
14418 	mirror->clones_num = clones_num;
14419 	for (i = 0; i < clones_num; i++) {
14420 		const struct rte_flow_action *clone_actions;
14421 
14422 		mirror_attr[i].action_type = array_action_types[i];
14423 		if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14424 			const struct rte_flow_action_sample *sample = actions[i].conf;
14425 
14426 			clone_actions = sample->actions;
14427 		} else {
14428 			clone_actions = &actions[i];
14429 		}
14430 		ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
14431 					     clone_actions, &mirror_attr[i],
14432 					     reformat_buf[i], error);
14433 
14434 		if (ret)
14435 			goto error;
14436 	}
14437 	hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
14438 	mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
14439 								clones_num,
14440 								mirror_attr,
14441 								hws_flags);
14442 	if (!mirror->mirror_action) {
14443 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14444 				   actions, "Failed to create HWS mirror action");
14445 		goto error;
14446 	}
14447 
14448 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
14449 	return (struct rte_flow_action_list_handle *)mirror;
14450 
14451 error:
14452 	mlx5_hw_mirror_destroy(dev, mirror);
14453 	return NULL;
14454 }
14455 
14456 void
14457 mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
14458 			     struct mlx5_indirect_list *ptr)
14459 {
14460 	struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
14461 
14462 	switch (obj->legacy_type) {
14463 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
14464 		break; /* ASO meters were released in mlx5_flow_meter_flush() */
14465 	default:
14466 		break;
14467 	}
14468 	mlx5_free(obj);
14469 }
14470 
14471 static struct rte_flow_action_list_handle *
14472 mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
14473 			  const struct rte_flow_op_attr *attr,
14474 			  const struct rte_flow_indir_action_conf *conf,
14475 			  const struct rte_flow_action *actions,
14476 			  void *user_data, struct rte_flow_error *error)
14477 {
14478 	struct mlx5_priv *priv = dev->data->dev_private;
14479 	struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
14480 							    sizeof(*indlst_obj),
14481 							    0, SOCKET_ID_ANY);
14482 
14483 	if (!indlst_obj)
14484 		return NULL;
14485 	indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
14486 							  actions, user_data,
14487 							  error);
14488 	if (!indlst_obj->handle) {
14489 		mlx5_free(indlst_obj);
14490 		return NULL;
14491 	}
14492 	indlst_obj->legacy_type = actions[0].type;
14493 	indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
14494 	mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
14495 	return (struct rte_flow_action_list_handle *)indlst_obj;
14496 }
14497 
14498 static __rte_always_inline enum mlx5_indirect_list_type
14499 flow_hw_inlist_type_get(const struct rte_flow_action *actions)
14500 {
14501 	switch (actions[0].type) {
14502 	case RTE_FLOW_ACTION_TYPE_SAMPLE:
14503 		return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
14504 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
14505 		return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
14506 		       MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
14507 		       MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
14508 	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14509 	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14510 		return MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
14511 	default:
14512 		break;
14513 	}
14514 	return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
14515 }
14516 
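/**
 * Creates an indirect REFORMAT action list handle from a RAW_DECAP and/or
 * RAW_ENCAP action sequence (the allowed sequences are listed inside the
 * function body).
 *
 * An illustrative application-side list (a sketch only; the encapsulation
 * buffer and sizes below are hypothetical placeholders, not real headers):
 *
 * @code{.c}
 * uint8_t encap_hdr[] = { 0x00 }; // hypothetical raw header bytes
 * struct rte_flow_action_raw_decap decap = { .data = NULL, .size = 14 };
 * struct rte_flow_action_raw_encap encap = {
 *     .data = encap_hdr, .size = sizeof(encap_hdr),
 * };
 * struct rte_flow_action list[] = {
 *     { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *     { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *     { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */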
14517 static struct rte_flow_action_list_handle*
14518 mlx5_hw_decap_encap_handle_create(struct rte_eth_dev *dev,
14519 				  const struct mlx5_flow_template_table_cfg *table_cfg,
14520 				  const struct rte_flow_action *actions,
14521 				  struct rte_flow_error *error)
14522 {
14523 	struct mlx5_priv *priv = dev->data->dev_private;
14524 	const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
14525 	const struct rte_flow_action *encap = NULL;
14526 	const struct rte_flow_action *decap = NULL;
14527 	struct rte_flow_indir_action_conf indirect_conf = {
14528 		.ingress = flow_attr->ingress,
14529 		.egress = flow_attr->egress,
14530 		.transfer = flow_attr->transfer,
14531 	};
14532 	struct mlx5_hw_encap_decap_action *handle;
14533 	uint64_t action_flags = 0;
14534 
14535 	/*
14536 	 * Allowed action sequences:
14537 	 * 1. raw_decap / raw_encap / end
14538 	 * 2. raw_encap / end
14539 	 * 3. raw_decap / end
14540 	 */
14541 	while (actions->type != RTE_FLOW_ACTION_TYPE_END) {
14542 		if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
14543 			if (action_flags) {
14544 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14545 						   actions, "Invalid indirect action list sequence");
14546 				return NULL;
14547 			}
14548 			action_flags |= MLX5_FLOW_ACTION_DECAP;
14549 			decap = actions;
14550 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
14551 			if (action_flags & MLX5_FLOW_ACTION_ENCAP) {
14552 				rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14553 						   actions, "Invalid indirect action list sequence");
14554 				return NULL;
14555 			}
14556 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
14557 			encap = actions;
14558 		} else {
14559 			rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14560 					   actions, "Invalid indirect action type in list");
14561 			return NULL;
14562 		}
14563 		actions++;
14564 	}
14565 	if (!decap && !encap) {
14566 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14567 				   actions, "Invalid indirect action combinations");
14568 		return NULL;
14569 	}
14570 	handle = mlx5_reformat_action_create(dev, &indirect_conf, encap, decap, error);
14571 	if (!handle) {
14572 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14573 				   actions, "Failed to create HWS decap_encap action");
14574 		return NULL;
14575 	}
14576 	handle->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
14577 	LIST_INSERT_HEAD(&priv->indirect_list_head, &handle->indirect, entry);
14578 	return (struct rte_flow_action_list_handle *)handle;
14579 }
14580 
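/**
 * Driver handler registered as async_action_list_handle_create
 * (see mlx5_flow_hw_drv_ops below).
 *
 * A sketch of the application-side call; port_id, queue_id and the "list"
 * actions array are placeholders (see the mirror example above for one way
 * to build such a list):
 *
 * @code{.c}
 * struct rte_flow_indir_action_conf conf = { .transfer = 1 };
 * struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * struct rte_flow_error err;
 * struct rte_flow_action_list_handle *h =
 *     rte_flow_async_action_list_handle_create(port_id, queue_id, &op_attr,
 *                                              &conf, list, NULL, &err);
 * @endcode
 */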
14581 static struct rte_flow_action_list_handle *
14582 flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
14583 					const struct rte_flow_op_attr *attr,
14584 					const struct rte_flow_indir_action_conf *conf,
14585 					const struct rte_flow_action *actions,
14586 					void *user_data,
14587 					struct rte_flow_error *error)
14588 {
14589 	struct mlx5_hw_q_job *job = NULL;
14590 	bool push = flow_hw_action_push(attr);
14591 	enum mlx5_indirect_list_type list_type;
14592 	struct rte_flow_action_list_handle *handle;
14593 	struct mlx5_priv *priv = dev->data->dev_private;
14594 	const struct mlx5_flow_template_table_cfg table_cfg = {
14595 		.external = true,
14596 		.attr = {
14597 			.flow_attr = {
14598 				.ingress = conf->ingress,
14599 				.egress = conf->egress,
14600 				.transfer = conf->transfer
14601 			}
14602 		}
14603 	};
14604 
14605 	if (!mlx5_hw_ctx_validate(dev, error))
14606 		return NULL;
14607 	if (!actions) {
14608 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14609 				   NULL, "No action list");
14610 		return NULL;
14611 	}
14612 	list_type = flow_hw_inlist_type_get(actions);
14613 	if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14614 		/*
14615 		 * Legacy indirect actions already have
14616 		 * async resources management. No need to do it twice.
14617 		 */
14618 		handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
14619 						   actions, user_data, error);
14620 		goto end;
14621 	}
14622 	if (attr) {
14623 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
14624 					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
14625 					      MLX5_HW_INDIRECT_TYPE_LIST, error);
14626 		if (!job)
14627 			return NULL;
14628 	}
14629 	switch (list_type) {
14630 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
14631 		handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
14632 						      actions, error);
14633 		break;
14634 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
14635 		handle = mlx5_hw_decap_encap_handle_create(dev, &table_cfg,
14636 							   actions, error);
14637 		break;
14638 	default:
14639 		handle = NULL;
14640 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14641 				   actions, "Invalid list");
14642 	}
14643 	if (job) {
14644 		job->action = handle;
14645 		flow_hw_action_finalize(dev, queue, job, push, false,
14646 					handle != NULL);
14647 	}
14648 end:
14649 	return handle;
14650 }
14651 
14652 static struct rte_flow_action_list_handle *
14653 flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
14654 				  const struct rte_flow_indir_action_conf *conf,
14655 				  const struct rte_flow_action *actions,
14656 				  struct rte_flow_error *error)
14657 {
14658 	return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
14659 						       NULL, conf, actions,
14660 						       NULL, error);
14661 }
14662 
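/**
 * Driver handler registered as async_action_list_handle_destroy
 * (see mlx5_flow_hw_drv_ops below). A sketch of the matching application-side
 * call, with placeholder arguments:
 *
 * @code{.c}
 * rte_flow_async_action_list_handle_destroy(port_id, queue_id, &op_attr,
 *                                           handle, NULL, &err);
 * @endcode
 */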
14663 static int
14664 flow_hw_async_action_list_handle_destroy
14665 			(struct rte_eth_dev *dev, uint32_t queue,
14666 			 const struct rte_flow_op_attr *attr,
14667 			 struct rte_flow_action_list_handle *handle,
14668 			 void *user_data, struct rte_flow_error *error)
14669 {
14670 	int ret = 0;
14671 	struct mlx5_hw_q_job *job = NULL;
14672 	bool push = flow_hw_action_push(attr);
14673 	struct mlx5_priv *priv = dev->data->dev_private;
14674 	enum mlx5_indirect_list_type type =
14675 		mlx5_get_indirect_list_type((void *)handle);
14676 
14677 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14678 		struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
14679 
14680 		ret = flow_hw_action_handle_destroy(dev, queue, attr,
14681 						    legacy->handle,
14682 						    user_data, error);
14683 		mlx5_indirect_list_remove_entry(&legacy->indirect);
14684 		goto end;
14685 	}
14686 	if (attr) {
14687 		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
14688 					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
14689 					      MLX5_HW_INDIRECT_TYPE_LIST, error);
14690 		if (!job)
14691 			return rte_errno;
14692 	}
14693 	switch (type) {
14694 	case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
14695 		mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
14696 		break;
14697 	case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
14698 		LIST_REMOVE(&((struct mlx5_hw_encap_decap_action *)handle)->indirect,
14699 			    entry);
14700 		mlx5_reformat_action_destroy(dev, handle, error);
14701 		break;
14702 	default:
14703 		ret = rte_flow_error_set(error, EINVAL,
14704 					  RTE_FLOW_ERROR_TYPE_ACTION,
14705 					  NULL, "Invalid indirect list handle");
14706 	}
14707 	if (job)
14708 		flow_hw_action_finalize(dev, queue, job, push, false, true);
14710 end:
14711 	return ret;
14712 }
14713 
14714 static int
14715 flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
14716 				   struct rte_flow_action_list_handle *handle,
14717 				   struct rte_flow_error *error)
14718 {
14719 	return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
14720 							NULL, handle, NULL,
14721 							error);
14722 }
14723 
14724 static int
14725 flow_hw_async_action_list_handle_query_update
14726 		(struct rte_eth_dev *dev, uint32_t queue_id,
14727 		 const struct rte_flow_op_attr *attr,
14728 		 const struct rte_flow_action_list_handle *handle,
14729 		 const void **update, void **query,
14730 		 enum rte_flow_query_update_mode mode,
14731 		 void *user_data, struct rte_flow_error *error)
14732 {
14733 	enum mlx5_indirect_list_type type =
14734 		mlx5_get_indirect_list_type((const void *)handle);
14735 
14736 	if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14737 		struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
14738 
14739 		if (update && query)
14740 			return flow_hw_async_action_handle_query_update
14741 				(dev, queue_id, attr, legacy->handle,
14742 				 update, query, mode, user_data, error);
14743 		else if (update && update[0])
14744 			return flow_hw_action_handle_update(dev, queue_id, attr,
14745 							    legacy->handle, update[0],
14746 							    user_data, error);
14747 		else if (query && query[0])
14748 			return flow_hw_action_handle_query(dev, queue_id, attr,
14749 							   legacy->handle, query[0],
14750 							   user_data, error);
14751 		else
14752 			return rte_flow_error_set(error, EINVAL,
14753 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14754 						  NULL, "invalid legacy handle query_update parameters");
14755 	}
14756 	return -ENOTSUP;
14757 }
14758 
14759 static int
14760 flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
14761 					const struct rte_flow_action_list_handle *handle,
14762 					const void **update, void **query,
14763 					enum rte_flow_query_update_mode mode,
14764 					struct rte_flow_error *error)
14765 {
14766 	return flow_hw_async_action_list_handle_query_update
14767 					(dev, MLX5_HW_INV_QUEUE, NULL, handle,
14768 					 update, query, mode, NULL, error);
14769 }
14770 
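/**
 * Driver handler registered as flow_calc_table_hash (see mlx5_flow_hw_drv_ops
 * below). A sketch of the application-side call, assuming "tbl" and "pattern"
 * were prepared with the template API (placeholders, error handling omitted):
 *
 * @code{.c}
 * uint32_t hash;
 * int rc = rte_flow_calc_table_hash(port_id, tbl, pattern, 0, &hash, &err);
 * @endcode
 */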
14771 static int
14772 flow_hw_calc_table_hash(struct rte_eth_dev *dev,
14773 			 const struct rte_flow_template_table *table,
14774 			 const struct rte_flow_item pattern[],
14775 			 uint8_t pattern_template_index,
14776 			 uint32_t *hash, struct rte_flow_error *error)
14777 {
14778 	const struct rte_flow_item *items;
14779 	struct mlx5_flow_hw_pattern_params pp;
14780 	int res;
14781 
14782 	items = flow_hw_get_rule_items(dev, table, pattern,
14783 				       pattern_template_index,
14784 				       &pp);
14785 	res = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,
14786 					 pattern_template_index,
14787 					 MLX5DR_RULE_HASH_CALC_MODE_RAW,
14788 					 hash);
14789 	if (res)
14790 		return rte_flow_error_set(error, res,
14791 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14792 					  NULL,
14793 					  "hash could not be calculated");
14794 	return 0;
14795 }
14796 
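/**
 * Driver handler registered as flow_calc_encap_hash (see mlx5_flow_hw_drv_ops
 * below). A sketch of the application-side call; the exact rte_flow prototype
 * (notably the hash buffer length argument) should be checked against
 * rte_flow.h, and the other arguments are placeholders:
 *
 * @code{.c}
 * uint8_t hash[2]; // 16-bit result for the source port destination field
 * int rc = rte_flow_calc_encap_hash(port_id, pattern,
 *                                   RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT,
 *                                   sizeof(hash), hash, &err);
 * @endcode
 */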
14797 static int
14798 flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
14799 			const struct rte_flow_item pattern[],
14800 			enum rte_flow_encap_hash_field dest_field,
14801 			uint8_t *hash,
14802 			struct rte_flow_error *error)
14803 {
14804 	struct mlx5_priv *priv = dev->data->dev_private;
14805 	struct mlx5dr_crc_encap_entropy_hash_fields data;
14806 	enum mlx5dr_crc_encap_entropy_hash_size res_size =
14807 			dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT ?
14808 				MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_16 :
14809 				MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_8;
14810 	int res;
14811 
14812 	memset(&data, 0, sizeof(struct mlx5dr_crc_encap_entropy_hash_fields));
14813 
14814 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
14815 		switch (pattern->type) {
14816 		case RTE_FLOW_ITEM_TYPE_IPV4:
14817 			data.dst.ipv4_addr =
14818 				((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.dst_addr;
14819 			data.src.ipv4_addr =
14820 				((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.src_addr;
14821 			break;
14822 		case RTE_FLOW_ITEM_TYPE_IPV6:
14823 			memcpy(data.dst.ipv6_addr,
14824 			       &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
14825 			       sizeof(data.dst.ipv6_addr));
14826 			memcpy(data.src.ipv6_addr,
14827 			       &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
14828 			       sizeof(data.src.ipv6_addr));
14829 			break;
14830 		case RTE_FLOW_ITEM_TYPE_UDP:
14831 			data.next_protocol = IPPROTO_UDP;
14832 			data.dst_port =
14833 				((const struct rte_flow_item_udp *)(pattern->spec))->hdr.dst_port;
14834 			data.src_port =
14835 				((const struct rte_flow_item_udp *)(pattern->spec))->hdr.src_port;
14836 			break;
14837 		case RTE_FLOW_ITEM_TYPE_TCP:
14838 			data.next_protocol = IPPROTO_TCP;
14839 			data.dst_port =
14840 				((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.dst_port;
14841 			data.src_port =
14842 				((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.src_port;
14843 			break;
14844 		case RTE_FLOW_ITEM_TYPE_ICMP:
14845 			data.next_protocol = IPPROTO_ICMP;
14846 			break;
14847 		case RTE_FLOW_ITEM_TYPE_ICMP6:
14848 			data.next_protocol = IPPROTO_ICMPV6;
14849 			break;
14850 		default:
14851 			break;
14852 		}
14853 	}
14854 	res = mlx5dr_crc_encap_entropy_hash_calc(priv->dr_ctx, &data, hash, res_size);
14855 	if (res)
14856 		return rte_flow_error_set(error, res,
14857 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14858 					  NULL, "error while calculating encap hash");
14859 	return 0;
14860 }
14861 
14862 static int
14863 flow_hw_table_resize_multi_pattern_actions(struct rte_eth_dev *dev,
14864 					   struct rte_flow_template_table *table,
14865 					   uint32_t nb_flows,
14866 					   struct rte_flow_error *error)
14867 {
14868 	struct mlx5_multi_pattern_segment *segment = table->mpctx.segments;
14869 	uint32_t bulk_size;
14870 	int i, ret;
14871 
14872 	/*
14873 	 * A segment always allocates Modify Header Argument Objects in
14874 	 * powers of 2.
14875 	 * On resize, the PMD adds the minimal required number of argument objects.
14876 	 * For example, if the table size was 10, 16 argument objects were allocated;
14877 	 * resizing to 15 will not add new objects.
14878 	 */
14879 	for (i = 1;
14880 	     i < MLX5_MAX_TABLE_RESIZE_NUM && segment->capacity;
14881 	     i++, segment++) {
14882 		/* keep the devtools/checkpatches.sh happy */
14883 	}
14884 	if (i == MLX5_MAX_TABLE_RESIZE_NUM)
14885 		return rte_flow_error_set(error, EINVAL,
14886 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14887 					  table, "too many resizes");
14888 	if (segment->head_index - 1 >= nb_flows)
14889 		return 0;
14890 	bulk_size = rte_align32pow2(nb_flows - segment->head_index + 1);
14891 	ret = mlx5_tbl_multi_pattern_process(dev, table, segment,
14892 					     rte_log2_u32(bulk_size),
14893 					     error);
14894 	if (ret)
14895 		return rte_flow_error_set(error, EINVAL,
14896 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14897 					  table, "failed to resize multi-pattern actions");
14898 	return i;
14899 }
14900 
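/**
 * Driver handler registered as table_resize (see mlx5_flow_hw_drv_ops below).
 *
 * A sketch of the resize sequence on the application side, assuming the table
 * was created with the resizable attribute ("tbl", "rule" and the other
 * arguments are placeholders, completion polling and error handling omitted):
 *
 * @code{.c}
 * // 1. Grow the table.
 * rte_flow_template_table_resize(port_id, tbl, new_nb_flows, &err);
 * // 2. Move each pre-existing rule to the new matcher.
 * rte_flow_async_update_resized(port_id, queue_id, &op_attr, rule, NULL, &err);
 * // 3. Once all rules were moved, release the retired matcher.
 * rte_flow_template_table_resize_complete(port_id, tbl, &err);
 * @endcode
 */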
14901 static int
14902 flow_hw_table_resize(struct rte_eth_dev *dev,
14903 		     struct rte_flow_template_table *table,
14904 		     uint32_t nb_flows,
14905 		     struct rte_flow_error *error)
14906 {
14907 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
14908 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
14909 	struct mlx5dr_matcher_attr matcher_attr = table->matcher_attr;
14910 	struct mlx5dr_action_jump_to_matcher_attr jump_attr = {
14911 		.type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX,
14912 		.matcher = NULL,
14913 	};
14914 	struct mlx5_multi_pattern_segment *segment = NULL;
14915 	struct mlx5dr_matcher *matcher = NULL;
14916 	struct mlx5dr_action *jump = NULL;
14917 	struct mlx5_priv *priv = dev->data->dev_private;
14918 	uint32_t i, selector = table->matcher_selector;
14919 	uint32_t other_selector = (selector + 1) & 1;
14920 	int ret;
14921 
14922 	if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
14923 		return rte_flow_error_set(error, EINVAL,
14924 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14925 					  table, "no resizable attribute");
14926 	if (table->matcher_info[other_selector].matcher)
14927 		return rte_flow_error_set(error, EINVAL,
14928 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14929 					  table, "last table resize was not completed");
14930 	if (nb_flows <= table->cfg.attr.nb_flows)
14931 		return rte_flow_error_set(error, EINVAL,
14932 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14933 					  table, "shrinking table is not supported");
14934 	ret = mlx5_ipool_resize(table->flow, nb_flows, error);
14935 	if (ret)
14936 		return ret;
14937 	/*
14938 	 * A resizable matcher doesn't support rule update. In this case, the ipool
14939 	 * for the resource is not created and there is no need to resize it.
14940 	 */
14941 	MLX5_ASSERT(!table->resource);
14942 	if (mlx5_is_multi_pattern_active(&table->mpctx)) {
14943 		ret = flow_hw_table_resize_multi_pattern_actions(dev, table, nb_flows, error);
14944 		if (ret < 0)
14945 			return ret;
14946 		if (ret > 0)
14947 			segment = table->mpctx.segments + ret;
14948 	}
14949 	for (i = 0; i < table->nb_item_templates; i++)
14950 		mt[i] = table->its[i]->mt;
14951 	for (i = 0; i < table->nb_action_templates; i++)
14952 		at[i] = table->ats[i].action_template->tmpl;
14953 	nb_flows = rte_align32pow2(nb_flows);
14954 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
14955 	matcher = mlx5dr_matcher_create(table->grp->tbl, mt,
14956 					table->nb_item_templates, at,
14957 					table->nb_action_templates,
14958 					&matcher_attr);
14959 	if (!matcher) {
14960 		ret = rte_flow_error_set(error, rte_errno,
14961 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14962 					 table, "failed to create new matcher");
14963 		goto error;
14964 	}
14965 	if (matcher_attr.isolated) {
14966 		jump_attr.matcher = matcher;
14967 		jump = mlx5dr_action_create_jump_to_matcher(priv->dr_ctx, &jump_attr,
14968 			mlx5_hw_act_flag[!!table->cfg.attr.flow_attr.group][table->type]);
14969 		if (!jump) {
14970 			ret = rte_flow_error_set(error, rte_errno,
14971 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14972 						table, "failed to create jump to matcher action");
14973 			goto error;
14974 		}
14975 	}
14976 	rte_rwlock_write_lock(&table->matcher_replace_rwlk);
14977 	ret = mlx5dr_matcher_resize_set_target
14978 			(table->matcher_info[selector].matcher, matcher);
14979 	if (ret) {
14980 		rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
14981 		ret = rte_flow_error_set(error, rte_errno,
14982 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14983 					 table, "failed to initiate matcher swap");
14984 		goto error;
14985 	}
14986 	table->cfg.attr.nb_flows = nb_flows;
14987 	table->matcher_info[other_selector].matcher = matcher;
14988 	table->matcher_info[other_selector].jump = jump;
14989 	table->matcher_selector = other_selector;
14990 	rte_atomic_store_explicit(&table->matcher_info[other_selector].refcnt,
14991 				  0, rte_memory_order_relaxed);
14992 	rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
14993 	return 0;
14994 error:
14995 	if (segment)
14996 		mlx5_destroy_multi_pattern_segment(segment);
14997 	if (jump)
14998 		mlx5dr_action_destroy(jump);
14999 	if (matcher) {
15000 		ret = mlx5dr_matcher_destroy(matcher);
15001 		return rte_flow_error_set(error, rte_errno,
15002 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15003 					  table, "failed to destroy new matcher");
15004 	}
15005 	return ret;
15006 }
15007 
15008 static int
15009 flow_hw_table_resize_complete(__rte_unused struct rte_eth_dev *dev,
15010 			      struct rte_flow_template_table *table,
15011 			      struct rte_flow_error *error)
15012 {
15013 	int ret;
15014 	uint32_t selector = table->matcher_selector;
15015 	uint32_t other_selector = (selector + 1) & 1;
15016 	struct mlx5_matcher_info *matcher_info = &table->matcher_info[other_selector];
15017 	uint32_t matcher_refcnt;
15018 
15019 	if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
15020 		return rte_flow_error_set(error, EINVAL,
15021 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15022 					  table, "no resizable attribute");
15023 	matcher_refcnt = rte_atomic_load_explicit(&matcher_info->refcnt,
15024 						  rte_memory_order_relaxed);
15025 	if (!matcher_info->matcher || matcher_refcnt)
15026 		return rte_flow_error_set(error, EBUSY,
15027 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15028 					  table, "cannot complete table resize");
15029 	if (matcher_info->jump)
15030 		mlx5dr_action_destroy(matcher_info->jump);
15031 	ret = mlx5dr_matcher_destroy(matcher_info->matcher);
15032 	if (ret)
15033 		return rte_flow_error_set(error, rte_errno,
15034 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15035 					  table, "failed to destroy retired matcher");
15036 	matcher_info->matcher = NULL;
15037 	return 0;
15038 }
15039 
15040 static int
15041 flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,
15042 		       const struct rte_flow_op_attr *attr,
15043 		       struct rte_flow *flow, void *user_data,
15044 		       struct rte_flow_error *error)
15045 {
15046 	int ret;
15047 	struct mlx5_priv *priv = dev->data->dev_private;
15048 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
15049 	struct rte_flow_template_table *table = hw_flow->table;
15050 	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
15051 	uint32_t table_selector = table->matcher_selector;
15052 	uint32_t rule_selector = aux->matcher_selector;
15053 	uint32_t other_selector;
15054 	struct mlx5dr_matcher *other_matcher;
15055 	struct mlx5dr_rule_attr rule_attr = {
15056 		.queue_id = queue,
15057 		.burst = attr->postpone,
15058 	};
15059 
15060 	MLX5_ASSERT(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
15061 	/*
15062 	 * mlx5dr_matcher_resize_rule_move() accepts the original table matcher -
15063 	 * the one that was used BEFORE the table resize.
15064 	 * Since this function is called AFTER the table resize,
15065 	 * `table->matcher_selector` always points to the new matcher and
15066 	 * `aux->matcher_selector` points to the matcher used to create the flow.
15067 	 */
15068 	other_selector = rule_selector == table_selector ?
15069 			 (rule_selector + 1) & 1 : rule_selector;
15070 	other_matcher = table->matcher_info[other_selector].matcher;
15071 	if (!other_matcher)
15072 		return rte_flow_error_set(error, EINVAL,
15073 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
15074 					  "no active table resize");
15075 	hw_flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE;
15076 	hw_flow->user_data = user_data;
15077 	rule_attr.user_data = hw_flow;
15078 	if (rule_selector == table_selector) {
15079 		struct rte_ring *ring = !attr->postpone ?
15080 					priv->hw_q[queue].flow_transfer_completed :
15081 					priv->hw_q[queue].flow_transfer_pending;
15082 		rte_ring_enqueue(ring, hw_flow);
15083 		flow_hw_q_inc_flow_ops(priv, queue);
15084 		return 0;
15085 	}
15086 	ret = mlx5dr_matcher_resize_rule_move(other_matcher,
15087 					      (struct mlx5dr_rule *)hw_flow->rule,
15088 					      &rule_attr);
15089 	if (ret) {
15090 		return rte_flow_error_set(error, rte_errno,
15091 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
15092 					  "flow transfer failed");
15093 	}
15094 	flow_hw_q_inc_flow_ops(priv, queue);
15095 	return 0;
15096 }
15097 
15098 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
15099 	.list_create = flow_hw_list_create,
15100 	.list_destroy = flow_hw_list_destroy,
15101 	.validate = flow_dv_validate,
15102 	.info_get = flow_hw_info_get,
15103 	.configure = flow_hw_configure,
15104 	.pattern_validate = flow_hw_pattern_validate,
15105 	.pattern_template_create = flow_hw_pattern_template_create,
15106 	.pattern_template_destroy = flow_hw_pattern_template_destroy,
15107 	.actions_validate = flow_hw_actions_validate,
15108 	.actions_template_create = flow_hw_actions_template_create,
15109 	.actions_template_destroy = flow_hw_actions_template_destroy,
15110 	.template_table_create = flow_hw_template_table_create,
15111 	.template_table_destroy = flow_hw_table_destroy,
15112 	.table_resize = flow_hw_table_resize,
15113 	.group_set_miss_actions = flow_hw_group_set_miss_actions,
15114 	.async_flow_create = flow_hw_async_flow_create,
15115 	.async_flow_create_by_index = flow_hw_async_flow_create_by_index,
15116 	.async_flow_update = flow_hw_async_flow_update,
15117 	.async_flow_destroy = flow_hw_async_flow_destroy,
15118 	.flow_update_resized = flow_hw_update_resized,
15119 	.table_resize_complete = flow_hw_table_resize_complete,
15120 	.pull = flow_hw_pull,
15121 	.push = flow_hw_push,
15122 	.async_action_create = flow_hw_action_handle_create,
15123 	.async_action_destroy = flow_hw_action_handle_destroy,
15124 	.async_action_update = flow_hw_action_handle_update,
15125 	.async_action_query_update = flow_hw_async_action_handle_query_update,
15126 	.async_action_query = flow_hw_action_handle_query,
15127 	.action_validate = flow_hw_action_validate,
15128 	.action_create = flow_hw_action_create,
15129 	.action_destroy = flow_hw_action_destroy,
15130 	.action_update = flow_hw_action_update,
15131 	.action_query = flow_hw_action_query,
15132 	.action_query_update = flow_hw_action_query_update,
15133 	.action_list_handle_create = flow_hw_action_list_handle_create,
15134 	.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
15135 	.action_list_handle_query_update =
15136 		flow_hw_action_list_handle_query_update,
15137 	.async_action_list_handle_create =
15138 		flow_hw_async_action_list_handle_create,
15139 	.async_action_list_handle_destroy =
15140 		flow_hw_async_action_list_handle_destroy,
15141 	.async_action_list_handle_query_update =
15142 		flow_hw_async_action_list_handle_query_update,
15143 	.query = flow_hw_query,
15144 	.get_aged_flows = flow_hw_get_aged_flows,
15145 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
15146 	.item_create = flow_dv_item_create,
15147 	.item_release = flow_dv_item_release,
15148 	.flow_calc_table_hash = flow_hw_calc_table_hash,
15149 	.flow_calc_encap_hash = flow_hw_calc_encap_hash,
15150 };
15151 
15152 /**
15153  * Creates a control flow using flow template API on @p proxy_dev device,
15154  * on behalf of @p owner_dev device.
15155  *
15156  * This function uses locks internally to synchronize access to the
15157  * flow queue.
15158  *
15159  * Created flow is stored in private list associated with @p proxy_dev device.
15160  *
15161  * @param owner_dev
15162  *   Pointer to Ethernet device on behalf of which flow is created.
15163  * @param proxy_dev
15164  *   Pointer to Ethernet device on which flow is created.
15165  * @param table
15166  *   Pointer to flow table.
15167  * @param items
15168  *   Pointer to flow rule items.
15169  * @param item_template_idx
15170  *   Index of an item template associated with @p table.
15171  * @param actions
15172  *   Pointer to flow rule actions.
15173  * @param action_template_idx
15174  *   Index of an action template associated with @p table.
15175  * @param info
15176  *   Additional info about control flow rule.
15177  * @param external
15178  *   If true, the created rule is stored on the external control flows list.
15179  *
15180  * @return
15181  *   0 on success, negative errno value otherwise and rte_errno set.
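 *
 * See mlx5_flow_hw_esw_create_sq_miss_flow() below for a typical caller.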
15182  */
15183 static __rte_unused int
15184 flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
15185 			 struct rte_eth_dev *proxy_dev,
15186 			 struct rte_flow_template_table *table,
15187 			 struct rte_flow_item items[],
15188 			 uint8_t item_template_idx,
15189 			 struct rte_flow_action actions[],
15190 			 uint8_t action_template_idx,
15191 			 struct mlx5_ctrl_flow_info *info,
15192 			 bool external)
15193 {
15194 	struct mlx5_priv *priv = proxy_dev->data->dev_private;
15195 	uint32_t queue = CTRL_QUEUE_ID(priv);
15196 	struct rte_flow_op_attr op_attr = {
15197 		.postpone = 0,
15198 	};
15199 	struct rte_flow *flow = NULL;
15200 	struct mlx5_ctrl_flow_entry *entry = NULL;
15201 	int ret;
15202 
15203 	rte_spinlock_lock(&priv->hw_ctrl_lock);
15204 	entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
15205 			    0, SOCKET_ID_ANY);
15206 	if (!entry) {
15207 		DRV_LOG(ERR, "port %u not enough memory to create control flows",
15208 			proxy_dev->data->port_id);
15209 		rte_errno = ENOMEM;
15210 		ret = -rte_errno;
15211 		goto error;
15212 	}
15213 	flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
15214 					 items, item_template_idx,
15215 					 actions, action_template_idx,
15216 					 NULL, NULL);
15217 	if (!flow) {
15218 		DRV_LOG(ERR, "port %u failed to enqueue create control"
15219 			" flow operation", proxy_dev->data->port_id);
15220 		ret = -rte_errno;
15221 		goto error;
15222 	}
15223 	ret = __flow_hw_pull_comp(proxy_dev, queue, NULL);
15224 	if (ret) {
15225 		DRV_LOG(ERR, "port %u failed to insert control flow",
15226 			proxy_dev->data->port_id);
15227 		rte_errno = EINVAL;
15228 		ret = -rte_errno;
15229 		goto error;
15230 	}
15231 	entry->owner_dev = owner_dev;
15232 	entry->flow = flow;
15233 	if (info)
15234 		entry->info = *info;
15235 	else
15236 		entry->info.type = MLX5_CTRL_FLOW_TYPE_GENERAL;
15237 	if (external)
15238 		LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
15239 	else
15240 		LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
15241 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15242 	return 0;
15243 error:
15244 	if (entry)
15245 		mlx5_free(entry);
15246 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15247 	return ret;
15248 }
15249 
15250 /**
15251  * Destroys a control flow @p flow using flow template API on @p dev device.
15252  *
15253  * This function uses locks internally to synchronize access to the
15254  * flow queue.
15255  *
15256  * If the @p flow is stored on any private list/pool, then the caller must free
15257  * the relevant resources.
15258  *
15259  * @param dev
15260  *   Pointer to Ethernet device.
15261  * @param flow
15262  *   Pointer to flow rule.
15263  *
15264  * @return
15265  *   0 on success, non-zero value otherwise.
15266  */
15267 static int
15268 flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
15269 {
15270 	struct mlx5_priv *priv = dev->data->dev_private;
15271 	uint32_t queue = CTRL_QUEUE_ID(priv);
15272 	struct rte_flow_op_attr op_attr = {
15273 		.postpone = 0,
15274 	};
15275 	int ret;
15276 
15277 	rte_spinlock_lock(&priv->hw_ctrl_lock);
15278 	ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
15279 	if (ret) {
15280 		DRV_LOG(ERR, "port %u failed to enqueue destroy control"
15281 			" flow operation", dev->data->port_id);
15282 		goto exit;
15283 	}
15284 	ret = __flow_hw_pull_comp(dev, queue, NULL);
15285 	if (ret) {
15286 		DRV_LOG(ERR, "port %u failed to destroy control flow",
15287 			dev->data->port_id);
15288 		rte_errno = EINVAL;
15289 		ret = -rte_errno;
15290 		goto exit;
15291 	}
15292 exit:
15293 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
15294 	return ret;
15295 }
15296 
15297 /**
15298  * Destroys control flows created on behalf of @p owner device on @p dev device.
15299  *
15300  * @param dev
15301  *   Pointer to Ethernet device on which control flows were created.
15302  * @param owner
15303  *   Pointer to Ethernet device owning control flows.
15304  *
15305  * @return
15306  *   0 on success, otherwise negative error code is returned and
15307  *   rte_errno is set.
15308  */
15309 static int
15310 flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
15311 {
15312 	struct mlx5_priv *priv = dev->data->dev_private;
15313 	struct mlx5_ctrl_flow_entry *cf;
15314 	struct mlx5_ctrl_flow_entry *cf_next;
15315 	int ret;
15316 
15317 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
15318 	while (cf != NULL) {
15319 		cf_next = LIST_NEXT(cf, next);
15320 		if (cf->owner_dev == owner) {
15321 			ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15322 			if (ret) {
15323 				rte_errno = ret;
15324 				return -ret;
15325 			}
15326 			LIST_REMOVE(cf, next);
15327 			mlx5_free(cf);
15328 		}
15329 		cf = cf_next;
15330 	}
15331 	return 0;
15332 }
15333 
15334 /**
15335  * Destroys control flows created for @p owner_dev device.
15336  *
15337  * @param owner_dev
15338  *   Pointer to Ethernet device owning control flows.
15339  *
15340  * @return
15341  *   0 on success, otherwise negative error code is returned and
15342  *   rte_errno is set.
15343  */
15344 int
15345 mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
15346 {
15347 	struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
15348 	struct rte_eth_dev *proxy_dev;
15349 	uint16_t owner_port_id = owner_dev->data->port_id;
15350 	uint16_t proxy_port_id = owner_dev->data->port_id;
15351 	int ret;
15352 
15353 	/* Flush all flows created by this port for itself. */
15354 	ret = flow_hw_flush_ctrl_flows_owned_by(owner_dev, owner_dev);
15355 	if (ret)
15356 		return ret;
15357 	/* Flush all flows created for this port on proxy port. */
15358 	if (owner_priv->sh->config.dv_esw_en) {
15359 		ret = rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL);
15360 		if (ret == -ENODEV) {
15361 			DRV_LOG(DEBUG, "Unable to find transfer proxy port for port %u. It was "
15362 				       "probably closed. Control flows were cleared.",
15363 				       owner_port_id);
15364 			rte_errno = 0;
15365 			return 0;
15366 		} else if (ret) {
15367 			DRV_LOG(ERR, "Unable to find proxy port for port %u (ret = %d)",
15368 				owner_port_id, ret);
15369 			return ret;
15370 		}
15371 		proxy_dev = &rte_eth_devices[proxy_port_id];
15372 	} else {
15373 		proxy_dev = owner_dev;
15374 	}
15375 	return flow_hw_flush_ctrl_flows_owned_by(proxy_dev, owner_dev);
15376 }
15377 
15378 /**
15379  * Destroys all control flows created on @p dev device.
15380  *
15381  * @param dev
15382  *   Pointer to Ethernet device.
15383  *
15384  * @return
15385  *   0 on success, otherwise negative error code is returned and
15386  *   rte_errno is set.
15387  */
15388 static int
15389 flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
15390 {
15391 	struct mlx5_priv *priv = dev->data->dev_private;
15392 	struct mlx5_ctrl_flow_entry *cf;
15393 	struct mlx5_ctrl_flow_entry *cf_next;
15394 	int ret;
15395 
15396 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
15397 	while (cf != NULL) {
15398 		cf_next = LIST_NEXT(cf, next);
15399 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15400 		if (ret) {
15401 			rte_errno = ret;
15402 			return -ret;
15403 		}
15404 		LIST_REMOVE(cf, next);
15405 		mlx5_free(cf);
15406 		cf = cf_next;
15407 	}
15408 	cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
15409 	while (cf != NULL) {
15410 		cf_next = LIST_NEXT(cf, next);
15411 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15412 		if (ret) {
15413 			rte_errno = ret;
15414 			return -ret;
15415 		}
15416 		LIST_REMOVE(cf, next);
15417 		mlx5_free(cf);
15418 		cf = cf_next;
15419 	}
15420 	return 0;
15421 }
15422 
15423 int
15424 mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
15425 {
15426 	uint16_t port_id = dev->data->port_id;
15427 	struct rte_flow_item_ethdev esw_mgr_spec = {
15428 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
15429 	};
15430 	struct rte_flow_item_ethdev esw_mgr_mask = {
15431 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
15432 	};
15433 	struct rte_flow_item_tag reg_c0_spec = {
15434 		.index = (uint8_t)REG_C_0,
15435 		.data = flow_hw_esw_mgr_regc_marker(dev),
15436 	};
15437 	struct rte_flow_item_tag reg_c0_mask = {
15438 		.index = 0xff,
15439 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
15440 	};
15441 	struct mlx5_rte_flow_item_sq sq_spec = {
15442 		.queue = sqn,
15443 	};
15444 	struct rte_flow_action_ethdev port = {
15445 		.port_id = port_id,
15446 	};
15447 	struct rte_flow_item items[3] = { { 0 } };
15448 	struct rte_flow_action actions[3] = { { 0 } };
15449 	struct mlx5_ctrl_flow_info flow_info = {
15450 		.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
15451 		.esw_mgr_sq = sqn,
15452 	};
15453 	struct rte_eth_dev *proxy_dev;
15454 	struct mlx5_priv *proxy_priv;
15455 	uint16_t proxy_port_id = dev->data->port_id;
15456 	int ret;
15457 
15458 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15459 	if (ret) {
15460 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15461 			     "port must be present to create default SQ miss flows.",
15462 			     port_id);
15463 		return ret;
15464 	}
15465 	proxy_dev = &rte_eth_devices[proxy_port_id];
15466 	proxy_priv = proxy_dev->data->dev_private;
15467 	if (!proxy_priv->dr_ctx) {
15468 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
15469 			       "for HWS to create default SQ miss flows. Default flows will "
15470 			       "not be created.",
15471 			       proxy_port_id, port_id);
15472 		return 0;
15473 	}
15474 	if (!proxy_priv->hw_ctrl_fdb ||
15475 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
15476 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
15477 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
15478 			     "default flow tables were not created.",
15479 			     proxy_port_id, port_id);
15480 		rte_errno = ENOMEM;
15481 		return -rte_errno;
15482 	}
15483 	/*
15484 	 * Create a root SQ miss flow rule - match E-Switch Manager and SQ,
15485 	 * and jump to group 1.
15486 	 */
15487 	items[0] = (struct rte_flow_item){
15488 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
15489 		.spec = &esw_mgr_spec,
15490 		.mask = &esw_mgr_mask,
15491 	};
15492 	items[1] = (struct rte_flow_item){
15493 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15494 		.spec = &sq_spec,
15495 	};
15496 	items[2] = (struct rte_flow_item){
15497 		.type = RTE_FLOW_ITEM_TYPE_END,
15498 	};
15499 	actions[0] = (struct rte_flow_action){
15500 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
15501 	};
15502 	actions[1] = (struct rte_flow_action){
15503 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
15504 	};
15505 	actions[2] = (struct rte_flow_action) {
15506 		.type = RTE_FLOW_ACTION_TYPE_END,
15507 	};
15508 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
15509 				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl,
15510 				       items, 0, actions, 0, &flow_info, external);
15511 	if (ret) {
15512 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
15513 			port_id, sqn, ret);
15514 		return ret;
15515 	}
15516 	/*
15517 	 * Create a non-root SQ miss flow rule - match REG_C_0 marker and SQ,
15518 	 * and forward to port.
15519 	 */
15520 	items[0] = (struct rte_flow_item){
15521 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
15522 		.spec = &reg_c0_spec,
15523 		.mask = &reg_c0_mask,
15524 	};
15525 	items[1] = (struct rte_flow_item){
15526 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15527 		.spec = &sq_spec,
15528 	};
15529 	items[2] = (struct rte_flow_item){
15530 		.type = RTE_FLOW_ITEM_TYPE_END,
15531 	};
15532 	actions[0] = (struct rte_flow_action){
15533 		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
15534 		.conf = &port,
15535 	};
15536 	actions[1] = (struct rte_flow_action){
15537 		.type = RTE_FLOW_ACTION_TYPE_END,
15538 	};
15539 	flow_info.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS;
15540 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
15541 				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl,
15542 				       items, 0, actions, 0, &flow_info, external);
15543 	if (ret) {
15544 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
15545 			port_id, sqn, ret);
15546 		return ret;
15547 	}
15548 	return 0;
15549 }
15550 
15551 static bool
15552 flow_hw_is_matching_sq_miss_flow(struct mlx5_ctrl_flow_entry *cf,
15553 				 struct rte_eth_dev *dev,
15554 				 uint32_t sqn)
15555 {
15556 	if (cf->owner_dev != dev)
15557 		return false;
15558 	if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
15559 		return true;
15560 	if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
15561 		return true;
15562 	return false;
15563 }
15564 
15565 int
15566 mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
15567 {
15568 	uint16_t port_id = dev->data->port_id;
15569 	uint16_t proxy_port_id = dev->data->port_id;
15570 	struct rte_eth_dev *proxy_dev;
15571 	struct mlx5_priv *proxy_priv;
15572 	struct mlx5_ctrl_flow_entry *cf;
15573 	struct mlx5_ctrl_flow_entry *cf_next;
15574 	int ret;
15575 
15576 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15577 	if (ret) {
15578 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15579 			     "port must be present for default SQ miss flow rules to exist.",
15580 			     port_id);
15581 		return ret;
15582 	}
15583 	proxy_dev = &rte_eth_devices[proxy_port_id];
15584 	proxy_priv = proxy_dev->data->dev_private;
15585 	/* FDB default flow rules must be enabled. */
15586 	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
15587 	if (!proxy_priv->dr_ctx)
15588 		return 0;
15589 	if (!proxy_priv->hw_ctrl_fdb ||
15590 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
15591 	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl)
15592 		return 0;
15593 	cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
15594 	while (cf != NULL) {
15595 		cf_next = LIST_NEXT(cf, next);
15596 		if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) {
15597 			claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
15598 			LIST_REMOVE(cf, next);
15599 			mlx5_free(cf);
15600 		}
15601 		cf = cf_next;
15602 	}
15603 	return 0;
15604 }
15605 
15606 int
15607 mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
15608 {
15609 	uint16_t port_id = dev->data->port_id;
15610 	struct rte_flow_item_ethdev port_spec = {
15611 		.port_id = port_id,
15612 	};
15613 	struct rte_flow_item items[] = {
15614 		{
15615 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
15616 			.spec = &port_spec,
15617 		},
15618 		{
15619 			.type = RTE_FLOW_ITEM_TYPE_END,
15620 		},
15621 	};
15622 	struct rte_flow_action_jump jump = {
15623 		.group = 1,
15624 	};
15625 	struct rte_flow_action actions[] = {
15626 		{
15627 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
15628 			.conf = &jump,
15629 		},
15630 		{
15631 			.type = RTE_FLOW_ACTION_TYPE_END,
15632 		}
15633 	};
15634 	struct mlx5_ctrl_flow_info flow_info = {
15635 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP,
15636 	};
15637 	struct rte_eth_dev *proxy_dev;
15638 	struct mlx5_priv *proxy_priv;
15639 	uint16_t proxy_port_id = dev->data->port_id;
15640 	int ret;
15641 
15642 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
15643 	if (ret) {
15644 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
15645 			     "port must be present to create default FDB jump rule.",
15646 			     port_id);
15647 		return ret;
15648 	}
15649 	proxy_dev = &rte_eth_devices[proxy_port_id];
15650 	proxy_priv = proxy_dev->data->dev_private;
15651 	/* FDB default flow rules must be enabled. */
15652 	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
15653 	if (!proxy_priv->dr_ctx) {
15654 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
15655 			       "for HWS to create default FDB jump rule. Default rule will "
15656 			       "not be created.",
15657 			       proxy_port_id, port_id);
15658 		return 0;
15659 	}
15660 	if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) {
15661 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
15662 			     "default flow tables were not created.",
15663 			     proxy_port_id, port_id);
15664 		rte_errno = EINVAL;
15665 		return -rte_errno;
15666 	}
15667 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
15668 					proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl,
15669 					items, 0, actions, 0, &flow_info, false);
15670 }
15671 
15672 int
15673 mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
15674 {
15675 	struct mlx5_priv *priv = dev->data->dev_private;
15676 	struct rte_flow_item_eth promisc = {
15677 		.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
15678 		.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
15679 		.hdr.ether_type = 0,
15680 	};
15681 	struct rte_flow_item eth_all[] = {
15682 		[0] = {
15683 			.type = RTE_FLOW_ITEM_TYPE_ETH,
15684 			.spec = &promisc,
15685 			.mask = &promisc,
15686 		},
15687 		[1] = {
15688 			.type = RTE_FLOW_ITEM_TYPE_END,
15689 		},
15690 	};
15691 	struct rte_flow_action_modify_field mreg_action = {
15692 		.operation = RTE_FLOW_MODIFY_SET,
15693 		.dst = {
15694 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
15695 			.tag_index = REG_C_1,
15696 		},
15697 		.src = {
15698 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
15699 			.tag_index = REG_A,
15700 		},
15701 		.width = 32,
15702 	};
15703 	struct rte_flow_action copy_reg_action[] = {
15704 		[0] = {
15705 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
15706 			.conf = &mreg_action,
15707 		},
15708 		[1] = {
15709 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
15710 		},
15711 		[2] = {
15712 			.type = RTE_FLOW_ACTION_TYPE_END,
15713 		},
15714 	};
15715 	struct mlx5_ctrl_flow_info flow_info = {
15716 		.type = MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
15717 	};
15718 
15719 	MLX5_ASSERT(priv->master);
15720 	if (!priv->dr_ctx ||
15721 	    !priv->hw_ctrl_fdb ||
15722 	    !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
15723 		return 0;
15724 	return flow_hw_create_ctrl_flow(dev, dev,
15725 					priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl,
15726 					eth_all, 0, copy_reg_action, 0, &flow_info, false);
15727 }
15728 
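/*
 * Create the default Tx metadata copy rule: match any Ethernet frame in the Tx metadata
 * copy table and use a MODIFY_FIELD action to copy the application metadata register
 * (REG_A) into REG_C_1 before the JUMP action, so the metadata can be matched later on.
 */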
15729 int
15730 mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
15731 {
15732 	struct mlx5_priv *priv = dev->data->dev_private;
15733 	struct mlx5_rte_flow_item_sq sq_spec = {
15734 		.queue = sqn,
15735 	};
15736 	struct rte_flow_item items[] = {
15737 		{
15738 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
15739 			.spec = &sq_spec,
15740 		},
15741 		{
15742 			.type = RTE_FLOW_ITEM_TYPE_END,
15743 		},
15744 	};
15745 	/*
15746 	 * Allocate actions array suitable for all cases - extended metadata enabled or not.
15747 	 * With extended metadata there will be an additional MODIFY_FIELD action before JUMP.
15748 	 */
15749 	struct rte_flow_action actions[] = {
15750 		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD },
15751 		{ .type = RTE_FLOW_ACTION_TYPE_JUMP },
15752 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15753 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15754 	};
15755 	struct mlx5_ctrl_flow_info flow_info = {
15756 		.type = MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH,
15757 		.tx_repr_sq = sqn,
15758 	};
15759 
15760 	/* It is assumed that caller checked for representor matching. */
15761 	MLX5_ASSERT(priv->sh->config.repr_matching);
15762 	if (!priv->dr_ctx) {
15763 		DRV_LOG(DEBUG, "Port %u must be configured for HWS, before creating "
15764 			       "default egress flow rules. Omitting creation.",
15765 			       dev->data->port_id);
15766 		return 0;
15767 	}
15768 	if (!priv->hw_tx_repr_tagging_tbl) {
15769 		DRV_LOG(ERR, "Port %u is configured for HWS, but table for default "
15770 			     "egress flow rules does not exist.",
15771 			     dev->data->port_id);
15772 		rte_errno = EINVAL;
15773 		return -rte_errno;
15774 	}
15775 	/*
15776 	 * If extended metadata mode is enabled, then an additional MODIFY_FIELD action must be
15777 	 * placed before terminating JUMP action.
15778 	 */
15779 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
15780 		actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
15781 		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
15782 	}
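	/*
	 * Resulting action sequence (illustration):
	 *   representor matching only:     MODIFY_FIELD, JUMP, END (trailing END entry unused)
	 *   with META32_HWS extended meta: MODIFY_FIELD, MODIFY_FIELD, JUMP, END
	 */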
15783 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
15784 					items, 0, actions, 0, &flow_info, external);
15785 }
15786 
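/*
 * Create the LACP Rx control flow rule: match slow-protocol frames (EtherType 0x8809)
 * in the dedicated LACP Rx table and apply the default miss action.
 */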
15787 int
15788 mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
15789 {
15790 	struct mlx5_priv *priv = dev->data->dev_private;
15791 	struct rte_flow_item_eth lacp_item = {
15792 		.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
15793 	};
15794 	struct rte_flow_item eth_lacp[] = {
15795 		[0] = {
15796 			.type = RTE_FLOW_ITEM_TYPE_ETH,
15797 			.spec = &lacp_item,
15798 			.mask = &lacp_item,
15799 		},
15800 		[1] = {
15801 			.type = RTE_FLOW_ITEM_TYPE_END,
15802 		},
15803 	};
15804 	struct rte_flow_action miss_action[] = {
15805 		[0] = {
15806 			.type = (enum rte_flow_action_type)
15807 				MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
15808 		},
15809 		[1] = {
15810 			.type = RTE_FLOW_ACTION_TYPE_END,
15811 		},
15812 	};
15813 	struct mlx5_ctrl_flow_info flow_info = {
15814 		.type = MLX5_CTRL_FLOW_TYPE_LACP_RX,
15815 	};
15816 
15817 	if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl)
15818 		return 0;
15819 	return flow_hw_create_ctrl_flow(dev, dev,
15820 					priv->hw_ctrl_fdb->hw_lacp_rx_tbl,
15821 					eth_lacp, 0, miss_action, 0, &flow_info, false);
15822 }
15823 
15824 static uint32_t
15825 __calc_pattern_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
15826 {
15827 	switch (eth_pattern_type) {
15828 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
15829 		return MLX5_CTRL_PROMISCUOUS;
15830 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
15831 		return MLX5_CTRL_ALL_MULTICAST;
15832 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
15833 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15834 		return MLX5_CTRL_BROADCAST;
15835 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
15836 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15837 		return MLX5_CTRL_IPV4_MULTICAST;
15838 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
15839 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15840 		return MLX5_CTRL_IPV6_MULTICAST;
15841 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
15842 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
15843 		return MLX5_CTRL_DMAC;
15844 	default:
15845 		/* Should not reach here. */
15846 		MLX5_ASSERT(false);
15847 		return 0;
15848 	}
15849 }
15850 
15851 static uint32_t
15852 __calc_vlan_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
15853 {
15854 	switch (eth_pattern_type) {
15855 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15856 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15857 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15858 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
15859 		return MLX5_CTRL_VLAN_FILTER;
15860 	default:
15861 		return 0;
15862 	}
15863 }
15864 
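/*
 * Check if control flow rules for the given Ethernet pattern type were requested through
 * the MLX5_CTRL_* flags. For VLAN-tagged pattern types both the base pattern flag and
 * MLX5_CTRL_VLAN_FILTER must be set (e.g. BCAST_VLAN needs MLX5_CTRL_BROADCAST and
 * MLX5_CTRL_VLAN_FILTER). When MLX5_CTRL_VLAN_FILTER is set, only VLAN-tagged pattern
 * types are considered requested.
 */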
15865 static bool
15866 eth_pattern_type_is_requested(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
15867 			      uint32_t flags)
15868 {
15869 	uint32_t pattern_flags = __calc_pattern_flags(eth_pattern_type);
15870 	uint32_t vlan_flags = __calc_vlan_flags(eth_pattern_type);
15871 	bool pattern_requested = !!(pattern_flags & flags);
15872 	bool consider_vlan = vlan_flags || (MLX5_CTRL_VLAN_FILTER & flags);
15873 	bool vlan_requested = !!(vlan_flags & flags);
15874 
15875 	if (consider_vlan)
15876 		return pattern_requested && vlan_requested;
15877 	else
15878 		return pattern_requested;
15879 }
15880 
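/*
 * Check whether all RSS hash types used by the control flow actions template for the
 * given expanded RSS type are enabled in the current port configuration (rss_conf.rss_hf).
 * If any required hash bit is missing, the corresponding control flow rules are skipped.
 */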
15881 static bool
15882 rss_type_is_requested(struct mlx5_priv *priv,
15883 		      const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15884 {
15885 	struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[rss_type];
15886 	unsigned int i;
15887 
15888 	for (i = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
15889 		if (at->actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
15890 			const struct rte_flow_action_rss *rss = at->actions[i].conf;
15891 			uint64_t rss_types = rss->types;
15892 
15893 			if ((rss_types & priv->rss_conf.rss_hf) != rss_types)
15894 				return false;
15895 		}
15896 	}
15897 	return true;
15898 }
15899 
15900 static const struct rte_flow_item_eth *
15901 __get_eth_spec(const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern)
15902 {
15903 	switch (pattern) {
15904 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
15905 		return &ctrl_rx_eth_promisc_spec;
15906 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
15907 		return &ctrl_rx_eth_mcast_spec;
15908 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
15909 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
15910 		return &ctrl_rx_eth_bcast_spec;
15911 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
15912 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
15913 		return &ctrl_rx_eth_ipv4_mcast_spec;
15914 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
15915 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
15916 		return &ctrl_rx_eth_ipv6_mcast_spec;
15917 	default:
15918 		/* This case should not be reached. */
15919 		MLX5_ASSERT(false);
15920 		return NULL;
15921 	}
15922 }
15923 
15924 static int
15925 __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
15926 			    struct rte_flow_template_table *tbl,
15927 			    const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15928 			    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15929 {
15930 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
15931 	struct rte_flow_item items[5];
15932 	struct rte_flow_action actions[] = {
15933 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
15934 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15935 	};
15936 	struct mlx5_ctrl_flow_info flow_info = {
15937 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
15938 	};
15939 
15940 	if (!eth_spec)
15941 		return -EINVAL;
15942 	memset(items, 0, sizeof(items));
15943 	items[0] = (struct rte_flow_item){
15944 		.type = RTE_FLOW_ITEM_TYPE_ETH,
15945 		.spec = eth_spec,
15946 	};
15947 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
15948 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
15949 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
15950 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
15951 	/* Without VLAN filtering, only a single flow rule must be created. */
15952 	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
15953 }
15954 
15955 static int
15956 __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
15957 				 struct rte_flow_template_table *tbl,
15958 				 const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
15959 				 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
15960 {
15961 	struct mlx5_priv *priv = dev->data->dev_private;
15962 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
15963 	struct rte_flow_item items[5];
15964 	struct rte_flow_action actions[] = {
15965 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
15966 		{ .type = RTE_FLOW_ACTION_TYPE_END },
15967 	};
15968 	struct mlx5_ctrl_flow_info flow_info = {
15969 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
15970 	};
15971 	unsigned int i;
15972 
15973 	if (!eth_spec)
15974 		return -EINVAL;
15975 	memset(items, 0, sizeof(items));
15976 	items[0] = (struct rte_flow_item){
15977 		.type = RTE_FLOW_ITEM_TYPE_ETH,
15978 		.spec = eth_spec,
15979 	};
15980 	/* VLAN item is added here; its spec is filled with each VLAN ID in the loop below. */
15981 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
15982 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
15983 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
15984 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
15985 	/* VLAN filtering is enabled, so create one flow rule for each registered VLAN ID. */
15986 	for (i = 0; i < priv->vlan_filter_n; ++i) {
15987 		uint16_t vlan = priv->vlan_filter[i];
15988 		struct rte_flow_item_vlan vlan_spec = {
15989 			.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
15990 		};
15991 
15992 		items[1].spec = &vlan_spec;
15993 		if (flow_hw_create_ctrl_flow(dev, dev,
15994 					     tbl, items, 0, actions, 0, &flow_info, false))
15995 			return -rte_errno;
15996 	}
15997 	return 0;
15998 }
15999 
16000 static int
16001 __flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
16002 				    struct rte_flow_template_table *tbl,
16003 				    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
16004 				    const struct rte_ether_addr *addr)
16005 {
16006 	struct rte_flow_item_eth eth_spec = {
16007 		.hdr.dst_addr = *addr,
16008 	};
16009 	struct rte_flow_item items[5];
16010 	struct rte_flow_action actions[] = {
16011 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
16012 		{ .type = RTE_FLOW_ACTION_TYPE_END },
16013 	};
16014 	struct mlx5_ctrl_flow_info flow_info = {
16015 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
16016 		.uc = {
16017 			.dmac = *addr,
16018 		},
16019 	};
16020 
16021 	memset(items, 0, sizeof(items));
16022 	items[0] = (struct rte_flow_item){
16023 		.type = RTE_FLOW_ITEM_TYPE_ETH,
16024 		.spec = &eth_spec,
16025 	};
16026 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
16027 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16028 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16029 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16030 
16031 	if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
16032 		return -rte_errno;
16033 
16034 	return 0;
16035 }
16036 
16037 static int
16038 __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
16039 			     struct rte_flow_template_table *tbl,
16040 			     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16041 {
16042 	unsigned int i;
16043 	int ret;
16044 
16045 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
16046 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
16047 
16048 		if (rte_is_zero_ether_addr(mac))
16049 			continue;
16050 
16051 		ret = __flow_hw_ctrl_flows_unicast_create(dev, tbl, rss_type, mac);
16052 		if (ret < 0)
16053 			return ret;
16054 	}
16055 	return 0;
16056 }
16057 
16058 static int
16059 __flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
16060 					 struct rte_flow_template_table *tbl,
16061 					 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
16062 					 const struct rte_ether_addr *addr,
16063 					 const uint16_t vid)
16064 {
16065 	struct rte_flow_item_eth eth_spec = {
16066 		.hdr.dst_addr = *addr,
16067 	};
16068 	struct rte_flow_item_vlan vlan_spec = {
16069 		.hdr.vlan_tci = rte_cpu_to_be_16(vid),
16070 	};
16071 	struct rte_flow_item items[5];
16072 	struct rte_flow_action actions[] = {
16073 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
16074 		{ .type = RTE_FLOW_ACTION_TYPE_END },
16075 	};
16076 	struct mlx5_ctrl_flow_info flow_info = {
16077 		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
16078 		.uc = {
16079 			.dmac = *addr,
16080 			.vlan = vid,
16081 		},
16082 	};
16083 
16084 	memset(items, 0, sizeof(items));
16085 	items[0] = (struct rte_flow_item){
16086 		.type = RTE_FLOW_ITEM_TYPE_ETH,
16087 		.spec = &eth_spec,
16088 	};
16089 	items[1] = (struct rte_flow_item){
16090 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
16091 		.spec = &vlan_spec,
16092 	};
16093 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16094 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16095 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16096 
16097 	if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
16098 		return -rte_errno;
16099 
16100 	return 0;
16101 }
16102 
16103 static int
16104 __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
16105 				  struct rte_flow_template_table *tbl,
16106 				  const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16107 {
16108 	struct mlx5_priv *priv = dev->data->dev_private;
16109 	unsigned int i;
16110 	unsigned int j;
16111 
16112 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
16113 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
16114 
16115 		if (rte_is_zero_ether_addr(mac))
16116 			continue;
16117 
16118 		for (j = 0; j < priv->vlan_filter_n; ++j) {
16119 			uint16_t vlan = priv->vlan_filter[j];
16120 			int ret;
16121 
16122 			ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tbl, rss_type,
16123 								       mac, vlan);
16124 			if (ret < 0)
16125 				return ret;
16126 		}
16127 	}
16128 	return 0;
16129 }
16130 
16131 static int
16132 __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
16133 		     struct rte_flow_template_table *tbl,
16134 		     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
16135 		     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16136 {
16137 	switch (pattern_type) {
16138 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
16139 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
16140 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
16141 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
16142 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
16143 		return __flow_hw_ctrl_flows_single(dev, tbl, pattern_type, rss_type);
16144 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
16145 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
16146 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
16147 		return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
16148 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
16149 		return __flow_hw_ctrl_flows_unicast(dev, tbl, rss_type);
16150 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
16151 		return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, rss_type);
16152 	default:
16153 		/* Should not reach here. */
16154 		MLX5_ASSERT(false);
16155 		rte_errno = EINVAL;
16156 		return -EINVAL;
16157 	}
16158 }
16159 
16160 
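/*
 * Create all requested control flow rules: for every requested Ethernet pattern type and
 * every expanded RSS type, lazily create the RSS actions template and the template table
 * (if they do not exist yet) and insert the corresponding control flow rules.
 */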
16161 int
16162 mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
16163 {
16164 	struct mlx5_priv *priv = dev->data->dev_private;
16165 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
16166 	unsigned int i;
16167 	int j;
16168 	int ret = 0;
16169 
16170 	RTE_SET_USED(priv);
16171 	RTE_SET_USED(flags);
16172 	if (!priv->dr_ctx) {
16173 		DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
16174 			       "HWS needs to be configured beforehand.",
16175 			       dev->data->port_id);
16176 		return 0;
16177 	}
16178 	if (!priv->hw_ctrl_rx) {
16179 		DRV_LOG(ERR, "port %u Control flow rule templates were not created.",
16180 			dev->data->port_id);
16181 		rte_errno = EINVAL;
16182 		return -rte_errno;
16183 	}
16184 	hw_ctrl_rx = priv->hw_ctrl_rx;
16185 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
16186 		const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
16187 
16188 		if (!eth_pattern_type_is_requested(eth_pattern_type, flags))
16189 			continue;
16190 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
16191 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
16192 			struct rte_flow_actions_template *at;
16193 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
16194 			const struct mlx5_flow_template_table_cfg cfg = {
16195 				.attr = tmpls->attr,
16196 				.external = 0,
16197 			};
16198 
16199 			if (!hw_ctrl_rx->rss[rss_type]) {
16200 				at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
16201 				if (!at)
16202 					return -rte_errno;
16203 				hw_ctrl_rx->rss[rss_type] = at;
16204 			} else {
16205 				at = hw_ctrl_rx->rss[rss_type];
16206 			}
16207 			if (!rss_type_is_requested(priv, rss_type))
16208 				continue;
16209 			if (!tmpls->tbl) {
16210 				tmpls->tbl = flow_hw_table_create(dev, &cfg,
16211 								  &tmpls->pt, 1, &at, 1, NULL);
16212 				if (!tmpls->tbl) {
16213 					DRV_LOG(ERR, "port %u Failed to create template table "
16214 						     "for control flow rules. Unable to create "
16215 						     "control flow rules.",
16216 						     dev->data->port_id);
16217 					return -rte_errno;
16218 				}
16219 			}
16220 
16221 			ret = __flow_hw_ctrl_flows(dev, tmpls->tbl, eth_pattern_type, rss_type);
16222 			if (ret) {
16223 				DRV_LOG(ERR, "port %u Failed to create control flow rule.",
16224 					dev->data->port_id);
16225 				return ret;
16226 			}
16227 		}
16228 	}
16229 	return 0;
16230 }
16231 
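/*
 * Create unicast DMAC (optionally DMAC + VLAN) control flow rules for a single address
 * across all expanded RSS types, creating missing templates and tables on demand,
 * similarly to mlx5_flow_hw_ctrl_flows().
 */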
16232 static int
16233 mlx5_flow_hw_ctrl_flow_single(struct rte_eth_dev *dev,
16234 			      const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
16235 			      const struct rte_ether_addr *addr,
16236 			      const uint16_t vlan)
16237 {
16238 	struct mlx5_priv *priv = dev->data->dev_private;
16239 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
16240 	unsigned int j;
16241 	int ret = 0;
16242 
16243 	if (!priv->dr_ctx) {
16244 		DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
16245 			       "HWS needs to be configured beforehand.",
16246 			       dev->data->port_id);
16247 		return 0;
16248 	}
16249 	if (!priv->hw_ctrl_rx) {
16250 		DRV_LOG(ERR, "port %u Control flow rule templates were not created.",
16251 			dev->data->port_id);
16252 		rte_errno = EINVAL;
16253 		return -rte_errno;
16254 	}
16255 	hw_ctrl_rx = priv->hw_ctrl_rx;
16256 
16257 	/* TODO: refactor this block - it duplicates the generic control flow rule creation logic above. */
16258 	for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
16259 		const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
16260 		const unsigned int pti = eth_pattern_type;
16261 		struct rte_flow_actions_template *at;
16262 		struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[pti][j];
16263 		const struct mlx5_flow_template_table_cfg cfg = {
16264 			.attr = tmpls->attr,
16265 			.external = 0,
16266 		};
16267 
16268 		if (!hw_ctrl_rx->rss[rss_type]) {
16269 			at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
16270 			if (!at)
16271 				return -rte_errno;
16272 			hw_ctrl_rx->rss[rss_type] = at;
16273 		} else {
16274 			at = hw_ctrl_rx->rss[rss_type];
16275 		}
16276 		if (!rss_type_is_requested(priv, rss_type))
16277 			continue;
16278 		if (!tmpls->tbl) {
16279 			tmpls->tbl = flow_hw_table_create(dev, &cfg,
16280 							  &tmpls->pt, 1, &at, 1, NULL);
16281 			if (!tmpls->tbl) {
16282 				DRV_LOG(ERR, "port %u Failed to create template table "
16283 					     "for control flow rules. Unable to create "
16284 					     "control flow rules.",
16285 					     dev->data->port_id);
16286 				return -rte_errno;
16287 			}
16288 		}
16289 
16290 		MLX5_ASSERT(eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC ||
16291 			    eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN);
16292 
16293 		if (eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC)
16294 			ret = __flow_hw_ctrl_flows_unicast_create(dev, tmpls->tbl, rss_type, addr);
16295 		else
16296 			ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tmpls->tbl, rss_type,
16297 								       addr, vlan);
16298 		if (ret) {
16299 			DRV_LOG(ERR, "port %u Failed to create unicast control flow rule.",
16300 				dev->data->port_id);
16301 			return ret;
16302 		}
16303 	}
16304 
16305 	return 0;
16306 }
16307 
16308 int
16309 mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
16310 			    const struct rte_ether_addr *addr)
16311 {
16312 	return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
16313 					     addr, 0);
16314 }
16315 
16316 int
16317 mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
16318 				    const struct rte_ether_addr *addr)
16319 {
16320 	struct mlx5_priv *priv = dev->data->dev_private;
16321 	struct mlx5_ctrl_flow_entry *entry;
16322 	struct mlx5_ctrl_flow_entry *tmp;
16323 	int ret;
16324 
16325 	/*
16326 	 * HWS does not have automatic RSS flow expansion,
16327 	 * so each variant of the control flow rule is a separate entry in the list.
16328 	 * In that case, the whole list must be traversed.
16329 	 */
16330 	entry = LIST_FIRST(&priv->hw_ctrl_flows);
16331 	while (entry != NULL) {
16332 		tmp = LIST_NEXT(entry, next);
16333 
16334 		if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
16335 		    !rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
16336 			entry = tmp;
16337 			continue;
16338 		}
16339 
16340 		ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
16341 		LIST_REMOVE(entry, next);
16342 		mlx5_free(entry);
16343 		if (ret)
16344 			return ret;
16345 
16346 		entry = tmp;
16347 	}
16348 	return 0;
16349 }
16350 
16351 int
16352 mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
16353 				 const struct rte_ether_addr *addr,
16354 				 const uint16_t vlan)
16355 {
16356 	return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
16357 					     addr, vlan);
16358 }
16359 
16360 int
16361 mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
16362 					 const struct rte_ether_addr *addr,
16363 					 const uint16_t vlan)
16364 {
16365 	struct mlx5_priv *priv = dev->data->dev_private;
16366 	struct mlx5_ctrl_flow_entry *entry;
16367 	struct mlx5_ctrl_flow_entry *tmp;
16368 	int ret;
16369 
16370 	/*
16371 	 * HWS does not have automatic RSS flow expansion,
16372 	 * so each variant of the control flow rule is a separate entry in the list.
16373 	 * In that case, the whole list must be traversed.
16374 	 */
16375 	entry = LIST_FIRST(&priv->hw_ctrl_flows);
16376 	while (entry != NULL) {
16377 		tmp = LIST_NEXT(entry, next);
16378 
16379 		if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
16380 		    !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
16381 		    vlan != entry->info.uc.vlan) {
16382 			entry = tmp;
16383 			continue;
16384 		}
16385 
16386 		ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
16387 		LIST_REMOVE(entry, next);
16388 		mlx5_free(entry);
16389 		if (ret)
16390 			return ret;
16391 
16392 		entry = tmp;
16393 	}
16394 	return 0;
16395 }
16396 
16397 static __rte_always_inline uint32_t
16398 mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
16399 {
16400 	uint32_t tbl_type;
16401 
16402 	if (domain->transfer)
16403 		tbl_type = MLX5DR_ACTION_FLAG_HWS_FDB;
16404 	else if (domain->egress)
16405 		tbl_type = MLX5DR_ACTION_FLAG_HWS_TX;
16406 	else if (domain->ingress)
16407 		tbl_type = MLX5DR_ACTION_FLAG_HWS_RX;
16408 	else
16409 		tbl_type = UINT32_MAX;
16410 	return tbl_type;
16411 }
16412 
16413 static struct mlx5_hw_encap_decap_action *
16414 __mlx5_reformat_create(struct rte_eth_dev *dev,
16415 		       const struct rte_flow_action_raw_encap *encap_conf,
16416 		       const struct rte_flow_indir_action_conf *domain,
16417 		       enum mlx5dr_action_type type)
16418 {
16419 	struct mlx5_priv *priv = dev->data->dev_private;
16420 	struct mlx5_hw_encap_decap_action *handle;
16421 	struct mlx5dr_action_reformat_header hdr;
16422 	uint32_t flags;
16423 
16424 	flags = mlx5_reformat_domain_to_tbl_type(domain);
16425 	flags |= (uint32_t)MLX5DR_ACTION_FLAG_SHARED;
16426 	if (flags == UINT32_MAX) {
16427 		DRV_LOG(ERR, "Reformat: invalid indirect action configuration");
16428 		return NULL;
16429 	}
16430 	/* Allocate new list entry. */
16431 	handle = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*handle), 0, SOCKET_ID_ANY);
16432 	if (!handle) {
16433 		DRV_LOG(ERR, "Reformat: failed to allocate reformat entry");
16434 		return NULL;
16435 	}
16436 	handle->action_type = type;
16437 	hdr.sz = encap_conf ? encap_conf->size : 0;
16438 	hdr.data = encap_conf ? encap_conf->data : NULL;
16439 	handle->action = mlx5dr_action_create_reformat(priv->dr_ctx,
16440 					type, 1, &hdr, 0, flags);
16441 	if (!handle->action) {
16442 		DRV_LOG(ERR, "Reformat: failed to create reformat action");
16443 		mlx5_free(handle);
16444 		return NULL;
16445 	}
16446 	return handle;
16447 }
16448 
16449 /**
16450  * Create mlx5 reformat action.
16451  *
16452  * @param[in] dev
16453  *   Pointer to rte_eth_dev structure.
16454  * @param[in] conf
16455  *   Pointer to the indirect action parameters.
16456  * @param[in] encap_action
16457  *   Pointer to the raw_encap action configuration.
16458  * @param[in] decap_action
16459  *   Pointer to the raw_decap action configuration.
16460  * @param[out] error
16461  *   Pointer to error structure.
16462  *
16463  * @return
16464  *   A valid shared action handle in case of success, NULL otherwise and
16465  *   rte_errno is set.
16466  */
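/*
 * Summary of the encap/decap combinations accepted below (sizes in bytes,
 * DECISION = MLX5_ENCAPSULATION_DECISION_SIZE, MAX = MLX5_ENCAP_MAX_LEN):
 *  - raw_encap only, DECISION <= size <= MAX              -> REFORMAT_L2_TO_TNL_L2
 *  - raw_decap only, size >= DECISION                     -> REFORMAT_TNL_L2_TO_L2
 *  - decap size < DECISION and DECISION <= encap <= MAX   -> REFORMAT_L2_TO_TNL_L3
 *  - decap size >= DECISION and encap size < DECISION     -> REFORMAT_TNL_L3_TO_L2
 * Any other combination is rejected with EINVAL.
 */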
16467 struct mlx5_hw_encap_decap_action*
16468 mlx5_reformat_action_create(struct rte_eth_dev *dev,
16469 			    const struct rte_flow_indir_action_conf *conf,
16470 			    const struct rte_flow_action *encap_action,
16471 			    const struct rte_flow_action *decap_action,
16472 			    struct rte_flow_error *error)
16473 {
16474 	struct mlx5_priv *priv = dev->data->dev_private;
16475 	struct mlx5_hw_encap_decap_action *handle;
16476 	const struct rte_flow_action_raw_encap *encap = NULL;
16477 	const struct rte_flow_action_raw_decap *decap = NULL;
16478 	enum mlx5dr_action_type type = MLX5DR_ACTION_TYP_LAST;
16479 
16480 	MLX5_ASSERT(!encap_action || encap_action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP);
16481 	MLX5_ASSERT(!decap_action || decap_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP);
16482 	if (priv->sh->config.dv_flow_en != 2) {
16483 		rte_flow_error_set(error, ENOTSUP,
16484 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16485 				   "Reformat: hardware does not support");
16486 		return NULL;
16487 	}
16488 	if (!conf || (conf->transfer + conf->egress + conf->ingress != 1)) {
16489 		rte_flow_error_set(error, EINVAL,
16490 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16491 				   "Reformat: domain should be specified");
16492 		return NULL;
16493 	}
16494 	if ((encap_action && !encap_action->conf) || (decap_action && !decap_action->conf)) {
16495 		rte_flow_error_set(error, EINVAL,
16496 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16497 				   "Reformat: missing action configuration");
16498 		return NULL;
16499 	}
16500 	if (encap_action && !decap_action) {
16501 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
16502 		if (!encap->size || encap->size > MLX5_ENCAP_MAX_LEN ||
16503 		    encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16504 			rte_flow_error_set(error, EINVAL,
16505 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16506 					   "Reformat: Invalid encap length");
16507 			return NULL;
16508 		}
16509 		type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
16510 	} else if (decap_action && !encap_action) {
16511 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
16512 		if (!decap->size || decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16513 			rte_flow_error_set(error, EINVAL,
16514 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16515 					   "Reformat: Invalid decap length");
16516 			return NULL;
16517 		}
16518 		type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
16519 	} else if (encap_action && decap_action) {
16520 		decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
16521 		encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
16522 		if (decap->size < MLX5_ENCAPSULATION_DECISION_SIZE &&
16523 		    encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
16524 		    encap->size <= MLX5_ENCAP_MAX_LEN) {
16525 			type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
16526 		} else if (decap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
16527 			   encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16528 			type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
16529 		} else {
16530 			rte_flow_error_set(error, EINVAL,
16531 					   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16532 					   "Reformat: Invalid decap & encap length");
16533 			return NULL;
16534 		}
16535 	} else if (!encap_action && !decap_action) {
16536 		rte_flow_error_set(error, EINVAL,
16537 				   RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16538 				   "Reformat: Invalid decap & encap configurations");
16539 		return NULL;
16540 	}
16541 	if (!priv->dr_ctx) {
16542 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
16543 				   encap_action, "Reformat: HWS not supported");
16544 		return NULL;
16545 	}
16546 	handle = __mlx5_reformat_create(dev, encap, conf, type);
16547 	if (!handle) {
16548 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16549 				   "Reformat: failed to create indirect action");
16550 		return NULL;
16551 	}
16552 	return handle;
16553 }
16554 
16555 /**
16556  * Destroy the indirect reformat action.
16557  * Release action related resources on the NIC and the memory.
16558  * Lock free, (mutex should be acquired by caller).
16559  *
16560  * @param[in] dev
16561  *   Pointer to the Ethernet device structure.
16562  * @param[in] handle
16563  *   The indirect action list handle to be removed.
16564  * @param[out] error
16565  *   Perform verbose error reporting if not NULL. Initialized in case of
16566  *   error only.
16567  *
16568  * @return
16569  *   0 on success, otherwise negative errno value.
16570  */
16571 int
16572 mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
16573 			     struct rte_flow_action_list_handle *handle,
16574 			     struct rte_flow_error *error)
16575 {
16576 	struct mlx5_priv *priv = dev->data->dev_private;
16577 	struct mlx5_hw_encap_decap_action *action;
16578 
16579 	action = (struct mlx5_hw_encap_decap_action *)handle;
16580 	if (!priv->dr_ctx || !action)
16581 		return rte_flow_error_set(error, ENOTSUP,
16582 					  RTE_FLOW_ERROR_TYPE_ACTION, handle,
16583 					  "Reformat: invalid action handle");
16584 	mlx5dr_action_destroy(action->action);
16585 	mlx5_free(handle);
16586 	return 0;
16587 }
16588 
16589 static bool
16590 flow_hw_is_item_masked(const struct rte_flow_item *item)
16591 {
16592 	const uint8_t *byte;
16593 	int size;
16594 	int i;
16595 
16596 	if (item->mask == NULL)
16597 		return false;
16598 
16599 	switch ((int)item->type) {
16600 	case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
16601 		size = sizeof(struct rte_flow_item_tag);
16602 		break;
16603 	case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
16604 		size = sizeof(struct mlx5_rte_flow_item_sq);
16605 		break;
16606 	default:
16607 		size = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_MASK, NULL, 0, item, NULL);
16608 		/*
16609 		 * Pattern template items are passed to this function.
16610 		 * These items were already validated, so error is not expected.
16611 		 * Also, since the mask is not NULL here, the computed mask size is always greater than 0.
16612 		 */
16613 		MLX5_ASSERT(size > 0);
16614 	}
16615 
16616 	byte = (const uint8_t *)item->mask;
16617 	for (i = 0; i < size; ++i)
16618 		if (byte[i])
16619 			return true;
16620 
16621 	return false;
16622 }
16623 
16624 static int
16625 flow_hw_validate_rule_pattern(struct rte_eth_dev *dev,
16626 			      const struct rte_flow_template_table *table,
16627 			      const uint8_t pattern_template_idx,
16628 			      const struct rte_flow_item items[],
16629 			      struct rte_flow_error *error)
16630 {
16631 	const struct rte_flow_pattern_template *pt;
16632 	const struct rte_flow_item *pt_item;
16633 
16634 	if (pattern_template_idx >= table->nb_item_templates)
16635 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16636 					  "Pattern template index out of range");
16637 
16638 	pt = table->its[pattern_template_idx];
16639 	pt_item = pt->items;
16640 
16641 	/* If any item was prepended, skip it. */
16642 	if (pt->implicit_port || pt->implicit_tag)
16643 		pt_item++;
16644 
16645 	for (; pt_item->type != RTE_FLOW_ITEM_TYPE_END; pt_item++, items++) {
16646 		if (pt_item->type != items->type)
16647 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
16648 						  items, "Item type does not match the template");
16649 
16650 		/*
16651 		 * Assumptions:
16652 		 * - Currently mlx5dr layer contains info on which fields in masks are supported.
16653 		 * - This info is not exposed to PMD directly.
16654 		 * - Because of that, it is assumed that since pattern template is correct,
16655 		 *   then, items' masks in pattern template have nonzero values only in
16656 		 *   supported fields.
16657 		 *   This is known, because a temporary mlx5dr matcher is created during pattern
16658 		 *   template creation to validate the template.
16659 		 * - As a result, it is safe to look for nonzero bytes in mask to determine if
16660 		 *   item spec is needed in a flow rule.
16661 		 */
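		/*
		 * For example, if the pattern template's ETH item mask covers only
		 * hdr.ether_type, every rule created from that template must provide
		 * an ETH item spec, while an item whose template mask is all zeros
		 * may omit the spec.
		 */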
16662 		if (!flow_hw_is_item_masked(pt_item))
16663 			continue;
16664 
16665 		if (items->spec == NULL)
16666 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
16667 						  items, "Item spec is required");
16668 
16669 		switch (items->type) {
16670 		const struct rte_flow_item_ethdev *ethdev;
16671 		const struct rte_flow_item_tx_queue *tx_queue;
16672 		struct mlx5_txq_ctrl *txq;
16673 
16674 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
16675 			ethdev = items->spec;
16676 			if (flow_hw_validate_target_port_id(dev, ethdev->port_id)) {
16677 				return rte_flow_error_set(error, EINVAL,
16678 							  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, items,
16679 							  "Invalid port");
16680 			}
16681 			break;
16682 		case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
16683 			tx_queue = items->spec;
16684 			if (mlx5_is_external_txq(dev, tx_queue->tx_queue))
16685 				continue;
16686 			txq = mlx5_txq_get(dev, tx_queue->tx_queue);
16687 			if (!txq)
16688 				return rte_flow_error_set(error, EINVAL,
16689 							  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, items,
16690 							  "Invalid Tx queue");
16691 			mlx5_txq_release(dev, tx_queue->tx_queue);
			break;
16692 		default:
16693 			break;
16694 		}
16695 	}
16696 
16697 	return 0;
16698 }
16699 
16700 static bool
16701 flow_hw_valid_indirect_action_type(const struct rte_flow_action *user_action,
16702 				   const enum rte_flow_action_type expected_type)
16703 {
16704 	uint32_t user_indirect_type = MLX5_INDIRECT_ACTION_TYPE_GET(user_action->conf);
16705 	uint32_t expected_indirect_type;
16706 
16707 	switch ((int)expected_type) {
16708 	case RTE_FLOW_ACTION_TYPE_RSS:
16709 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
16710 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_RSS;
16711 		break;
16712 	case RTE_FLOW_ACTION_TYPE_COUNT:
16713 	case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
16714 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_COUNT;
16715 		break;
16716 	case RTE_FLOW_ACTION_TYPE_AGE:
16717 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_AGE;
16718 		break;
16719 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
16720 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
16721 		break;
16722 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
16723 	case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
16724 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
16725 		break;
16726 	case RTE_FLOW_ACTION_TYPE_QUOTA:
16727 		expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_QUOTA;
16728 		break;
16729 	default:
16730 		return false;
16731 	}
16732 
16733 	return user_indirect_type == expected_indirect_type;
16734 }
16735 
16736 static int
16737 flow_hw_validate_rule_actions(struct rte_eth_dev *dev,
16738 			      const struct rte_flow_template_table *table,
16739 			      const uint8_t actions_template_idx,
16740 			      const struct rte_flow_action actions[],
16741 			      struct rte_flow_error *error)
16742 {
16743 	const struct rte_flow_actions_template *at;
16744 	const struct mlx5_hw_actions *hw_acts;
16745 	const struct mlx5_action_construct_data *act_data;
16746 	unsigned int idx;
16747 
16748 	if (actions_template_idx >= table->nb_action_templates)
16749 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16750 					  "Actions template index out of range");
16751 
16752 	at = table->ats[actions_template_idx].action_template;
16753 	hw_acts = &table->ats[actions_template_idx].acts;
16754 
16755 	for (idx = 0; actions[idx].type != RTE_FLOW_ACTION_TYPE_END; ++idx) {
16756 		const struct rte_flow_action *user_action = &actions[idx];
16757 		const struct rte_flow_action *tmpl_action = &at->orig_actions[idx];
16758 
16759 		if (user_action->type != tmpl_action->type)
16760 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
16761 						  user_action,
16762 						  "Action type does not match type specified in "
16763 						  "actions template");
16764 	}
16765 
16766 	/*
16767 	 * Only go through unmasked actions and check if configuration is provided.
16768 	 * Configuration of masked actions is ignored.
16769 	 */
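	/*
	 * For example, a QUEUE action fully masked in the actions template takes its
	 * configuration from the template, so the per-rule conf is ignored here, while an
	 * unmasked QUEUE action appears in the list below and must carry a valid per-rule
	 * configuration.
	 */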
16770 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
16771 		const struct rte_flow_action *user_action;
16772 
16773 		user_action = &actions[act_data->action_src];
16774 
16775 		/* Skip actions which do not require conf. */
16776 		switch ((int)act_data->type) {
16777 		case RTE_FLOW_ACTION_TYPE_COUNT:
16778 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
16779 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
16780 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
16781 			continue;
16782 		default:
16783 			break;
16784 		}
16785 
16786 		if (user_action->conf == NULL)
16787 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
16788 						  user_action,
16789 						  "Action requires configuration");
16790 
16791 		switch ((int)user_action->type) {
16792 		enum rte_flow_action_type expected_type;
16793 		const struct rte_flow_action_ethdev *ethdev;
16794 		const struct rte_flow_action_modify_field *mf;
16795 
16796 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
16797 			expected_type = act_data->indirect.expected_type;
16798 			if (!flow_hw_valid_indirect_action_type(user_action, expected_type))
16799 				return rte_flow_error_set(error, EINVAL,
16800 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16801 							  user_action,
16802 							  "Indirect action type does not match "
16803 							  "the type specified in the mask");
16804 			break;
16805 		case RTE_FLOW_ACTION_TYPE_QUEUE:
16806 			if (mlx5_flow_validate_target_queue(dev, user_action, error))
16807 				return -rte_errno;
16808 			break;
16809 		case RTE_FLOW_ACTION_TYPE_RSS:
16810 			if (mlx5_validate_action_rss(dev, user_action, error))
16811 				return -rte_errno;
16812 			break;
16813 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
16814 			/* TODO: Compare other fields if needed. */
16815 			mf = user_action->conf;
16816 			if (mf->operation != act_data->modify_header.action.operation ||
16817 			    mf->src.field != act_data->modify_header.action.src.field ||
16818 			    mf->dst.field != act_data->modify_header.action.dst.field ||
16819 			    mf->width != act_data->modify_header.action.width)
16820 				return rte_flow_error_set(error, EINVAL,
16821 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16822 							  user_action,
16823 							  "Modify field configuration does not "
16824 							  "match configuration from actions "
16825 							  "template");
16826 			break;
16827 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
16828 			ethdev = user_action->conf;
16829 			if (flow_hw_validate_target_port_id(dev, ethdev->port_id)) {
16830 				return rte_flow_error_set(error, EINVAL,
16831 							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
16832 							  user_action, "Invalid port");
16833 			}
16834 			break;
16835 		default:
16836 			break;
16837 		}
16838 	}
16839 
16840 	return 0;
16841 }
16842 
16843 static int
16844 flow_hw_async_op_validate(struct rte_eth_dev *dev,
16845 			  const uint32_t queue,
16846 			  const struct rte_flow_template_table *table,
16847 			  struct rte_flow_error *error)
16848 {
16849 	struct mlx5_priv *priv = dev->data->dev_private;
16850 
16851 	MLX5_ASSERT(table != NULL);
16852 
16853 	if (table->cfg.external && queue >= priv->hw_attr->nb_queue)
16854 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
16855 					  "Incorrect queue");
16856 
16857 	return 0;
16858 }
16859 
16860 /**
16861  * Validate user input for rte_flow_async_create() implementation.
16862  *
16863  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16864  *
16865  * @param[in] dev
16866  *   Pointer to the rte_eth_dev structure.
16867  * @param[in] queue
16868  *   The queue to create the flow.
16869  * @param[in] table
16870  *   Pointer to template table.
16871  * @param[in] rule_index
16872  *   Index of the flow rule in the table, used for index-based insertion.
16873  * @param[in] items
16874  *   Items with flow spec values.
16875  * @param[in] pattern_template_index
16876  *   Index of the pattern template to use from the table.
16877  * @param[in] actions
16878  *   Actions with flow spec values.
16879  * @param[in] action_template_index
16880  *   Index of the actions template to use from the table.
16881  * @param[out] error
16882  *   Pointer to error structure.
16883  *
16884  * @return
16885  *    0 if user input is valid.
16886  *    Negative errno otherwise, rte_errno and error struct is populated.
16887  */
16888 static int
16889 flow_hw_async_create_validate(struct rte_eth_dev *dev,
16890 			      const uint32_t queue,
16891 			      const struct rte_flow_template_table *table,
16892 			      enum rte_flow_table_insertion_type insertion_type,
16893 			      uint32_t rule_index,
16894 			      const struct rte_flow_item items[],
16895 			      const uint8_t pattern_template_index,
16896 			      const struct rte_flow_action actions[],
16897 			      const uint8_t action_template_index,
16898 			      struct rte_flow_error *error)
16899 {
16900 	if (flow_hw_async_op_validate(dev, queue, table, error))
16901 		return -rte_errno;
16902 
16903 	if (insertion_type != table->cfg.attr.insertion_type)
16904 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16905 					  NULL, "Flow rule insertion type mismatch with table configuration");
16906 
16907 	if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
16908 		if (rule_index >= table->cfg.attr.nb_flows)
16909 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16910 						  NULL, "Flow rule index exceeds table size");
16911 
16912 	if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_INDEX)
16913 		if (flow_hw_validate_rule_pattern(dev, table, pattern_template_index, items, error))
16914 			return -rte_errno;
16915 
16916 	if (flow_hw_validate_rule_actions(dev, table, action_template_index, actions, error))
16917 		return -rte_errno;
16918 
16919 	return 0;
16920 }
16921 
16922 /**
16923  * Validate user input for rte_flow_async_update() implementation.
16924  *
16925  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16926  *
16927  * @param[in] dev
16928  *   Pointer to the rte_eth_dev structure.
16929  * @param[in] queue
16930  *   The queue used to update the flow.
16931  * @param[in] flow
16932  *   Flow rule to be updated.
16933  * @param[in] actions
16934  *   Actions with flow spec values.
16935  * @param[in] action_template_index
16936  *   Index of the actions template to use from the table.
16937  * @param[out] error
16938  *   Pointer to error structure.
16939  *
16940  * @return
16941  *    0 if user input is valid.
16942  *    Negative errno otherwise, rte_errno and error struct is set.
16943  */
16944 static int
16945 flow_hw_async_update_validate(struct rte_eth_dev *dev,
16946 			      const uint32_t queue,
16947 			      const struct rte_flow_hw *flow,
16948 			      const struct rte_flow_action actions[],
16949 			      const uint8_t action_template_index,
16950 			      struct rte_flow_error *error)
16951 {
16952 	if (flow_hw_async_op_validate(dev, queue, flow->table, error))
16953 		return -rte_errno;
16954 
16955 	if (flow_hw_validate_rule_actions(dev, flow->table, action_template_index, actions, error))
16956 		return -rte_errno;
16957 
16958 	return 0;
16959 }
16960 
16961 /**
16962  * Validate user input for rte_flow_async_destroy() implementation.
16963  *
16964  * If RTE_LIBRTE_MLX5_DEBUG macro is not defined, this function is a no-op.
16965  *
16966  * @param[in] dev
16967  *   Pointer to the rte_eth_dev structure.
16968  * @param[in] queue
16969  *   The queue used to destroy the flow.
16970  * @param[in] flow
16971  *   Flow rule to be destroyed.
16972  * @param[out] error
16973  *   Pointer to error structure.
16974  *
16975  * @return
16976  *    0 if user input is valid.
16977  *    Negative errno otherwise, rte_errno and error struct is set.
16978  */
16979 static int
16980 flow_hw_async_destroy_validate(struct rte_eth_dev *dev,
16981 			       const uint32_t queue,
16982 			       const struct rte_flow_hw *flow,
16983 			       struct rte_flow_error *error)
16984 {
16985 	if (flow_hw_async_op_validate(dev, queue, flow->table, error))
16986 		return -rte_errno;
16987 
16988 	return 0;
16989 }
16990 
16991 static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops = {
16992 	.async_create = flow_hw_async_flow_create,
16993 	.async_create_by_index = flow_hw_async_flow_create_by_index,
16994 	.async_create_by_index_with_pattern = flow_hw_async_flow_create_by_index_with_pattern,
16995 	.async_actions_update = flow_hw_async_flow_update,
16996 	.async_destroy = flow_hw_async_flow_destroy,
16997 	.push = flow_hw_push,
16998 	.pull = flow_hw_pull,
16999 	.async_action_handle_create = flow_hw_action_handle_create,
17000 	.async_action_handle_destroy = flow_hw_action_handle_destroy,
17001 	.async_action_handle_update = flow_hw_action_handle_update,
17002 	.async_action_handle_query = flow_hw_action_handle_query,
17003 	.async_action_handle_query_update = flow_hw_async_action_handle_query_update,
17004 	.async_action_list_handle_create = flow_hw_async_action_list_handle_create,
17005 	.async_action_list_handle_destroy = flow_hw_async_action_list_handle_destroy,
17006 	.async_action_list_handle_query_update =
17007 		flow_hw_async_action_list_handle_query_update,
17008 };
17009 
17010 #endif
17011