/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>

#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
#include "mlx5_hws_cnt.h"

/* The maximum number of actions supported in a flow. */
#define MLX5_HW_MAX_ACTS 16

/*
 * The default ipool threshold value indicates which per_core_cache
 * value to set.
 */
#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
/* The default min local cache size. */
#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)

/* Default push burst threshold. */
#define BURST_THR 32u

/* Default queue to flush the flows. */
#define MLX5_DEFAULT_FLUSH_QUEUE 0

/* Maximum number of rules in control flow tables. */
#define MLX5_HW_CTRL_FLOW_NB_RULES (4096)

/* Lowest flow group usable by an application if group translation is done. */
#define MLX5_HW_LOWEST_USABLE_GROUP (1)

/* Maximum group index usable by user applications for transfer flows. */
#define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)

/* Maximum group index usable by user applications for egress flows. */
#define MLX5_HW_MAX_EGRESS_GROUP (UINT32_MAX - 1)

/* Lowest priority for HW root table. */
#define MLX5_HW_LOWEST_PRIO_ROOT 15

/* Lowest priority for HW non-root table. */
#define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)

/* Priorities for Rx control flow rules. */
#define MLX5_HW_CTRL_RX_PRIO_L2 (MLX5_HW_LOWEST_PRIO_ROOT)
#define MLX5_HW_CTRL_RX_PRIO_L3 (MLX5_HW_LOWEST_PRIO_ROOT - 1)
#define MLX5_HW_CTRL_RX_PRIO_L4 (MLX5_HW_LOWEST_PRIO_ROOT - 2)

#define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
#define MLX5_HW_VLAN_PUSH_VID_IDX 1
#define MLX5_HW_VLAN_PUSH_PCP_IDX 2

static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
				   const struct mlx5_flow_template_table_cfg *cfg,
				   uint32_t group,
				   uint32_t *table_group,
				   struct rte_flow_error *error);
static __rte_always_inline int
flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
			       struct mlx5_hw_q_job *job,
			       struct mlx5_action_construct_data *act_data,
			       const struct mlx5_hw_actions *hw_acts,
			       const struct rte_flow_action *action);

static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

/* DR action flags for each table type. */
static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
				[MLX5DR_TABLE_TYPE_MAX] = {
	{
		MLX5DR_ACTION_FLAG_ROOT_RX,
		MLX5DR_ACTION_FLAG_ROOT_TX,
		MLX5DR_ACTION_FLAG_ROOT_FDB,
	},
	{
		MLX5DR_ACTION_FLAG_HWS_RX,
		MLX5DR_ACTION_FLAG_HWS_TX,
		MLX5DR_ACTION_FLAG_HWS_FDB,
	},
};

/* Ethernet item spec for promiscuous mode. */
static const struct rte_flow_item_eth ctrl_rx_eth_promisc_spec = {
	.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};
/* Ethernet item mask for promiscuous mode. */
static const struct rte_flow_item_eth ctrl_rx_eth_promisc_mask = {
	.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};

/* Ethernet item spec for all multicast mode. */
static const struct rte_flow_item_eth ctrl_rx_eth_mcast_spec = {
	.hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};
/* Ethernet item mask for all multicast mode. */
static const struct rte_flow_item_eth ctrl_rx_eth_mcast_mask = {
	.hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};

/* Ethernet item spec for IPv4 multicast traffic. */
static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_spec = {
	.hdr.dst_addr.addr_bytes = "\x01\x00\x5e\x00\x00\x00",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};
/* Ethernet item mask for IPv4 multicast traffic. */
static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_mask = {
	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\x00\x00\x00",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};

/* Ethernet item spec for IPv6 multicast traffic. */
static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_spec = {
	.hdr.dst_addr.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};
/* Ethernet item mask for IPv6 multicast traffic. */
static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_mask = {
	.hdr.dst_addr.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};

/* Ethernet item mask for unicast traffic. */
static const struct rte_flow_item_eth ctrl_rx_eth_dmac_mask = {
	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};

/* Ethernet item spec for broadcast. */
static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	.hdr.ether_type = 0,
};

/**
 * Set the hash fields according to the @p rss_desc information.
 *
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
 * @param[out] hash_fields
 *   Pointer to the RSS hash fields.
 */
static void
flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
		       uint64_t *hash_fields)
{
	uint64_t fields = 0;
	int rss_inner = 0;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss_desc->level >= 2)
		rss_inner = 1;
#endif
	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
			fields |= IBV_RX_HASH_SRC_IPV4;
		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
			fields |= IBV_RX_HASH_DST_IPV4;
		else
			fields |= MLX5_IPV4_IBV_RX_HASH;
	} else if (rss_types & MLX5_IPV6_LAYER_TYPES) {
		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
			fields |= IBV_RX_HASH_SRC_IPV6;
		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
			fields |= IBV_RX_HASH_DST_IPV6;
		else
			fields |= MLX5_IPV6_IBV_RX_HASH;
	}
	if (rss_types & RTE_ETH_RSS_UDP) {
		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
			fields |= IBV_RX_HASH_SRC_PORT_UDP;
		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
			fields |= IBV_RX_HASH_DST_PORT_UDP;
		else
			fields |= MLX5_UDP_IBV_RX_HASH;
	} else if (rss_types & RTE_ETH_RSS_TCP) {
		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
			fields |= IBV_RX_HASH_SRC_PORT_TCP;
		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
			fields |= IBV_RX_HASH_DST_PORT_TCP;
		else
			fields |= MLX5_TCP_IBV_RX_HASH;
	}
	if (rss_types & RTE_ETH_RSS_ESP)
		fields |= IBV_RX_HASH_IPSEC_SPI;
	if (rss_inner)
		fields |= IBV_RX_HASH_INNER;
	*hash_fields = fields;
}

/**
 * Generate the pattern item flags.
 * Will be used for shared RSS action.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   Item flags.
 */
static uint64_t
flow_hw_rss_item_flags_get(const struct rte_flow_item items[])
{
	uint64_t item_flags = 0;
	uint64_t last_item = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int item_type = items->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			last_item = MLX5_FLOW_LAYER_GENEVE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP:
			last_item = MLX5_FLOW_LAYER_GTP;
			break;
		default:
			break;
		}
		item_flags |= last_item;
	}
	return item_flags;
}

/**
 * Register destination table DR jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] cfg
 *   Pointer to the template table configuration.
 * @param[in] dest_group
 *   The destination group ID.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *    Jump action on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_hw_jump_action *
flow_hw_jump_action_register(struct rte_eth_dev *dev,
			     const struct mlx5_flow_template_table_cfg *cfg,
			     uint32_t dest_group,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_attr jattr = cfg->attr.flow_attr;
	struct mlx5_flow_group *grp;
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &jattr,
	};
	struct mlx5_list_entry *ge;
	uint32_t target_group;

	target_group = dest_group;
	if (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))
		return NULL;
	jattr.group = target_group;
	ge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);
	if (!ge)
		return NULL;
	grp = container_of(ge, struct mlx5_flow_group, entry);
	return &grp->jump;
}

/**
 * Release jump action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] jump
 *   Pointer to the jump action.
 */
static void
flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_group *grp;

	grp = container_of
		(jump, struct mlx5_flow_group, jump);
	mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
}

/**
 * Register queue/RSS action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] hws_flags
 *   DR action flags.
 * @param[in] action
 *   Pointer to the rte flow action.
 *
 * @return
 *    TIR action (hash Rx queue) on success, NULL otherwise and rte_errno is set.
 */
static inline struct mlx5_hrxq*
flow_hw_tir_action_register(struct rte_eth_dev *dev,
			    uint32_t hws_flags,
			    const struct rte_flow_action *action)
{
	struct mlx5_flow_rss_desc rss_desc = {
		.hws_flags = hws_flags,
	};
	struct mlx5_hrxq *hrxq;

	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		const struct rte_flow_action_queue *queue = action->conf;

		rss_desc.const_q = &queue->index;
		rss_desc.queue_num = 1;
	} else {
		const struct rte_flow_action_rss *rss = action->conf;

		rss_desc.queue_num = rss->queue_num;
		rss_desc.const_q = rss->queue;
		memcpy(rss_desc.key,
		       !rss->key ? rss_hash_default_key : rss->key,
		       MLX5_RSS_HASH_KEY_LEN);
		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
		flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
		flow_dv_action_rss_l34_hash_adjust(rss->types,
						   &rss_desc.hash_fields);
		if (rss->level > 1) {
			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
			rss_desc.tunnel = 1;
		}
	}
	hrxq = mlx5_hrxq_get(dev, &rss_desc);
	return hrxq;
}

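/**
 * Compile an ASO connection tracking action into a DR rule action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   Queue used to check CT object readiness.
 * @param[in] idx
 *   Index of the CT object in the CT pool.
 * @param[in,out] rule_act
 *   Pointer to the DR rule action to fill in.
 *
 * @return
 *    0 on success, -1 if the CT object cannot be retrieved or is not ready.
 */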
static __rte_always_inline int
flow_hw_ct_compile(struct rte_eth_dev *dev,
		   uint32_t queue, uint32_t idx,
		   struct mlx5dr_rule_action *rule_act)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;

	ct = mlx5_ipool_get(priv->hws_ctpool->cts, MLX5_ACTION_CTX_CT_GET_IDX(idx));
	if (!ct || mlx5_aso_ct_available(priv->sh, queue, ct))
		return -1;
	rule_act->action = priv->hws_ctpool->dr_action;
	rule_act->aso_ct.offset = ct->offset;
	rule_act->aso_ct.direction = ct->is_original ?
		MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
		MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
	return 0;
}

/**
 * Destroy DR actions created by an action template.
 *
 * DR actions are created when table creation translates the action
 * template; they must be destroyed together with the table.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 */
static void
__flow_hw_action_template_destroy(struct rte_eth_dev *dev,
				 struct mlx5_hw_actions *acts)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_action_construct_data *data;

	while (!LIST_EMPTY(&acts->act_list)) {
		data = LIST_FIRST(&acts->act_list);
		LIST_REMOVE(data, next);
		mlx5_ipool_free(priv->acts_ipool, data->idx);
	}

	if (acts->mark)
		if (!__atomic_sub_fetch(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED))
			flow_hw_rxq_flag_set(dev, false);

	if (acts->jump) {
		struct mlx5_flow_group *grp;

		grp = container_of
			(acts->jump, struct mlx5_flow_group, jump);
		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
		acts->jump = NULL;
	}
	if (acts->tir) {
		mlx5_hrxq_release(dev, acts->tir->idx);
		acts->tir = NULL;
	}
	if (acts->encap_decap) {
		if (acts->encap_decap->action)
			mlx5dr_action_destroy(acts->encap_decap->action);
		mlx5_free(acts->encap_decap);
		acts->encap_decap = NULL;
	}
	if (acts->mhdr) {
		if (acts->mhdr->action)
			mlx5dr_action_destroy(acts->mhdr->action);
		mlx5_free(acts->mhdr);
		acts->mhdr = NULL;
	}
	if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
		mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
		acts->cnt_id = 0;
	}
	if (acts->mtr_id) {
		mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
		acts->mtr_id = 0;
	}
}

/**
 * Allocate a dynamic action data entry from the actions ipool.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *    Pointer to the allocated action data on success, NULL otherwise
 *    and rte_errno is set.
 */
static __rte_always_inline struct mlx5_action_construct_data *
__flow_hw_act_data_alloc(struct mlx5_priv *priv,
			 enum rte_flow_action_type type,
			 uint16_t action_src,
			 uint16_t action_dst)
{
	struct mlx5_action_construct_data *act_data;
	uint32_t idx = 0;

	act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
	if (!act_data)
		return NULL;
	act_data->idx = idx;
	act_data->type = type;
	act_data->action_src = action_src;
	act_data->action_dst = action_dst;
	return act_data;
}

/**
 * Append dynamic action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *    0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_general_append(struct mlx5_priv *priv,
				  struct mlx5_hw_actions *acts,
				  enum rte_flow_action_type type,
				  uint16_t action_src,
				  uint16_t action_dst)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}

/**
 * Append dynamic encap action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[in] len
 *   Length of the data to be updated.
 *
 * @return
 *    0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_encap_append(struct mlx5_priv *priv,
				struct mlx5_hw_actions *acts,
				enum rte_flow_action_type type,
				uint16_t action_src,
				uint16_t action_dst,
				uint16_t len)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	act_data->encap.len = len;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}

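/**
 * Append dynamic modify header action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[in] mhdr_cmds_off
 *   Offset of the first modify header command owned by this action.
 * @param[in] mhdr_cmds_end
 *   Offset right after the last modify header command owned by this action.
 * @param[in] shared
 *   Whether the action commands are fully masked (shared by all rules).
 * @param[in] field
 *   Modify field descriptions (destination for SET/ADD, source for COPY).
 * @param[in] dcopy
 *   Destination field descriptions for COPY operations.
 * @param[in] mask
 *   Field masks.
 *
 * @return
 *    0 on success, negative value otherwise and rte_errno is set.
 */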
static __rte_always_inline int
__flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
				     struct mlx5_hw_actions *acts,
				     enum rte_flow_action_type type,
				     uint16_t action_src,
				     uint16_t action_dst,
				     uint16_t mhdr_cmds_off,
				     uint16_t mhdr_cmds_end,
				     bool shared,
				     struct field_modify_info *field,
				     struct field_modify_info *dcopy,
				     uint32_t *mask)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
	act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
	act_data->modify_header.shared = shared;
	rte_memcpy(act_data->modify_header.field, field,
		   sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
	rte_memcpy(act_data->modify_header.dcopy, dcopy,
		   sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
	rte_memcpy(act_data->modify_header.mask, mask,
		   sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}

/**
 * Append shared RSS action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[in] idx
 *   Shared RSS index.
 * @param[in] rss
 *   Pointer to the shared RSS info.
 *
 * @return
 *    0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
				     struct mlx5_hw_actions *acts,
				     enum rte_flow_action_type type,
				     uint16_t action_src,
				     uint16_t action_dst,
				     uint32_t idx,
				     struct mlx5_shared_action_rss *rss)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	act_data->shared_rss.level = rss->origin.level;
	act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
				     rss->origin.types;
	act_data->shared_rss.idx = idx;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}

/**
 * Append shared counter action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[in] cnt_id
 *   Shared counter id.
 *
 * @return
 *    0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
				     struct mlx5_hw_actions *acts,
				     enum rte_flow_action_type type,
				     uint16_t action_src,
				     uint16_t action_dst,
				     cnt_id_t cnt_id)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	act_data->type = type;
	act_data->shared_counter.id = cnt_id;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}

/**
 * Append shared meter_mark action to the dynamic action list.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[in] mtr_id
 *   Shared meter id.
 *
 * @return
 *    0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
__flow_hw_act_data_shared_mtr_append(struct mlx5_priv *priv,
				     struct mlx5_hw_actions *acts,
				     enum rte_flow_action_type type,
				     uint16_t action_src,
				     uint16_t action_dst,
				     cnt_id_t mtr_id)
{
	struct mlx5_action_construct_data *act_data;

	act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
	if (!act_data)
		return -1;
	act_data->type = type;
	act_data->shared_meter.id = mtr_id;
	LIST_INSERT_HEAD(&acts->act_list, act_data, next);
	return 0;
}

/**
 * Translate shared indirect action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev data structure.
 * @param[in] action
 *   Pointer to the shared indirect rte_flow action.
 * @param[in] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *    0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
flow_hw_shared_action_translate(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_hw_actions *acts,
				uint16_t action_src,
				uint16_t action_dst)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss;
	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx &
		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		shared_rss = mlx5_ipool_get
		  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
		if (!shared_rss || __flow_hw_act_data_shared_rss_append
		    (priv, acts,
		    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
		    action_src, action_dst, idx, shared_rss))
			return -1;
		break;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
			(enum rte_flow_action_type)
			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
			action_src, action_dst, act_idx))
			return -1;
		break;
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		/* Not supported; prevented by the validation function. */
		MLX5_ASSERT(0);
		break;
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
				       idx, &acts->rule_acts[action_dst]))
			return -1;
		break;
	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
		if (__flow_hw_act_data_shared_mtr_append(priv, acts,
			(enum rte_flow_action_type)
			MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
			action_src, action_dst, idx))
			return -1;
		break;
	default:
		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
		break;
	}
	return 0;
}

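/**
 * Check whether a modify_field action is shared by all flow rules
 * using a template, i.e. whether its source is fully masked.
 *
 * @param[in] action
 *   Pointer to the modify_field action from the action template.
 * @param[in] mask
 *   Pointer to the corresponding action mask.
 *
 * @return
 *    True if the action is shared, false otherwise.
 */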
static __rte_always_inline bool
flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
				      const struct rte_flow_action *mask)
{
	const struct rte_flow_action_modify_field *v = action->conf;
	const struct rte_flow_action_modify_field *m = mask->conf;

	if (v->src.field == RTE_FLOW_FIELD_VALUE) {
		uint32_t j;

		if (m == NULL)
			return false;
		for (j = 0; j < RTE_DIM(m->src.value); ++j) {
			/*
			 * An immediate value is considered to be masked
			 * (and thus shared by all flow rules) if the mask
			 * is non-zero. A partial mask over an immediate
			 * value is not allowed.
			 */
			if (m->src.value[j])
				return true;
		}
		return false;
	}
	if (v->src.field == RTE_FLOW_FIELD_POINTER)
		return m->src.pvalue != NULL;
	/*
	 * Source field types other than VALUE and
	 * POINTER are always shared.
	 */
	return true;
}

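/**
 * Check whether a NOP command must be inserted into the modify header
 * commands before appending @p cmd.
 *
 * A NOP separator is needed whenever the appended command reads or
 * writes a field which the last command in the array writes to, so
 * that the two commands are not applied to the same field back-to-back.
 *
 * @param[in] mhdr
 *   Pointer to the in-construction modify header action.
 * @param[in] cmd
 *   Pointer to the modify header command to be appended.
 *
 * @return
 *    True if a NOP command must be inserted first, false otherwise.
 */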
static __rte_always_inline bool
flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
			  const struct mlx5_modification_cmd *cmd)
{
	struct mlx5_modification_cmd last_cmd = { { 0 } };
	struct mlx5_modification_cmd new_cmd = { { 0 } };
	const uint32_t cmds_num = mhdr->mhdr_cmds_num;
	unsigned int last_type;
	bool should_insert = false;

	if (cmds_num == 0)
		return false;
	last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
	last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
	last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
	last_type = last_cmd.action_type;
	new_cmd = *cmd;
	new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
	new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
	switch (new_cmd.action_type) {
	case MLX5_MODIFICATION_TYPE_SET:
	case MLX5_MODIFICATION_TYPE_ADD:
		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
		    last_type == MLX5_MODIFICATION_TYPE_ADD)
			should_insert = new_cmd.field == last_cmd.field;
		else if (last_type == MLX5_MODIFICATION_TYPE_COPY)
			should_insert = new_cmd.field == last_cmd.dst_field;
		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
			should_insert = false;
		else
			MLX5_ASSERT(false); /* Other types are not supported. */
		break;
	case MLX5_MODIFICATION_TYPE_COPY:
		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
		    last_type == MLX5_MODIFICATION_TYPE_ADD)
			should_insert = (new_cmd.field == last_cmd.field ||
					 new_cmd.dst_field == last_cmd.field);
		else if (last_type == MLX5_MODIFICATION_TYPE_COPY)
			should_insert = (new_cmd.field == last_cmd.dst_field ||
					 new_cmd.dst_field == last_cmd.dst_field);
		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
			should_insert = false;
		else
			MLX5_ASSERT(false); /* Other types are not supported. */
		break;
	default:
		/* Other action types should be rejected on AT validation. */
		MLX5_ASSERT(false);
		break;
	}
	return should_insert;
}

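/**
 * Append a NOP command to the modify header command array.
 *
 * @param[in,out] mhdr
 *   Pointer to the in-construction modify header action.
 *
 * @return
 *    0 on success, -ENOMEM if no more room is left in the command array.
 */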
static __rte_always_inline int
flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
{
	struct mlx5_modification_cmd *nop;
	uint32_t num = mhdr->mhdr_cmds_num;

	if (num + 1 >= MLX5_MHDR_MAX_CMD)
		return -ENOMEM;
	nop = mhdr->mhdr_cmds + num;
	nop->data0 = 0;
	nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
	nop->data0 = rte_cpu_to_be_32(nop->data0);
	nop->data1 = 0;
	mhdr->mhdr_cmds_num = num + 1;
	return 0;
}

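/**
 * Append a modify header command to the modify header command array.
 *
 * @param[in,out] mhdr
 *   Pointer to the in-construction modify header action.
 * @param[in] cmd
 *   Pointer to the command to append.
 *
 * @return
 *    0 on success, -ENOMEM if no more room is left in the command array.
 */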
static __rte_always_inline int
flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
			struct mlx5_modification_cmd *cmd)
{
	uint32_t num = mhdr->mhdr_cmds_num;

	if (num + 1 >= MLX5_MHDR_MAX_CMD)
		return -ENOMEM;
	mhdr->mhdr_cmds[num] = *cmd;
	mhdr->mhdr_cmds_num = num + 1;
	return 0;
}

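/**
 * Append all commands from a converted modify header resource,
 * inserting a NOP command wherever consecutive commands would
 * conflict on the same field.
 *
 * @param[in,out] mhdr
 *   Pointer to the in-construction modify header action.
 * @param[in] resource
 *   Pointer to the DV-converted modify header resource.
 *
 * @return
 *    0 on success, a negative value otherwise.
 */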
static __rte_always_inline int
flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
				   struct mlx5_flow_dv_modify_hdr_resource *resource)
{
	uint32_t idx;
	int ret;

	for (idx = 0; idx < resource->actions_num; ++idx) {
		struct mlx5_modification_cmd *src = &resource->actions[idx];

		if (flow_hw_should_insert_nop(mhdr, src)) {
			ret = flow_hw_mhdr_cmd_nop_append(mhdr);
			if (ret)
				return ret;
		}
		ret = flow_hw_mhdr_cmd_append(mhdr, src);
		if (ret)
			return ret;
	}
	return 0;
}

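/**
 * Initialize the in-construction modify header action state from
 * an actions template.
 *
 * @param[out] mhdr
 *   Pointer to the modify header action state to initialize.
 * @param[in] at
 *   Pointer to the actions template.
 */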
static __rte_always_inline void
flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
			  struct rte_flow_actions_template *at)
{
	memset(mhdr, 0, sizeof(*mhdr));
	/* Modify header action without any commands is shared by default. */
	mhdr->shared = true;
	mhdr->pos = at->mhdr_off;
}

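/**
 * Compile a modify_field action from an actions template into
 * modify header commands.
 *
 * The action is converted using the DV infrastructure and the resulting
 * commands are appended to @p mhdr. If the action is not fully masked,
 * its command range is also recorded in the dynamic action list so the
 * commands can be updated on rule creation.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes of the template table.
 * @param[in] action_start
 *   Start of the actions template actions.
 * @param[in] action
 *   Current modify_field action.
 * @param[in] action_mask
 *   Current action mask.
 * @param[in,out] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in,out] mhdr
 *   Pointer to the in-construction modify header action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *    0 on success, a negative errno otherwise and rte_errno is set.
 */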
static __rte_always_inline int
flow_hw_modify_field_compile(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action *action_start, /* Start of AT actions. */
			     const struct rte_flow_action *action, /* Current action from AT. */
			     const struct rte_flow_action *action_mask, /* Current mask from AT. */
			     struct mlx5_hw_actions *acts,
			     struct mlx5_hw_modify_header_action *mhdr,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_modify_field *conf = action->conf;
	union {
		struct mlx5_flow_dv_modify_hdr_resource resource;
		uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
			     sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
	} dummy;
	struct mlx5_flow_dv_modify_hdr_resource *resource;
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = NULL
	};
	struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
						{0, 0, MLX5_MODI_OUT_NONE} };
	struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
						{0, 0, MLX5_MODI_OUT_NONE} };
	uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
	uint32_t type, value = 0;
	uint16_t cmds_start, cmds_end;
	bool shared;
	int ret;

	/*
	 * Modify header action is shared if previous modify_field actions
	 * are shared and currently compiled action is shared.
	 */
	shared = flow_hw_action_modify_field_is_shared(action, action_mask);
	mhdr->shared &= shared;
	if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
	    conf->src.field == RTE_FLOW_FIELD_VALUE) {
		type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
								MLX5_MODIFICATION_TYPE_ADD;
		/* For SET/ADD fill the destination field (field) first. */
		mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
						  conf->width, dev,
						  attr, error);
		item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
				(void *)(uintptr_t)conf->src.pvalue :
				(void *)(uintptr_t)&conf->src.value;
		if (conf->dst.field == RTE_FLOW_FIELD_META ||
		    conf->dst.field == RTE_FLOW_FIELD_TAG ||
		    conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
		    conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
			value = *(const unaligned_uint32_t *)item.spec;
			value = rte_cpu_to_be_32(value);
			item.spec = &value;
		} else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {
			/*
			 * QFI is passed as a uint8_t integer, but it is accessed
			 * through the 2nd least significant byte of a 32-bit field
			 * in the modify header command.
			 */
			value = *(const uint8_t *)item.spec;
			value = rte_cpu_to_be_32(value << 8);
			item.spec = &value;
		}
	} else {
		type = MLX5_MODIFICATION_TYPE_COPY;
		/* For COPY fill the destination field (dcopy) without mask. */
		mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
						  conf->width, dev,
						  attr, error);
		/* Then construct the source field (field) with mask. */
		mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
						  conf->width, dev,
						  attr, error);
	}
	item.mask = &mask;
	memset(&dummy, 0, sizeof(dummy));
	resource = &dummy.resource;
	ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
	if (ret)
		return ret;
	MLX5_ASSERT(resource->actions_num > 0);
	/*
	 * If the previous modify field action collides with this one, insert a
	 * NOP command. The NOP will not be part of this action's command range
	 * used to update commands on rule creation.
	 */
	if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
		ret = flow_hw_mhdr_cmd_nop_append(mhdr);
		if (ret)
			return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						  NULL, "too many modify field operations specified");
	}
	cmds_start = mhdr->mhdr_cmds_num;
	ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
	if (ret)
		return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "too many modify field operations specified");

	cmds_end = mhdr->mhdr_cmds_num;
	if (shared)
		return 0;
	ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
						   action - action_start, mhdr->pos,
						   cmds_start, cmds_end, shared,
						   field, dcopy, mask);
	if (ret)
		return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "not enough memory to store modify field metadata");
	return 0;
}

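/**
 * Compile a REPRESENTED_PORT action.
 *
 * If the port is masked in the template, the vport DR action is resolved
 * at translation time; otherwise the action is appended to the dynamic
 * action list for resolution on rule creation.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes of the template table.
 * @param[in] action_start
 *   Start of the actions template actions.
 * @param[in] action
 *   Current REPRESENTED_PORT action.
 * @param[in] action_mask
 *   Current action mask.
 * @param[in,out] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] action_dst
 *   Offset of destination DR action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *    0 on success, a negative errno otherwise and rte_errno is set.
 */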
static int
flow_hw_represented_port_compile(struct rte_eth_dev *dev,
				 const struct rte_flow_attr *attr,
				 const struct rte_flow_action *action_start,
				 const struct rte_flow_action *action,
				 const struct rte_flow_action *action_mask,
				 struct mlx5_hw_actions *acts,
				 uint16_t action_dst,
				 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_ethdev *v = action->conf;
	const struct rte_flow_action_ethdev *m = action_mask->conf;
	int ret;

	if (!attr->group)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "represented_port action cannot"
					  " be used on group 0");
	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL,
					  "represented_port action requires"
					  " transfer attribute");
	if (attr->ingress || attr->egress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "represented_port action cannot"
					  " be used with direction attributes");
	if (!priv->master)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "represented_port action must"
					  " be used on proxy port");
	if (m && !!m->port_id) {
		struct mlx5_priv *port_priv;

		if (!v)
			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
						  action, "port index was not provided");
		port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
		if (port_priv == NULL)
			return rte_flow_error_set
					(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "port does not exist or unable to"
					 " obtain E-Switch info for port");
		MLX5_ASSERT(priv->hw_vport != NULL);
		if (priv->hw_vport[v->port_id]) {
			acts->rule_acts[action_dst].action =
					priv->hw_vport[v->port_id];
		} else {
			return rte_flow_error_set
					(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot use represented_port action"
					 " with this port");
		}
	} else {
		ret = __flow_hw_act_data_general_append
				(priv, acts, action->type,
				 action - action_start, action_dst);
		if (ret)
			return rte_flow_error_set
					(error, ENOMEM,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "not enough memory to store"
					 " vport action");
	}
	return 0;
}

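/**
 * Compile a masked METER action into an ASO meter DR action and a jump
 * action to the meter's flow group.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] cfg
 *   Pointer to the template table configuration.
 * @param[in] aso_mtr_pos
 *   Offset of the ASO meter DR action.
 * @param[in] jump_pos
 *   Offset of the jump DR action, assumed to follow the meter action.
 * @param[in] action
 *   Current METER action.
 * @param[in,out] acts
 *   Pointer to the template HW steering DR actions.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *    0 on success, a negative errno otherwise.
 */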
static __rte_always_inline int
flow_hw_meter_compile(struct rte_eth_dev *dev,
		      const struct mlx5_flow_template_table_cfg *cfg,
		      uint16_t aso_mtr_pos,
		      uint16_t jump_pos,
		      const struct rte_flow_action *action,
		      struct mlx5_hw_actions *acts,
		      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr *aso_mtr;
	const struct rte_flow_action_meter *meter = action->conf;
	uint32_t group = cfg->attr.flow_attr.group;

	aso_mtr = mlx5_aso_meter_by_idx(priv, meter->mtr_id);
	acts->rule_acts[aso_mtr_pos].action = priv->mtr_bulk.action;
	acts->rule_acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
	acts->jump = flow_hw_jump_action_register
		(dev, cfg, aso_mtr->fm.group, error);
	if (!acts->jump)
		return -ENOMEM;
	acts->rule_acts[jump_pos].action = (!!group) ?
				    acts->jump->hws_action :
				    acts->jump->root_action;
	if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
		return -ENOMEM;
	return 0;
}

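/**
 * Compile a masked COUNT action by taking a shared counter from the
 * HWS counter pool and resolving its DR action and offset.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] start_pos
 *   Offset of the counter DR action.
 * @param[in,out] acts
 *   Pointer to the template HW steering DR actions.
 *
 * @return
 *    0 on success, a negative value otherwise.
 */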
static __rte_always_inline int
flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t start_pos,
		      struct mlx5_hw_actions *acts)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t pos = start_pos;
	cnt_id_t cnt_id;
	int ret;

	ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
	if (ret != 0)
		return ret;
	ret = mlx5_hws_cnt_pool_get_action_offset
				(priv->hws_cpool,
				 cnt_id,
				 &acts->rule_acts[pos].action,
				 &acts->rule_acts[pos].counter.offset);
	if (ret != 0)
		return ret;
	acts->cnt_id = cnt_id;
	return 0;
}

static __rte_always_inline bool
is_of_vlan_pcp_present(const struct rte_flow_action *actions)
{
	/*
	 * Order of RTE VLAN push actions is
	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
	 */
	return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
		RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
}

static __rte_always_inline bool
is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
{
	/*
	 * In a masked push VLAN template, all RTE push actions are masked.
	 */
	return mask && mask->ethertype != 0;
}

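/**
 * Build the big-endian VLAN header (TPID + TCI) from the push VLAN
 * action triplet OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ].
 *
 * For example, pushing ethertype 0x8100 with VID 5 and PCP 3 yields
 * the 32-bit header 0x81006005 in network byte order.
 *
 * @param[in] actions
 *   Pointer to the OF_PUSH_VLAN action inside the template.
 *
 * @return
 *    VLAN header as a big-endian 32-bit value.
 */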
static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
{
	/*
	 * The OpenFlow Switch Specification defines 802.1Q VID as 12+1 bits.
	 */
	rte_be32_t type, vid, pcp;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	rte_be32_t vid_lo, vid_hi;
#endif

	type = ((const struct rte_flow_action_of_push_vlan *)
		actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
	vid = ((const struct rte_flow_action_of_set_vlan_vid *)
		actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
	pcp = is_of_vlan_pcp_present(actions) ?
	      ((const struct rte_flow_action_of_set_vlan_pcp *)
		      actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	vid_hi = vid & 0xff;
	vid_lo = vid >> 8;
	return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type;
#else
	return (type << 16) | (pcp << 13) | vid;
#endif
}

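/**
 * Allocate an ASO meter from the meter pool and initialize it by WQE.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] queue
 *   Queue used for the ASO WQE, or MLX5_HW_INV_QUEUE for synchronous
 *   operation.
 * @param[in] action
 *   Pointer to the METER_MARK action.
 * @param[in] user_data
 *   User data attached to the ASO completion.
 * @param[in] push
 *   Whether to push the WQE immediately.
 *
 * @return
 *    Pointer to the ASO meter on success, NULL otherwise.
 */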
static __rte_always_inline struct mlx5_aso_mtr *
flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
			 const struct rte_flow_action *action,
			 void *user_data, bool push)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
	const struct rte_flow_action_meter_mark *meter_mark = action->conf;
	struct mlx5_aso_mtr *aso_mtr;
	struct mlx5_flow_meter_info *fm;
	uint32_t mtr_id;

	aso_mtr = mlx5_ipool_malloc(priv->hws_mpool->idx_pool, &mtr_id);
	if (!aso_mtr)
		return NULL;
	/* Fill the flow meter parameters. */
	aso_mtr->type = ASO_METER_INDIRECT;
	fm = &aso_mtr->fm;
	fm->meter_id = mtr_id;
	fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
	fm->is_enable = meter_mark->state;
	fm->color_aware = meter_mark->color_mode;
	aso_mtr->pool = pool;
	aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
			  ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
	aso_mtr->offset = mtr_id - 1;
	aso_mtr->init_color = (meter_mark->color_mode) ?
		meter_mark->init_color : RTE_COLOR_GREEN;
	/* Update ASO flow meter by wqe. */
	if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,
					 &priv->mtr_bulk, user_data, push)) {
		mlx5_ipool_free(pool->idx_pool, mtr_id);
		return NULL;
	}
	/* Wait for ASO object completion. */
	if (queue == MLX5_HW_INV_QUEUE &&
	    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
		mlx5_ipool_free(pool->idx_pool, mtr_id);
		return NULL;
	}
	return aso_mtr;
}

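/**
 * Compile a METER_MARK action into an ASO meter DR action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] aso_mtr_pos
 *   Offset of the ASO meter DR action.
 * @param[in] action
 *   Pointer to the METER_MARK action.
 * @param[in,out] acts
 *   Pointer to the DR rule actions.
 * @param[out] index
 *   Pointer used to return the allocated meter id.
 * @param[in] queue
 *   Queue used for the ASO WQE.
 *
 * @return
 *    0 on success, -1 otherwise.
 */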
static __rte_always_inline int
flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
			   uint16_t aso_mtr_pos,
			   const struct rte_flow_action *action,
			   struct mlx5dr_rule_action *acts,
			   uint32_t *index,
			   uint32_t queue)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
	struct mlx5_aso_mtr *aso_mtr;

	aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true);
	if (!aso_mtr)
		return -1;

	/* Compile METER_MARK action */
	acts[aso_mtr_pos].action = pool->action;
	acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
	acts[aso_mtr_pos].aso_meter.init_color =
		(enum mlx5dr_action_aso_meter_color)
		rte_col_2_mlx5_col(aso_mtr->init_color);
	*index = aso_mtr->fm.meter_id;
	return 0;
}

/**
 * Translate rte_flow actions to DR actions.
 *
 * The action template has already indicated which actions are used, so
 * translate those rte_flow actions to DR actions where possible. This
 * saves cycles at flow creation time that would otherwise be spent
 * organizing the actions. Actions with incomplete information are
 * appended to a list and completed when each flow rule is created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] cfg
 *   Pointer to the table configuration.
 * @param[in/out] acts
 *   Pointer to the template HW steering DR actions.
 * @param[in] at
 *   Action template.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno otherwise and rte_errno is set.
 */
static int
__flow_hw_actions_translate(struct rte_eth_dev *dev,
			    const struct mlx5_flow_template_table_cfg *cfg,
			    struct mlx5_hw_actions *acts,
			    struct rte_flow_actions_template *at,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
	const struct rte_flow_attr *attr = &table_attr->flow_attr;
	struct rte_flow_action *actions = at->actions;
	struct rte_flow_action *action_start = actions;
	struct rte_flow_action *masks = at->masks;
	enum mlx5dr_action_reformat_type refmt_type = 0;
	const struct rte_flow_action_raw_encap *raw_encap_data;
	const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
	uint16_t reformat_src = 0;
	uint8_t *encap_data = NULL, *encap_data_m = NULL;
	size_t data_size = 0;
	struct mlx5_hw_modify_header_action mhdr = { 0 };
	bool actions_end = false;
	uint32_t type;
	bool reformat_used = false;
	unsigned int of_vlan_offset;
	uint16_t action_pos;
	uint16_t jump_pos;
	uint32_t ct_idx;
	int err;
	uint32_t target_grp = 0;

	flow_hw_modify_field_init(&mhdr, at);
	if (attr->transfer)
		type = MLX5DR_TABLE_TYPE_FDB;
	else if (attr->egress)
		type = MLX5DR_TABLE_TYPE_NIC_TX;
	else
		type = MLX5DR_TABLE_TYPE_NIC_RX;
	for (; !actions_end; actions++, masks++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			action_pos = at->actions_off[actions - at->actions];
			if (!attr->group) {
				DRV_LOG(ERR, "Indirect action is not supported in root table.");
				goto err;
			}
			if (actions->conf && masks->conf) {
				if (flow_hw_shared_action_translate
				(dev, actions, acts, actions - action_start, action_pos))
					goto err;
			} else if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, action_pos)){
				goto err;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			action_pos = at->actions_off[actions - at->actions];
			acts->rule_acts[action_pos].action =
				priv->hw_drop[!!attr->group];
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			action_pos = at->actions_off[actions - at->actions];
			acts->mark = true;
			if (masks->conf &&
			    ((const struct rte_flow_action_mark *)
			     masks->conf)->id)
				acts->rule_acts[action_pos].tag.value =
					mlx5_flow_mark_set
					(((const struct rte_flow_action_mark *)
					(actions->conf))->id);
			else if (__flow_hw_act_data_general_append(priv, acts,
				actions->type, actions - action_start, action_pos))
				goto err;
			acts->rule_acts[action_pos].action =
				priv->hw_tag[!!attr->group];
			__atomic_add_fetch(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
			flow_hw_rxq_flag_set(dev, true);
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			action_pos = at->actions_off[actions - at->actions];
			acts->rule_acts[action_pos].action =
				priv->hw_push_vlan[type];
			if (is_template_masked_push_vlan(masks->conf))
				acts->rule_acts[action_pos].push_vlan.vlan_hdr =
					vlan_hdr_to_be32(actions);
			else if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, action_pos))
				goto err;
			of_vlan_offset = is_of_vlan_pcp_present(actions) ?
					MLX5_HW_VLAN_PUSH_PCP_IDX :
					MLX5_HW_VLAN_PUSH_VID_IDX;
			actions += of_vlan_offset;
			masks += of_vlan_offset;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			action_pos = at->actions_off[actions - at->actions];
			acts->rule_acts[action_pos].action =
				priv->hw_pop_vlan[type];
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			action_pos = at->actions_off[actions - at->actions];
			if (masks->conf &&
			    ((const struct rte_flow_action_jump *)
			     masks->conf)->group) {
				uint32_t jump_group =
					((const struct rte_flow_action_jump *)
					actions->conf)->group;
				acts->jump = flow_hw_jump_action_register
						(dev, cfg, jump_group, error);
				if (!acts->jump)
					goto err;
				acts->rule_acts[action_pos].action = (!!attr->group) ?
						acts->jump->hws_action :
						acts->jump->root_action;
			} else if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, action_pos)){
				goto err;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			action_pos = at->actions_off[actions - at->actions];
			if (masks->conf &&
			    ((const struct rte_flow_action_queue *)
			     masks->conf)->index) {
				acts->tir = flow_hw_tir_action_register
				(dev,
				 mlx5_hw_act_flag[!!attr->group][type],
				 actions);
				if (!acts->tir)
					goto err;
				acts->rule_acts[action_pos].action =
					acts->tir->action;
			} else if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, action_pos)) {
				goto err;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			action_pos = at->actions_off[actions - at->actions];
			if (actions->conf && masks->conf) {
				acts->tir = flow_hw_tir_action_register
				(dev,
				 mlx5_hw_act_flag[!!attr->group][type],
				 actions);
				if (!acts->tir)
					goto err;
				acts->rule_acts[action_pos].action =
					acts->tir->action;
			} else if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, action_pos)) {
				goto err;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
			MLX5_ASSERT(!reformat_used);
			enc_item = ((const struct rte_flow_action_vxlan_encap *)
				   actions->conf)->definition;
			if (masks->conf)
				enc_item_m = ((const struct rte_flow_action_vxlan_encap *)
					     masks->conf)->definition;
			reformat_used = true;
			reformat_src = actions - action_start;
			refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2;
			break;
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			MLX5_ASSERT(!reformat_used);
			enc_item = ((const struct rte_flow_action_nvgre_encap *)
				   actions->conf)->definition;
			if (masks->conf)
				enc_item_m = ((const struct rte_flow_action_nvgre_encap *)
					     masks->conf)->definition;
			reformat_used = true;
			reformat_src = actions - action_start;
			refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			MLX5_ASSERT(!reformat_used);
			reformat_used = true;
			refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			raw_encap_data =
				(const struct rte_flow_action_raw_encap *)
				 masks->conf;
			if (raw_encap_data)
				encap_data_m = raw_encap_data->data;
			raw_encap_data =
				(const struct rte_flow_action_raw_encap *)
				 actions->conf;
			encap_data = raw_encap_data->data;
			data_size = raw_encap_data->size;
			if (reformat_used) {
				refmt_type = data_size <
				MLX5_ENCAPSULATION_DECISION_SIZE ?
				MLX5DR_ACTION_REFORMAT_TYPE_TNL_L3_TO_L2 :
				MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L3;
			} else {
				reformat_used = true;
				refmt_type =
				MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2;
			}
			reformat_src = actions - action_start;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			reformat_used = true;
			refmt_type = MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2;
			break;
		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
			DRV_LOG(ERR, "send to kernel action is not supported in HW steering.");
			goto err;
		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
			err = flow_hw_modify_field_compile(dev, attr, action_start,
							   actions, masks, acts, &mhdr,
							   error);
			if (err)
				goto err;
			/*
			 * Adjust the action source position for the following case:
			 * ... / MODIFY_FIELD: rx_cpy_pos / (QUEUE|RSS) / ...
			 * The next action will be Q/RSS, there will not be
			 * another adjustment, and the real source position of
			 * the following actions will be decreased by 1.
			 * The total number of actions in the new template is
			 * unchanged.
			 */
			if ((actions - action_start) == at->rx_cpy_pos)
				action_start += 1;
			break;
		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
			action_pos = at->actions_off[actions - at->actions];
			if (flow_hw_represented_port_compile
					(dev, attr, action_start, actions,
					 masks, acts, action_pos, error))
				goto err;
			break;
		case RTE_FLOW_ACTION_TYPE_METER:
			/*
			 * METER action is compiled to 2 DR actions - ASO_METER and FT.
			 * Calculated DR offset is stored only for ASO_METER and FT
			 * is assumed to be the next action.
			 */
			action_pos = at->actions_off[actions - at->actions];
			jump_pos = action_pos + 1;
			if (actions->conf && masks->conf &&
			    ((const struct rte_flow_action_meter *)
			     masks->conf)->mtr_id) {
				err = flow_hw_meter_compile(dev, cfg,
						action_pos, jump_pos, actions, acts, error);
				if (err)
					goto err;
			} else if (__flow_hw_act_data_general_append(priv, acts,
							actions->type,
							actions - action_start,
							action_pos))
				goto err;
			break;
		case RTE_FLOW_ACTION_TYPE_AGE:
			flow_hw_translate_group(dev, cfg, attr->group,
						&target_grp, error);
			if (target_grp == 0) {
				__flow_hw_action_template_destroy(dev, acts);
				return rte_flow_error_set(error, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"Age action on root table is not supported in HW steering mode");
			}
			action_pos = at->actions_off[actions - at->actions];
			if (__flow_hw_act_data_general_append(priv, acts,
							 actions->type,
							 actions - action_start,
							 action_pos))
				goto err;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			flow_hw_translate_group(dev, cfg, attr->group,
						&target_grp, error);
			if (target_grp == 0) {
				__flow_hw_action_template_destroy(dev, acts);
				return rte_flow_error_set(error, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"Counter action on root table is not supported in HW steering mode");
			}
			if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
			    (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
1617 				/*
1618 				 * When both COUNT and AGE are requested, it is
1619 				 * saved as an AGE action which also creates the
1620 				 * counter.
1621 				 */
1622 				break;
1623 			action_pos = at->actions_off[actions - at->actions];
1624 			if (masks->conf &&
1625 			    ((const struct rte_flow_action_count *)
1626 			     masks->conf)->id) {
1627 				err = flow_hw_cnt_compile(dev, action_pos, acts);
1628 				if (err)
1629 					goto err;
1630 			} else if (__flow_hw_act_data_general_append
1631 					(priv, acts, actions->type,
1632 					 actions - action_start, action_pos)) {
1633 				goto err;
1634 			}
1635 			break;
1636 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1637 			action_pos = at->actions_off[actions - at->actions];
1638 			if (masks->conf) {
1639 				ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
1640 					 ((uint32_t)(uintptr_t)actions->conf);
1641 				if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
1642 						       &acts->rule_acts[action_pos]))
1643 					goto err;
1644 			} else if (__flow_hw_act_data_general_append
1645 					(priv, acts, actions->type,
1646 					 actions - action_start, action_pos)) {
1647 				goto err;
1648 			}
1649 			break;
1650 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
1651 			action_pos = at->actions_off[actions - at->actions];
1652 			if (actions->conf && masks->conf &&
1653 			    ((const struct rte_flow_action_meter_mark *)
1654 			     masks->conf)->profile) {
1655 				err = flow_hw_meter_mark_compile(dev,
1656 							action_pos, actions,
1657 							acts->rule_acts,
1658 							&acts->mtr_id,
1659 							MLX5_HW_INV_QUEUE);
1660 				if (err)
1661 					goto err;
1662 			} else if (__flow_hw_act_data_general_append(priv, acts,
1663 							actions->type,
1664 							actions - action_start,
1665 							action_pos))
1666 				goto err;
1667 			break;
1668 		case RTE_FLOW_ACTION_TYPE_END:
1669 			actions_end = true;
1670 			break;
1671 		default:
1672 			break;
1673 		}
1674 	}
1675 	if (mhdr.pos != UINT16_MAX) {
1676 		uint32_t flags;
1677 		uint32_t bulk_size;
1678 		size_t mhdr_len;
1679 
1680 		acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
1681 					 0, SOCKET_ID_ANY);
1682 		if (!acts->mhdr)
1683 			goto err;
1684 		rte_memcpy(acts->mhdr, &mhdr, sizeof(*acts->mhdr));
1685 		mhdr_len = sizeof(struct mlx5_modification_cmd) * acts->mhdr->mhdr_cmds_num;
1686 		flags = mlx5_hw_act_flag[!!attr->group][type];
1687 		if (acts->mhdr->shared) {
1688 			flags |= MLX5DR_ACTION_FLAG_SHARED;
1689 			bulk_size = 0;
1690 		} else {
1691 			bulk_size = rte_log2_u32(table_attr->nb_flows);
1692 		}
1693 		acts->mhdr->action = mlx5dr_action_create_modify_header
1694 				(priv->dr_ctx, mhdr_len, (__be64 *)acts->mhdr->mhdr_cmds,
1695 				 bulk_size, flags);
1696 		if (!acts->mhdr->action)
1697 			goto err;
1698 		acts->rule_acts[acts->mhdr->pos].action = acts->mhdr->action;
1699 	}
1700 	if (reformat_used) {
1701 		uint8_t buf[MLX5_ENCAP_MAX_LEN];
1702 		bool shared_rfmt = true;
1703 
1704 		MLX5_ASSERT(at->reformat_off != UINT16_MAX);
1705 		if (enc_item) {
1706 			MLX5_ASSERT(!encap_data);
1707 			if (flow_dv_convert_encap_data(enc_item, buf, &data_size, error))
1708 				goto err;
1709 			encap_data = buf;
1710 			if (!enc_item_m)
1711 				shared_rfmt = false;
1712 		} else if (encap_data && !encap_data_m) {
1713 			shared_rfmt = false;
1714 		}
1715 		acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
1716 				    sizeof(*acts->encap_decap) + data_size,
1717 				    0, SOCKET_ID_ANY);
1718 		if (!acts->encap_decap)
1719 			goto err;
1720 		if (data_size) {
1721 			acts->encap_decap->data_size = data_size;
1722 			memcpy(acts->encap_decap->data, encap_data, data_size);
1723 		}
1724 		acts->encap_decap->action = mlx5dr_action_create_reformat
1725 				(priv->dr_ctx, refmt_type,
1726 				 data_size, encap_data,
1727 				 shared_rfmt ? 0 : rte_log2_u32(table_attr->nb_flows),
1728 				 mlx5_hw_act_flag[!!attr->group][type] |
1729 				 (shared_rfmt ? MLX5DR_ACTION_FLAG_SHARED : 0));
1730 		if (!acts->encap_decap->action)
1731 			goto err;
1732 		acts->rule_acts[at->reformat_off].action = acts->encap_decap->action;
1733 		acts->rule_acts[at->reformat_off].reformat.data = acts->encap_decap->data;
1734 		if (shared_rfmt)
1735 			acts->rule_acts[at->reformat_off].reformat.offset = 0;
1736 		else if (__flow_hw_act_data_encap_append(priv, acts,
1737 				 (action_start + reformat_src)->type,
1738 				 reformat_src, at->reformat_off, data_size))
1739 			goto err;
1740 		acts->encap_decap->shared = shared_rfmt;
1741 		acts->encap_decap_pos = at->reformat_off;
1742 	}
1743 	return 0;
1744 err:
1745 	err = rte_errno;
1746 	__flow_hw_action_template_destroy(dev, acts);
1747 	return rte_flow_error_set(error, err,
1748 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1749 				  "failed to translate rte_flow actions");
1750 }
1751 
1752 /**
1753  * Translate rte_flow actions to DR actions.
1754  *
1755  * @param[in] dev
1756  *   Pointer to the rte_eth_dev structure.
1757  * @param[in] tbl
1758  *   Pointer to the flow template table.
1759  * @param[out] error
1760  *   Pointer to error structure.
1761  *
1762  * @return
1763  *    0 on success, negative value otherwise and rte_errno is set.
1764  */
1765 static int
1766 flow_hw_actions_translate(struct rte_eth_dev *dev,
1767 			  struct rte_flow_template_table *tbl,
1768 			  struct rte_flow_error *error)
1769 {
1770 	uint32_t i;
1771 
1772 	for (i = 0; i < tbl->nb_action_templates; i++) {
1773 		if (__flow_hw_actions_translate(dev, &tbl->cfg,
1774 						&tbl->ats[i].acts,
1775 						tbl->ats[i].action_template,
1776 						error))
1777 			goto err;
1778 	}
1779 	return 0;
1780 err:
1781 	while (i--)
1782 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
1783 	return -1;
1784 }
1785 
1786 /**
1787  * Get shared indirect action.
1788  *
1789  * @param[in] dev
1790  *   Pointer to the rte_eth_dev data structure.
1791  * @param[in] act_data
1792  *   Pointer to the recorded action construct data.
1793  * @param[in] item_flags
1794  *   The matcher item_flags used for RSS lookup.
1795  * @param[in] rule_act
1796  *   Pointer to the shared action's destination rule DR action.
1797  *
1798  * @return
1799  *    0 on success, negative value otherwise and rte_errno is set.
1800  */
1801 static __rte_always_inline int
1802 flow_hw_shared_action_get(struct rte_eth_dev *dev,
1803 			  struct mlx5_action_construct_data *act_data,
1804 			  const uint64_t item_flags,
1805 			  struct mlx5dr_rule_action *rule_act)
1806 {
1807 	struct mlx5_priv *priv = dev->data->dev_private;
1808 	struct mlx5_flow_rss_desc rss_desc = { 0 };
1809 	uint64_t hash_fields = 0;
1810 	uint32_t hrxq_idx = 0;
1811 	struct mlx5_hrxq *hrxq = NULL;
1812 	int act_type = act_data->type;
1813 
1814 	switch (act_type) {
1815 	case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
1816 		rss_desc.level = act_data->shared_rss.level;
1817 		rss_desc.types = act_data->shared_rss.types;
1818 		flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
1819 		hrxq_idx = flow_dv_action_rss_hrxq_lookup
1820 			(dev, act_data->shared_rss.idx, hash_fields);
1821 		if (hrxq_idx)
1822 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
1823 					      hrxq_idx);
1824 		if (hrxq) {
1825 			rule_act->action = hrxq->action;
1826 			return 0;
1827 		}
1828 		break;
1829 	default:
1830 		DRV_LOG(WARNING, "Unsupported shared action type:%d",
1831 			act_data->type);
1832 		break;
1833 	}
1834 	return -1;
1835 }
1836 
1837 /**
1838  * Construct shared indirect action.
1839  *
1840  * @param[in] dev
1841  *   Pointer to the rte_eth_dev data structure.
1842  * @param[in] queue
1843  *   The flow creation queue index.
1844  * @param[in] action
1845  *   Pointer to the shared indirect rte_flow action.
1846  * @param[in] table
1847  *   Pointer to the flow table.
1848  * @param[in] it_idx
1849  *   Item template index the action template refers to.
1850  * @param[in] action_flags
1851  *   Actions bit-map detected in this template.
1852  * @param[in, out] flow
1853  *   Pointer to the flow containing the counter.
1854  * @param[in] rule_act
1855  *   Pointer to the shared action's destination rule DR action.
1856  *
1857  * @return
1858  *    0 on success, negative value otherwise and rte_errno is set.
1859  */
1860 static __rte_always_inline int
1861 flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
1862 				const struct rte_flow_action *action,
1863 				struct rte_flow_template_table *table,
1864 				const uint8_t it_idx, uint64_t action_flags,
1865 				struct rte_flow_hw *flow,
1866 				struct mlx5dr_rule_action *rule_act)
1867 {
1868 	struct mlx5_priv *priv = dev->data->dev_private;
1869 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1870 	struct mlx5_action_construct_data act_data;
1871 	struct mlx5_shared_action_rss *shared_rss;
1872 	struct mlx5_aso_mtr *aso_mtr;
1873 	struct mlx5_age_info *age_info;
1874 	struct mlx5_hws_age_param *param;
1875 	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
1876 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1877 	uint32_t idx = act_idx &
1878 		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1879 	uint64_t item_flags;
1880 	cnt_id_t age_cnt;
1881 
1882 	memset(&act_data, 0, sizeof(act_data));
1883 	switch (type) {
1884 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
1885 		act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
1886 		shared_rss = mlx5_ipool_get
1887 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
1888 		if (!shared_rss)
1889 			return -1;
1890 		act_data.shared_rss.idx = idx;
1891 		act_data.shared_rss.level = shared_rss->origin.level;
1892 		act_data.shared_rss.types = !shared_rss->origin.types ?
1893 					    RTE_ETH_RSS_IP :
1894 					    shared_rss->origin.types;
1895 		item_flags = table->its[it_idx]->item_flags;
1896 		if (flow_hw_shared_action_get
1897 				(dev, &act_data, item_flags, rule_act))
1898 			return -1;
1899 		break;
1900 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
1901 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
1902 				act_idx,
1903 				&rule_act->action,
1904 				&rule_act->counter.offset))
1905 			return -1;
1906 		flow->cnt_id = act_idx;
1907 		break;
1908 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
1909 		/*
1910 		 * Save the index with the indirect type, to recognize
1911 		 * it in flow destroy.
1912 		 */
1913 		flow->age_idx = act_idx;
1914 		if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
1915 			/*
1916 			 * The mutual update for indirect AGE & COUNT will be
1917 			 * performed later after we have ID for both of them.
1918 			 */
1919 			break;
1920 		age_info = GET_PORT_AGE_INFO(priv);
1921 		param = mlx5_ipool_get(age_info->ages_ipool, idx);
1922 		if (param == NULL)
1923 			return -1;
1924 		if (action_flags & MLX5_FLOW_ACTION_COUNT) {
1925 			if (mlx5_hws_cnt_pool_get(priv->hws_cpool,
1926 						  &param->queue_id, &age_cnt,
1927 						  idx) < 0)
1928 				return -1;
1929 			flow->cnt_id = age_cnt;
1930 			param->nb_cnts++;
1931 		} else {
1932 			/*
1933 			 * Get the counter of this indirect AGE or create one
1934 			 * if it does not exist.
1935 			 */
1936 			age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
1937 			if (age_cnt == 0)
1938 				return -1;
1939 		}
1940 		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
1941 						     age_cnt, &rule_act->action,
1942 						     &rule_act->counter.offset))
1943 			return -1;
1944 		break;
1945 	case MLX5_INDIRECT_ACTION_TYPE_CT:
1946 		if (flow_hw_ct_compile(dev, queue, idx, rule_act))
1947 			return -1;
1948 		break;
1949 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1950 		/* Find ASO object. */
1951 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
1952 		if (!aso_mtr)
1953 			return -1;
1954 		rule_act->action = pool->action;
1955 		rule_act->aso_meter.offset = aso_mtr->offset;
1956 		rule_act->aso_meter.init_color =
1957 			(enum mlx5dr_action_aso_meter_color)
1958 			rte_col_2_mlx5_col(aso_mtr->init_color);
1959 		break;
1960 	default:
1961 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
1962 		break;
1963 	}
1964 	return 0;
1965 }
1966 
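/*
 * Note (illustrative, not part of the original sources): in
 * flow_hw_shared_action_construct() above, the indirect action handle
 * passed through action->conf packs the action type in the upper bits
 * and the object index in the lower bits, e.g.:
 *
 *	act_idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
 *		   MLX5_INDIRECT_ACTION_TYPE_OFFSET) | idx;
 */
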
1967 static __rte_always_inline int
1968 flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
1969 {
1970 	struct mlx5_modification_cmd cmd_he = {
1971 		.data0 = rte_be_to_cpu_32(cmd->data0),
1972 		.data1 = 0,
1973 	};
1974 
1975 	return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
1976 }
1977 
1978 /**
1979  * Construct the modify header commands of a flow rule.
1980  *
1981  * For an action template that contains a dynamic modify field action,
1982  * the modification commands are updated per rule during flow creation.
1983  *
1984  * @param[in] job
1985  *   Pointer to job descriptor.
1986  * @param[in] act_data
1987  *   Pointer to the recorded action construct data.
1988  * @param[in] hw_acts
1989  *   Pointer to translated actions from template.
1990  * @param[in] action
1991  *   Pointer to the rte_flow modify field action.
1992  *
1993  * @return
1994  *    0 on success, negative value otherwise.
2001  */
2002 static __rte_always_inline int
2003 flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
2004 			       struct mlx5_action_construct_data *act_data,
2005 			       const struct mlx5_hw_actions *hw_acts,
2006 			       const struct rte_flow_action *action)
2007 {
2008 	const struct rte_flow_action_modify_field *mhdr_action = action->conf;
2009 	uint8_t values[16] = { 0 };
2010 	unaligned_uint32_t *value_p;
2011 	uint32_t i;
2012 	struct field_modify_info *field;
2013 
2014 	if (!hw_acts->mhdr)
2015 		return -1;
2016 	if (hw_acts->mhdr->shared || act_data->modify_header.shared)
2017 		return 0;
2018 	MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
2019 		    mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
2020 	if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
2021 	    mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
2022 		return 0;
2023 	if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
2024 		rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
2025 	else
2026 		rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
2027 	if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
2028 	    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
2029 	    mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
2030 	    mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
2031 		value_p = (unaligned_uint32_t *)values;
2032 		*value_p = rte_cpu_to_be_32(*value_p);
2033 	} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {
2034 		uint32_t tmp;
2035 
2036 		/*
2037 		 * QFI is passed as a uint8_t integer, but it is accessed through
2038 		 * the 2nd least significant byte of a 32-bit field in the modify header command.
2039 		 */
2040 		tmp = values[0];
2041 		value_p = (unaligned_uint32_t *)values;
2042 		*value_p = rte_cpu_to_be_32(tmp << 8);
2043 	}
2044 	i = act_data->modify_header.mhdr_cmds_off;
2045 	field = act_data->modify_header.field;
2046 	do {
2047 		uint32_t off_b;
2048 		uint32_t mask;
2049 		uint32_t data;
2050 		const uint8_t *mask_src;
2051 
2052 		if (i >= act_data->modify_header.mhdr_cmds_end)
2053 			return -1;
2054 		if (flow_hw_mhdr_cmd_is_nop(&job->mhdr_cmd[i])) {
2055 			++i;
2056 			continue;
2057 		}
2058 		mask_src = (const uint8_t *)act_data->modify_header.mask;
2059 		mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
2060 		if (!mask) {
2061 			++field;
2062 			continue;
2063 		}
2064 		off_b = rte_bsf32(mask);
2065 		data = flow_dv_fetch_field(values + field->offset, field->size);
2066 		data = (data & mask) >> off_b;
2067 		job->mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
2068 		++field;
2069 	} while (field->size);
2070 	return 0;
2071 }
2072 
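/*
 * Usage sketch (illustrative only, field values are hypothetical): a
 * MODIFY_FIELD action whose per-rule immediate is filled by the routine
 * above, e.g. setting the GTP QFI from an immediate value:
 *
 *	struct rte_flow_action_modify_field conf = {
 *		.operation = RTE_FLOW_MODIFY_SET,
 *		.dst = { .field = RTE_FLOW_FIELD_GTP_PSC_QFI },
 *		.src = { .field = RTE_FLOW_FIELD_VALUE,
 *			 .value = { 0x05 } },
 *		.width = 6,
 *	};
 */
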
2073 /**
2074  * Construct flow action array.
2075  *
2076  * For an action template with dynamic actions, these are updated per rule.
2077  *
2078  * @param[in] dev
2079  *   Pointer to the rte_eth_dev structure.
2080  * @param[in] job
2081  *   Pointer to job descriptor.
2082  * @param[in] hw_at
2083  *   Pointer to the action template descriptor with translated actions.
2084  * @param[in] it_idx
2085  *   Item template index the action template refers to.
2086  * @param[in] actions
2087  *   Array of rte_flow actions to be checked.
2088  * @param[in] rule_acts
2089  *   Array of DR rule actions to be used during flow creation.
2090  * @param[in] queue
2091  *   The flow creation queue index.
2092  * @param[out] error
2093  *   Pointer to error structure.
2094  * @return
2095  *    0 on success, negative value otherwise and rte_errno is set.
2096  */
2097 static __rte_always_inline int
2098 flow_hw_actions_construct(struct rte_eth_dev *dev,
2099 			  struct mlx5_hw_q_job *job,
2100 			  const struct mlx5_hw_action_template *hw_at,
2101 			  const uint8_t it_idx,
2102 			  const struct rte_flow_action actions[],
2103 			  struct mlx5dr_rule_action *rule_acts,
2104 			  uint32_t queue,
2105 			  struct rte_flow_error *error)
2106 {
2107 	struct mlx5_priv *priv = dev->data->dev_private;
2108 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
2109 	struct rte_flow_template_table *table = job->flow->table;
2110 	struct mlx5_action_construct_data *act_data;
2111 	const struct rte_flow_actions_template *at = hw_at->action_template;
2112 	const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
2113 	const struct rte_flow_action *action;
2114 	const struct rte_flow_action_raw_encap *raw_encap_data;
2115 	const struct rte_flow_item *enc_item = NULL;
2116 	const struct rte_flow_action_ethdev *port_action = NULL;
2117 	const struct rte_flow_action_meter *meter = NULL;
2118 	const struct rte_flow_action_age *age = NULL;
2119 	uint8_t *buf = job->encap_data;
2120 	struct rte_flow_attr attr = {
2121 			.ingress = 1,
2122 	};
2123 	uint32_t ft_flag;
2124 	size_t encap_len = 0;
2125 	int ret;
2126 	uint32_t age_idx = 0;
2127 	struct mlx5_aso_mtr *aso_mtr;
2128 
2129 	rte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);
2130 	attr.group = table->grp->group_id;
2131 	ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
2132 	if (table->type == MLX5DR_TABLE_TYPE_FDB) {
2133 		attr.transfer = 1;
2134 		attr.ingress = 1;
2135 	} else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
2136 		attr.egress = 1;
2137 		attr.ingress = 0;
2138 	} else {
2139 		attr.ingress = 1;
2140 	}
2141 	if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0) {
2142 		uint16_t pos = hw_acts->mhdr->pos;
2143 
2144 		if (!hw_acts->mhdr->shared) {
2145 			rule_acts[pos].modify_header.offset =
2146 						job->flow->idx - 1;
2147 			rule_acts[pos].modify_header.data =
2148 						(uint8_t *)job->mhdr_cmd;
2149 			rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
2150 				   sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
2151 		}
2152 	}
2153 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
2154 		uint32_t jump_group;
2155 		uint32_t tag;
2156 		uint64_t item_flags;
2157 		struct mlx5_hw_jump_action *jump;
2158 		struct mlx5_hrxq *hrxq;
2159 		uint32_t ct_idx;
2160 		cnt_id_t cnt_id;
2161 		uint32_t mtr_id;
2162 
2163 		action = &actions[act_data->action_src];
2164 		/*
2165 		 * Action template construction replaces
2166 		 * OF_SET_VLAN_VID with MODIFY_FIELD.
2167 		 */
2168 		if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
2169 			MLX5_ASSERT(act_data->type ==
2170 				    RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
2171 		else
2172 			MLX5_ASSERT(action->type ==
2173 				    RTE_FLOW_ACTION_TYPE_INDIRECT ||
2174 				    (int)action->type == act_data->type);
2175 		switch (act_data->type) {
2176 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
2177 			if (flow_hw_shared_action_construct
2178 					(dev, queue, action, table, it_idx,
2179 					 at->action_flags, job->flow,
2180 					 &rule_acts[act_data->action_dst]))
2181 				return -1;
2182 			break;
2183 		case RTE_FLOW_ACTION_TYPE_VOID:
2184 			break;
2185 		case RTE_FLOW_ACTION_TYPE_MARK:
2186 			tag = mlx5_flow_mark_set
2187 			      (((const struct rte_flow_action_mark *)
2188 			      (action->conf))->id);
2189 			rule_acts[act_data->action_dst].tag.value = tag;
2190 			break;
2191 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2192 			rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
2193 				vlan_hdr_to_be32(action);
2194 			break;
2195 		case RTE_FLOW_ACTION_TYPE_JUMP:
2196 			jump_group = ((const struct rte_flow_action_jump *)
2197 						action->conf)->group;
2198 			jump = flow_hw_jump_action_register
2199 				(dev, &table->cfg, jump_group, NULL);
2200 			if (!jump)
2201 				return -1;
2202 			rule_acts[act_data->action_dst].action =
2203 			(!!attr.group) ? jump->hws_action : jump->root_action;
2204 			job->flow->jump = jump;
2205 			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
2206 			break;
2207 		case RTE_FLOW_ACTION_TYPE_RSS:
2208 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2209 			hrxq = flow_hw_tir_action_register(dev,
2210 					ft_flag,
2211 					action);
2212 			if (!hrxq)
2213 				return -1;
2214 			rule_acts[act_data->action_dst].action = hrxq->action;
2215 			job->flow->hrxq = hrxq;
2216 			job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
2217 			break;
2218 		case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
2219 			item_flags = table->its[it_idx]->item_flags;
2220 			if (flow_hw_shared_action_get
2221 				(dev, act_data, item_flags,
2222 				 &rule_acts[act_data->action_dst]))
2223 				return -1;
2224 			break;
2225 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2226 			enc_item = ((const struct rte_flow_action_vxlan_encap *)
2227 				   action->conf)->definition;
2228 			if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL))
2229 				return -1;
2230 			break;
2231 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2232 			enc_item = ((const struct rte_flow_action_nvgre_encap *)
2233 				   action->conf)->definition;
2234 			if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL))
2235 				return -1;
2236 			break;
2237 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2238 			raw_encap_data =
2239 				(const struct rte_flow_action_raw_encap *)
2240 				 action->conf;
2241 			rte_memcpy((void *)buf, raw_encap_data->data, act_data->encap.len);
2242 			MLX5_ASSERT(raw_encap_data->size ==
2243 				    act_data->encap.len);
2244 			break;
2245 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
2246 			if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
2247 				ret = flow_hw_set_vlan_vid_construct(dev, job,
2248 								     act_data,
2249 								     hw_acts,
2250 								     action);
2251 			else
2252 				ret = flow_hw_modify_field_construct(job,
2253 								     act_data,
2254 								     hw_acts,
2255 								     action);
2256 			if (ret)
2257 				return -1;
2258 			break;
2259 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2260 			port_action = action->conf;
2261 			if (!priv->hw_vport[port_action->port_id])
2262 				return -1;
2263 			rule_acts[act_data->action_dst].action =
2264 					priv->hw_vport[port_action->port_id];
2265 			break;
2266 		case RTE_FLOW_ACTION_TYPE_METER:
2267 			meter = action->conf;
2268 			mtr_id = meter->mtr_id;
2269 			aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
2270 			rule_acts[act_data->action_dst].action =
2271 				priv->mtr_bulk.action;
2272 			rule_acts[act_data->action_dst].aso_meter.offset =
2273 								aso_mtr->offset;
2274 			jump = flow_hw_jump_action_register
2275 				(dev, &table->cfg, aso_mtr->fm.group, NULL);
2276 			if (!jump)
2277 				return -1;
2278 			MLX5_ASSERT
2279 				(!rule_acts[act_data->action_dst + 1].action);
2280 			rule_acts[act_data->action_dst + 1].action =
2281 					(!!attr.group) ? jump->hws_action :
2282 							 jump->root_action;
2283 			job->flow->jump = jump;
2284 			job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
2285 			if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
2286 				return -1;
2287 			break;
2288 		case RTE_FLOW_ACTION_TYPE_AGE:
2289 			age = action->conf;
2290 			/*
2291 			 * First, create the AGE parameter, then create its
2292 			 * counter later:
2293 			 * Regular counter - in next case.
2294 			 * Indirect counter - update it after the loop.
2295 			 */
2296 			age_idx = mlx5_hws_age_action_create(priv, queue, 0,
2297 							     age,
2298 							     job->flow->idx,
2299 							     error);
2300 			if (age_idx == 0)
2301 				return -rte_errno;
2302 			job->flow->age_idx = age_idx;
2303 			if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
2304 				/*
2305 				 * When AGE uses an indirect counter, no need to
2306 				 * create a counter; just update it with the AGE
2307 				 * parameter after the loop.
2308 				 */
2309 				break;
2310 			/* Fall-through. */
2311 		case RTE_FLOW_ACTION_TYPE_COUNT:
2312 			ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, &queue,
2313 						    &cnt_id, age_idx);
2314 			if (ret != 0)
2315 				return ret;
2316 			ret = mlx5_hws_cnt_pool_get_action_offset
2317 				(priv->hws_cpool,
2318 				 cnt_id,
2319 				 &rule_acts[act_data->action_dst].action,
2320 				 &rule_acts[act_data->action_dst].counter.offset
2321 				 );
2322 			if (ret != 0)
2323 				return ret;
2324 			job->flow->cnt_id = cnt_id;
2325 			break;
2326 		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
2327 			ret = mlx5_hws_cnt_pool_get_action_offset
2328 				(priv->hws_cpool,
2329 				 act_data->shared_counter.id,
2330 				 &rule_acts[act_data->action_dst].action,
2331 				 &rule_acts[act_data->action_dst].counter.offset
2332 				 );
2333 			if (ret != 0)
2334 				return ret;
2335 			job->flow->cnt_id = act_data->shared_counter.id;
2336 			break;
2337 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2338 			ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
2339 				 ((uint32_t)(uintptr_t)action->conf);
2340 			if (flow_hw_ct_compile(dev, queue, ct_idx,
2341 					       &rule_acts[act_data->action_dst]))
2342 				return -1;
2343 			break;
2344 		case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
2345 			mtr_id = act_data->shared_meter.id &
2346 				((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
2347 			/* Find ASO object. */
2348 			aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
2349 			if (!aso_mtr)
2350 				return -1;
2351 			rule_acts[act_data->action_dst].action =
2352 							pool->action;
2353 			rule_acts[act_data->action_dst].aso_meter.offset =
2354 							aso_mtr->offset;
2355 			rule_acts[act_data->action_dst].aso_meter.init_color =
2356 				(enum mlx5dr_action_aso_meter_color)
2357 				rte_col_2_mlx5_col(aso_mtr->init_color);
2358 			break;
2359 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
2360 			/*
2361 			 * Allocating the meter directly would slow down
2362 			 * the flow insertion rate.
2363 			 */
2364 			ret = flow_hw_meter_mark_compile(dev,
2365 				act_data->action_dst, action,
2366 				rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE);
2367 			if (ret != 0)
2368 				return ret;
2369 			break;
2370 		default:
2371 			break;
2372 		}
2373 	}
2374 	if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
2375 		if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
2376 			age_idx = job->flow->age_idx & MLX5_HWS_AGE_IDX_MASK;
2377 			if (mlx5_hws_cnt_age_get(priv->hws_cpool,
2378 						 job->flow->cnt_id) != age_idx)
2379 				/*
2380 				 * This is the first use of this indirect counter
2381 				 * for this indirect AGE; the number of counters
2382 				 * needs to be increased.
2383 				 */
2384 				mlx5_hws_age_nb_cnt_increase(priv, age_idx);
2385 		}
2386 		/*
2387 		 * Update this indirect counter with the indirect/direct
2388 		 * AGE which is using it.
2389 		 */
2390 		mlx5_hws_cnt_age_set(priv->hws_cpool, job->flow->cnt_id,
2391 				     age_idx);
2392 	}
2393 	if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
2394 		rule_acts[hw_acts->encap_decap_pos].reformat.offset =
2395 				job->flow->idx - 1;
2396 		rule_acts[hw_acts->encap_decap_pos].reformat.data = buf;
2397 	}
2398 	if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id))
2399 		job->flow->cnt_id = hw_acts->cnt_id;
2400 	return 0;
2401 }
2402 
2403 static const struct rte_flow_item *
2404 flow_hw_get_rule_items(struct rte_eth_dev *dev,
2405 		       struct rte_flow_template_table *table,
2406 		       const struct rte_flow_item items[],
2407 		       uint8_t pattern_template_index,
2408 		       struct mlx5_hw_q_job *job)
2409 {
2410 	struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
2411 
2412 	/* Only one implicit item can be added to flow rule pattern. */
2413 	MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
2414 	/* At least one item was allocated in job descriptor for items. */
2415 	MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
2416 	if (pt->implicit_port) {
2417 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
2418 			rte_errno = ENOMEM;
2419 			return NULL;
2420 		}
2421 		/* Set up represented port item in job descriptor. */
2422 		job->port_spec = (struct rte_flow_item_ethdev){
2423 			.port_id = dev->data->port_id,
2424 		};
2425 		job->items[0] = (struct rte_flow_item){
2426 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
2427 			.spec = &job->port_spec,
2428 		};
2429 		rte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);
2430 		return job->items;
2431 	} else if (pt->implicit_tag) {
2432 		if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
2433 			rte_errno = ENOMEM;
2434 			return NULL;
2435 		}
2436 		/* Set up tag item in job descriptor. */
2437 		job->tag_spec = (struct rte_flow_item_tag){
2438 			.data = flow_hw_tx_tag_regc_value(dev),
2439 		};
2440 		job->items[0] = (struct rte_flow_item){
2441 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
2442 			.spec = &job->tag_spec,
2443 		};
2444 		rte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);
2445 		return job->items;
2446 	} else {
2447 		return items;
2448 	}
2449 }
2450 
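/*
 * Illustration (not part of the driver code): with an implicit port item,
 * a user pattern of [ ETH / IPV4 / END ] is rebuilt in the job descriptor
 * as [ REPRESENTED_PORT / ETH / IPV4 / END ]; with an implicit tag item,
 * a TAG item carrying the port-specific Tx REG_C tag value is prepended
 * instead.
 */
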
2451 /**
2452  * Enqueue HW steering flow creation.
2453  *
2454  * The flow will be applied to the HW only if the postpone bit is not set or
2455  * the extra push function is called.
2456  * The flow creation status should be checked from dequeue result.
2457  *
2458  * @param[in] dev
2459  *   Pointer to the rte_eth_dev structure.
2460  * @param[in] queue
2461  *   The queue to create the flow.
2462  * @param[in] attr
2463  *   Pointer to the flow operation attributes.
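 * @param[in] table
 *   Pointer to the template table to create the flow in.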
2464  * @param[in] items
2465  *   Items with flow spec value.
2466  * @param[in] pattern_template_index
2467  *   The item pattern flow follows from the table.
2468  * @param[in] actions
2469  *   Action with flow spec value.
2470  * @param[in] action_template_index
2471  *   The action pattern flow follows from the table.
2472  * @param[in] user_data
2473  *   Pointer to the user_data.
2474  * @param[out] error
2475  *   Pointer to error structure.
2476  *
2477  * @return
2478  *    Flow pointer on success, NULL otherwise and rte_errno is set.
2479  */
2480 static struct rte_flow *
2481 flow_hw_async_flow_create(struct rte_eth_dev *dev,
2482 			  uint32_t queue,
2483 			  const struct rte_flow_op_attr *attr,
2484 			  struct rte_flow_template_table *table,
2485 			  const struct rte_flow_item items[],
2486 			  uint8_t pattern_template_index,
2487 			  const struct rte_flow_action actions[],
2488 			  uint8_t action_template_index,
2489 			  void *user_data,
2490 			  struct rte_flow_error *error)
2491 {
2492 	struct mlx5_priv *priv = dev->data->dev_private;
2493 	struct mlx5dr_rule_attr rule_attr = {
2494 		.queue_id = queue,
2495 		.user_data = user_data,
2496 		.burst = attr->postpone,
2497 	};
2498 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
2499 	struct rte_flow_hw *flow;
2500 	struct mlx5_hw_q_job *job;
2501 	const struct rte_flow_item *rule_items;
2502 	uint32_t flow_idx;
2503 	int ret;
2504 
2505 	if (unlikely((!dev->data->dev_started))) {
2506 		rte_errno = EINVAL;
2507 		goto error;
2508 	}
2509 	if (unlikely(!priv->hw_q[queue].job_idx)) {
2510 		rte_errno = ENOMEM;
2511 		goto error;
2512 	}
2513 	flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
2514 	if (!flow)
2515 		goto error;
2516 	/*
2517 	 * Set the table here in order to know the destination table
2518 	 * when freeing the flow afterwards.
2519 	 */
2520 	flow->table = table;
2521 	flow->idx = flow_idx;
2522 	job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
2523 	/*
2524 	 * Set the job type here in order to know if the flow memory
2525 	 * should be freed or not when getting the result from dequeue.
2526 	 */
2527 	job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
2528 	job->flow = flow;
2529 	job->user_data = user_data;
2530 	rule_attr.user_data = job;
2531 	/*
2532 	 * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices for rule
2533 	 * insertion hints.
2534 	 */
2535 	MLX5_ASSERT(flow_idx > 0);
2536 	rule_attr.rule_idx = flow_idx - 1;
2537 	/*
2538 	 * Construct the flow actions based on the input actions.
2539 	 * The implicitly appended action is always fixed, like metadata
2540 	 * copy action from FDB to NIC Rx.
2541 	 * No need to copy and construct a new "actions" list based on the
2542 	 * user's input, in order to save the cost.
2543 	 */
2544 	if (flow_hw_actions_construct(dev, job,
2545 				      &table->ats[action_template_index],
2546 				      pattern_template_index, actions,
2547 				      rule_acts, queue, error)) {
2548 		rte_errno = EINVAL;
2549 		goto free;
2550 	}
2551 	rule_items = flow_hw_get_rule_items(dev, table, items,
2552 					    pattern_template_index, job);
2553 	if (!rule_items)
2554 		goto free;
2555 	ret = mlx5dr_rule_create(table->matcher,
2556 				 pattern_template_index, rule_items,
2557 				 action_template_index, rule_acts,
2558 				 &rule_attr, (struct mlx5dr_rule *)flow->rule);
2559 	if (likely(!ret))
2560 		return (struct rte_flow *)flow;
2561 free:
2562 	/* Flow creation failed, return the job descriptor and flow memory. */
2563 	mlx5_ipool_free(table->flow, flow_idx);
2564 	priv->hw_q[queue].job_idx++;
2565 error:
2566 	rte_flow_error_set(error, rte_errno,
2567 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2568 			   "fail to create rte flow");
2569 	return NULL;
2570 }
2571 
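/*
 * Usage sketch (illustrative only): this callback backs the public
 * rte_flow_async_create() API. Assuming a previously created template
 * table `tbl` whose pattern and actions templates sit at index 0:
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_async_create(port_id, queue_id, &op_attr, tbl,
 *				  pattern, 0, actions, 0, NULL, &err);
 */
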
2572 /**
2573  * Enqueue HW steering flow destruction.
2574  *
2575  * The flow will be applied to the HW only if the postpone bit is not set or
2576  * the extra push function is called.
2577  * The flow destruction status should be checked from dequeue result.
2578  *
2579  * @param[in] dev
2580  *   Pointer to the rte_eth_dev structure.
2581  * @param[in] queue
2582  *   The queue to destroy the flow.
2583  * @param[in] attr
2584  *   Pointer to the flow operation attributes.
2585  * @param[in] flow
2586  *   Pointer to the flow to be destroyed.
2587  * @param[in] user_data
2588  *   Pointer to the user_data.
2589  * @param[out] error
2590  *   Pointer to error structure.
2591  *
2592  * @return
2593  *    0 on success, negative value otherwise and rte_errno is set.
2594  */
2595 static int
2596 flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
2597 			   uint32_t queue,
2598 			   const struct rte_flow_op_attr *attr,
2599 			   struct rte_flow *flow,
2600 			   void *user_data,
2601 			   struct rte_flow_error *error)
2602 {
2603 	struct mlx5_priv *priv = dev->data->dev_private;
2604 	struct mlx5dr_rule_attr rule_attr = {
2605 		.queue_id = queue,
2606 		.user_data = user_data,
2607 		.burst = attr->postpone,
2608 	};
2609 	struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
2610 	struct mlx5_hw_q_job *job;
2611 	int ret;
2612 
2613 	if (unlikely(!priv->hw_q[queue].job_idx)) {
2614 		rte_errno = ENOMEM;
2615 		goto error;
2616 	}
2617 	job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
2618 	job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
2619 	job->user_data = user_data;
2620 	job->flow = fh;
2621 	rule_attr.user_data = job;
2622 	ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
2623 	if (likely(!ret))
2624 		return 0;
2625 	priv->hw_q[queue].job_idx++;
2626 error:
2627 	return rte_flow_error_set(error, rte_errno,
2628 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2629 			"fail to destroy rte flow");
2630 }
2631 
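/*
 * Usage sketch (illustrative only): the matching destruction path is
 * exposed through rte_flow_async_destroy(), e.g.:
 *
 *	rte_flow_async_destroy(port_id, queue_id, &op_attr, f, NULL, &err);
 */
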
2632 /**
2633  * Release the AGE and counter for given flow.
2634  *
2635  * @param[in] priv
2636  *   Pointer to the port private data structure.
2637  * @param[in] queue
2638  *   The queue to release the counter.
2639  * @param[in, out] flow
2640  *   Pointer to the flow containing the counter.
2641  * @param[out] error
2642  *   Pointer to error structure.
2643  */
2644 static void
2645 flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
2646 			  struct rte_flow_hw *flow,
2647 			  struct rte_flow_error *error)
2648 {
2649 	if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
2650 		if (flow->age_idx && !mlx5_hws_age_is_indirect(flow->age_idx)) {
2651 			/* Remove this AGE parameter from indirect counter. */
2652 			mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
2653 			/* Release the AGE parameter. */
2654 			mlx5_hws_age_action_destroy(priv, flow->age_idx, error);
2655 			flow->age_idx = 0;
2656 		}
2657 		return;
2658 	}
2659 	/* Put the counter first to reduce the race risk with the background thread. */
2660 	mlx5_hws_cnt_pool_put(priv->hws_cpool, &queue, &flow->cnt_id);
2661 	flow->cnt_id = 0;
2662 	if (flow->age_idx) {
2663 		if (mlx5_hws_age_is_indirect(flow->age_idx)) {
2664 			uint32_t idx = flow->age_idx & MLX5_HWS_AGE_IDX_MASK;
2665 
2666 			mlx5_hws_age_nb_cnt_decrease(priv, idx);
2667 		} else {
2668 			/* Release the AGE parameter. */
2669 			mlx5_hws_age_action_destroy(priv, flow->age_idx, error);
2670 		}
2671 		flow->age_idx = 0;
2672 	}
2673 }
2674 
2675 static inline int
2676 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
2677 				 uint32_t queue,
2678 				 struct rte_flow_op_result res[],
2679 				 uint16_t n_res)
2680 {
2682 	struct mlx5_priv *priv = dev->data->dev_private;
2683 	struct rte_ring *r = priv->hw_q[queue].indir_cq;
2684 	struct mlx5_hw_q_job *job;
2685 	void *user_data = NULL;
2686 	uint32_t type, idx;
2687 	struct mlx5_aso_mtr *aso_mtr;
2688 	struct mlx5_aso_ct_action *aso_ct;
2689 	int ret_comp, i;
2690 
2691 	ret_comp = (int)rte_ring_count(r);
2692 	if (ret_comp > n_res)
2693 		ret_comp = n_res;
2694 	for (i = 0; i < ret_comp; i++) {
2695 		rte_ring_dequeue(r, &user_data);
2696 		res[i].user_data = user_data;
2697 		res[i].status = RTE_FLOW_OP_SUCCESS;
2698 	}
2699 	if (ret_comp < n_res && priv->hws_mpool)
2700 		ret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],
2701 				&res[ret_comp], n_res - ret_comp);
2702 	if (ret_comp < n_res && priv->hws_ctpool)
2703 		ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
2704 				&res[ret_comp], n_res - ret_comp);
2705 	for (i = 0; i < ret_comp; i++) {
2706 		job = (struct mlx5_hw_q_job *)res[i].user_data;
2707 		/* Restore user data. */
2708 		res[i].user_data = job->user_data;
2709 		if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
2710 			type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
2711 			if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
2712 				idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
2713 				mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
2714 			}
2715 		} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
2716 			type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
2717 			if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
2718 				idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
2719 				aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
2720 				aso_mtr->state = ASO_METER_READY;
2721 			} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
2722 				idx = MLX5_ACTION_CTX_CT_GET_IDX
2723 					((uint32_t)(uintptr_t)job->action);
2724 				aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
2725 				aso_ct->state = ASO_CONNTRACK_READY;
2726 			}
2727 		} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
2728 			type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
2729 			if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
2730 				idx = MLX5_ACTION_CTX_CT_GET_IDX
2731 					((uint32_t)(uintptr_t)job->action);
2732 				aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
2733 				mlx5_aso_ct_obj_analyze(job->profile,
2734 							job->out_data);
2735 				aso_ct->state = ASO_CONNTRACK_READY;
2736 			}
2737 		}
2738 		priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
2739 	}
2740 	return ret_comp;
2741 }
2742 
2743 /**
2744  * Pull the enqueued flows.
2745  *
2746  * For flows enqueued from creation/destruction, the status should be
2747  * checked from the dequeue result.
2748  *
2749  * @param[in] dev
2750  *   Pointer to the rte_eth_dev structure.
2751  * @param[in] queue
2752  *   The queue to pull the result.
2753  * @param[in, out] res
2754  *   Array to save the results.
2755  * @param[in] n_res
2756  *   Number of result entries available in the array.
2757  * @param[out] error
2758  *   Pointer to error structure.
2759  *
2760  * @return
2761  *    Result number on success, negative value otherwise and rte_errno is set.
2762  */
2763 static int
2764 flow_hw_pull(struct rte_eth_dev *dev,
2765 	     uint32_t queue,
2766 	     struct rte_flow_op_result res[],
2767 	     uint16_t n_res,
2768 	     struct rte_flow_error *error)
2769 {
2770 	struct mlx5_priv *priv = dev->data->dev_private;
2771 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
2772 	struct mlx5_hw_q_job *job;
2773 	int ret, i;
2774 
2775 	/* 1. Pull the flow completion. */
2776 	ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
2777 	if (ret < 0)
2778 		return rte_flow_error_set(error, rte_errno,
2779 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2780 				"fail to query flow queue");
2781 	for (i = 0; i < ret; i++) {
2782 		job = (struct mlx5_hw_q_job *)res[i].user_data;
2783 		/* Restore user data. */
2784 		res[i].user_data = job->user_data;
2785 		if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
2786 			if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
2787 				flow_hw_jump_release(dev, job->flow->jump);
2788 			else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
2789 				mlx5_hrxq_obj_release(dev, job->flow->hrxq);
2790 			if (mlx5_hws_cnt_id_valid(job->flow->cnt_id))
2791 				flow_hw_age_count_release(priv, queue,
2792 							  job->flow, error);
2793 			if (job->flow->mtr_id) {
2794 				mlx5_ipool_free(pool->idx_pool,	job->flow->mtr_id);
2795 				job->flow->mtr_id = 0;
2796 			}
2797 			mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
2798 		}
2799 		priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
2800 	}
2801 	/* 2. Pull indirect action comp. */
2802 	if (ret < n_res)
2803 		ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
2804 							n_res - ret);
2805 	return ret;
2806 }
2807 
2808 static inline void
2809 __flow_hw_push_action(struct rte_eth_dev *dev,
2810 		    uint32_t queue)
2811 {
2812 	struct mlx5_priv *priv = dev->data->dev_private;
2813 	struct rte_ring *iq = priv->hw_q[queue].indir_iq;
2814 	struct rte_ring *cq = priv->hw_q[queue].indir_cq;
2815 	void *job = NULL;
2816 	uint32_t ret, i;
2817 
2818 	ret = rte_ring_count(iq);
2819 	for (i = 0; i < ret; i++) {
2820 		rte_ring_dequeue(iq, &job);
2821 		rte_ring_enqueue(cq, job);
2822 	}
2823 	if (priv->hws_ctpool)
2824 		mlx5_aso_push_wqe(priv->sh, &priv->ct_mng->aso_sqs[queue]);
2825 	if (priv->hws_mpool)
2826 		mlx5_aso_push_wqe(priv->sh, &priv->hws_mpool->sq[queue]);
2827 }
2828 
2829 /**
2830  * Push the enqueued flows to HW.
2831  *
2832  * Force apply all the enqueued flows to the HW.
2833  *
2834  * @param[in] dev
2835  *   Pointer to the rte_eth_dev structure.
2836  * @param[in] queue
2837  *   The queue to push the flow.
2838  * @param[out] error
2839  *   Pointer to error structure.
2840  *
2841  * @return
2842  *    0 on success, negative value otherwise and rte_errno is set.
2843  */
2844 static int
2845 flow_hw_push(struct rte_eth_dev *dev,
2846 	     uint32_t queue,
2847 	     struct rte_flow_error *error)
2848 {
2849 	struct mlx5_priv *priv = dev->data->dev_private;
2850 	int ret;
2851 
2852 	__flow_hw_push_action(dev, queue);
2853 	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
2854 				       MLX5DR_SEND_QUEUE_ACTION_DRAIN);
2855 	if (ret) {
2856 		rte_flow_error_set(error, rte_errno,
2857 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2858 				   "fail to push flows");
2859 		return ret;
2860 	}
2861 	return 0;
2862 }
2863 
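/*
 * Usage sketch (illustrative only): flow_hw_push() and flow_hw_pull() back
 * the public rte_flow_push()/rte_flow_pull() calls. A typical application
 * drain loop:
 *
 *	struct rte_flow_op_result res[32];
 *	struct rte_flow_error err;
 *	int n;
 *
 *	rte_flow_push(port_id, queue_id, &err);
 *	do {
 *		n = rte_flow_pull(port_id, queue_id, res, 32, &err);
 *	} while (n == 0);
 */
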
2864 /**
2865  * Drain the enqueued flows' completion.
2866  *
2867  * @param[in] dev
2868  *   Pointer to the rte_eth_dev structure.
2869  * @param[in] queue
2870  *   The queue to pull the flow.
2871  * @param[in] pending_rules
2872  *   The number of pending flows.
2873  * @param[out] error
2874  *   Pointer to error structure.
2875  *
2876  * @return
2877  *    0 on success, negative value otherwise and rte_errno is set.
2878  */
2879 static int
2880 __flow_hw_pull_comp(struct rte_eth_dev *dev,
2881 		    uint32_t queue,
2882 		    uint32_t pending_rules,
2883 		    struct rte_flow_error *error)
2884 {
2885 	struct rte_flow_op_result comp[BURST_THR];
2886 	int ret, i, empty_loop = 0;
2887 
2888 	ret = flow_hw_push(dev, queue, error);
2889 	if (ret < 0)
2890 		return ret;
2891 	while (pending_rules) {
2892 		ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
2893 		if (ret < 0)
2894 			return -1;
2895 		if (!ret) {
2896 			rte_delay_us_sleep(20000);
2897 			if (++empty_loop > 5) {
2898 				DRV_LOG(WARNING, "No available dequeue, quit.");
2899 				break;
2900 			}
2901 			continue;
2902 		}
2903 		for (i = 0; i < ret; i++) {
2904 			if (comp[i].status == RTE_FLOW_OP_ERROR)
2905 				DRV_LOG(WARNING, "Flow flush get error CQE.");
2906 		}
2907 		if ((uint32_t)ret > pending_rules) {
2908 			DRV_LOG(WARNING, "Flow flush get extra CQE.");
2909 			return rte_flow_error_set(error, ERANGE,
2910 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2911 					"get extra CQE");
2912 		}
2913 		pending_rules -= ret;
2914 		empty_loop = 0;
2915 	}
2916 	return 0;
2917 }
2918 
2919 /**
2920  * Flush created flows.
2921  *
2922  * @param[in] dev
2923  *   Pointer to the rte_eth_dev structure.
2924  * @param[out] error
2925  *   Pointer to error structure.
2926  *
2927  * @return
2928  *    0 on success, negative value otherwise and rte_errno is set.
2929  */
2930 int
2931 flow_hw_q_flow_flush(struct rte_eth_dev *dev,
2932 		     struct rte_flow_error *error)
2933 {
2934 	struct mlx5_priv *priv = dev->data->dev_private;
2935 	struct mlx5_hw_q *hw_q;
2936 	struct rte_flow_template_table *tbl;
2937 	struct rte_flow_hw *flow;
2938 	struct rte_flow_op_attr attr = {
2939 		.postpone = 0,
2940 	};
2941 	uint32_t pending_rules = 0;
2942 	uint32_t queue;
2943 	uint32_t fidx;
2944 
2945 	/*
2946 	 * Push and dequeue all the enqueued flow creation/destruction
2947 	 * jobs in case the user forgot to dequeue, otherwise the
2948 	 * enqueued created flows would be leaked. The forgotten
2949 	 * dequeues would also cause the flow flush to get unexpected
2950 	 * extra CQEs and pending_rules to become a negative value.
2951 	 */
2953 	for (queue = 0; queue < priv->nb_queue; queue++) {
2954 		hw_q = &priv->hw_q[queue];
2955 		if (__flow_hw_pull_comp(dev, queue, hw_q->size - hw_q->job_idx,
2956 					error))
2957 			return -1;
2958 	}
2959 	/* Flush flows per table from MLX5_DEFAULT_FLUSH_QUEUE. */
2960 	hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
2961 	LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
2962 		if (!tbl->cfg.external)
2963 			continue;
2964 		MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
2965 			if (flow_hw_async_flow_destroy(dev,
2966 						MLX5_DEFAULT_FLUSH_QUEUE,
2967 						&attr,
2968 						(struct rte_flow *)flow,
2969 						NULL,
2970 						error))
2971 				return -1;
2972 			pending_rules++;
2973 			/* Drain completion with queue size. */
2974 			if (pending_rules >= hw_q->size) {
2975 				if (__flow_hw_pull_comp(dev,
2976 						MLX5_DEFAULT_FLUSH_QUEUE,
2977 						pending_rules, error))
2978 					return -1;
2979 				pending_rules = 0;
2980 			}
2981 		}
2982 	}
2983 	/* Drain the remaining completions. */
2984 	if (pending_rules &&
2985 	    __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, pending_rules,
2986 				error))
2987 		return -1;
2988 	return 0;
2989 }
2990 
2991 /**
2992  * Create flow table.
2993  *
2994  * The input item and action templates will be bound to the table.
2995  * Flow memory will also be allocated. The matcher will be created
2996  * based on the item templates. Actions will be translated to the
2997  * dedicated DR actions if possible.
2998  *
2999  * @param[in] dev
3000  *   Pointer to the rte_eth_dev structure.
3001  * @param[in] table_cfg
3002  *   Pointer to the table configuration.
3003  * @param[in] item_templates
3004  *   Item template array to be bound to the table.
3005  * @param[in] nb_item_templates
3006  *   Number of item templates.
3007  * @param[in] action_templates
3008  *   Action template array to be bound to the table.
3009  * @param[in] nb_action_templates
3010  *   Number of action templates.
3011  * @param[out] error
3012  *   Pointer to error structure.
3013  *
3014  * @return
3015  *    Table on success, NULL otherwise and rte_errno is set.
3016  */
3017 static struct rte_flow_template_table *
3018 flow_hw_table_create(struct rte_eth_dev *dev,
3019 		     const struct mlx5_flow_template_table_cfg *table_cfg,
3020 		     struct rte_flow_pattern_template *item_templates[],
3021 		     uint8_t nb_item_templates,
3022 		     struct rte_flow_actions_template *action_templates[],
3023 		     uint8_t nb_action_templates,
3024 		     struct rte_flow_error *error)
3025 {
3026 	struct rte_flow_error sub_error = {
3027 		.type = RTE_FLOW_ERROR_TYPE_NONE,
3028 		.cause = NULL,
3029 		.message = NULL,
3030 	};
3031 	struct mlx5_priv *priv = dev->data->dev_private;
3032 	struct mlx5dr_matcher_attr matcher_attr = {0};
3033 	struct rte_flow_template_table *tbl = NULL;
3034 	struct mlx5_flow_group *grp;
3035 	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
3036 	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
3037 	const struct rte_flow_template_table_attr *attr = &table_cfg->attr;
3038 	struct rte_flow_attr flow_attr = attr->flow_attr;
3039 	struct mlx5_flow_cb_ctx ctx = {
3040 		.dev = dev,
3041 		.error = &sub_error,
3042 		.data = &flow_attr,
3043 	};
3044 	struct mlx5_indexed_pool_config cfg = {
3045 		.size = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size(),
3046 		.trunk_size = 1 << 12,
3047 		.per_core_cache = 1 << 13,
3048 		.need_lock = 1,
3049 		.release_mem_en = !!priv->sh->config.reclaim_mode,
3050 		.malloc = mlx5_malloc,
3051 		.free = mlx5_free,
3052 		.type = "mlx5_hw_table_flow",
3053 	};
3054 	struct mlx5_list_entry *ge;
3055 	uint32_t i, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
3056 	uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
3057 	bool port_started = !!dev->data->dev_started;
3058 	int err;
3059 
3060 	/* HWS layer accepts only 1 item template with root table. */
3061 	if (!attr->flow_attr.group)
3062 		max_tpl = 1;
3063 	cfg.max_idx = nb_flows;
3064 	/* For a table with very limited flows, disable the cache. */
3065 	if (nb_flows < cfg.trunk_size) {
3066 		cfg.per_core_cache = 0;
3067 		cfg.trunk_size = nb_flows;
3068 	} else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
3069 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
3070 	}
3071 	/* Check if too many templates are required. */
3072 	if (nb_item_templates > max_tpl ||
3073 	    nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
3074 		rte_errno = EINVAL;
3075 		goto error;
3076 	}
3077 	/* Allocate the table memory. */
3078 	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());
3079 	if (!tbl)
3080 		goto error;
3081 	tbl->cfg = *table_cfg;
3082 	/* Allocate flow indexed pool. */
3083 	tbl->flow = mlx5_ipool_create(&cfg);
3084 	if (!tbl->flow)
3085 		goto error;
3086 	/* Register the flow group. */
3087 	ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
3088 	if (!ge)
3089 		goto error;
3090 	grp = container_of(ge, struct mlx5_flow_group, entry);
3091 	tbl->grp = grp;
3092 	/* Prepare matcher information. */
3093 	matcher_attr.priority = attr->flow_attr.priority;
3094 	matcher_attr.optimize_using_rule_idx = true;
3095 	matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
3096 	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
3097 	/* Build the item template. */
3098 	for (i = 0; i < nb_item_templates; i++) {
3099 		uint32_t ret;
3100 
3101 		if ((flow_attr.ingress && !item_templates[i]->attr.ingress) ||
3102 		    (flow_attr.egress && !item_templates[i]->attr.egress) ||
3103 		    (flow_attr.transfer && !item_templates[i]->attr.transfer)) {
3104 			DRV_LOG(ERR, "pattern template and template table attribute mismatch");
3105 			rte_errno = EINVAL;
3106 			goto it_error;
3107 		}
3108 		ret = __atomic_add_fetch(&item_templates[i]->refcnt, 1,
3109 					 __ATOMIC_RELAXED);
3110 		if (ret <= 1) {
3111 			rte_errno = EINVAL;
3112 			goto it_error;
3113 		}
3114 		mt[i] = item_templates[i]->mt;
3115 		tbl->its[i] = item_templates[i];
3116 	}
3117 	tbl->nb_item_templates = nb_item_templates;
3118 	/* Build the action template. */
3119 	for (i = 0; i < nb_action_templates; i++) {
3120 		uint32_t ret;
3121 
3122 		ret = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
3123 					 __ATOMIC_RELAXED);
3124 		if (ret <= 1) {
3125 			rte_errno = EINVAL;
3126 			goto at_error;
3127 		}
3128 		at[i] = action_templates[i]->tmpl;
3129 		tbl->ats[i].action_template = action_templates[i];
3130 		LIST_INIT(&tbl->ats[i].acts.act_list);
3131 		if (!port_started)
3132 			continue;
3133 		err = __flow_hw_actions_translate(dev, &tbl->cfg,
3134 						  &tbl->ats[i].acts,
3135 						  action_templates[i], &sub_error);
3136 		if (err) {
3137 			i++;
3138 			goto at_error;
3139 		}
3140 	}
3141 	tbl->nb_action_templates = nb_action_templates;
3142 	tbl->matcher = mlx5dr_matcher_create
3143 		(tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
3144 	if (!tbl->matcher)
3145 		goto at_error;
3146 	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
3147 		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
3148 		    MLX5DR_TABLE_TYPE_NIC_RX);
3149 	if (port_started)
3150 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
3151 	else
3152 		LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
3153 	return tbl;
3154 at_error:
3155 	while (i--) {
3156 		__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
3157 		__atomic_sub_fetch(&action_templates[i]->refcnt,
3158 				   1, __ATOMIC_RELAXED);
3159 	}
3160 	i = nb_item_templates;
3161 it_error:
3162 	while (i--)
3163 		__atomic_sub_fetch(&item_templates[i]->refcnt,
3164 				   1, __ATOMIC_RELAXED);
3165 error:
3166 	err = rte_errno;
3167 	if (tbl) {
3168 		if (tbl->grp)
3169 			mlx5_hlist_unregister(priv->sh->groups,
3170 					      &tbl->grp->entry);
3171 		if (tbl->flow)
3172 			mlx5_ipool_destroy(tbl->flow);
3173 		mlx5_free(tbl);
3174 	}
3175 	if (error != NULL) {
3176 		if (sub_error.type == RTE_FLOW_ERROR_TYPE_NONE)
3177 			rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3178 					   "Failed to create template table");
3179 		else
3180 			rte_memcpy(error, &sub_error, sizeof(sub_error));
3181 	}
3182 	return NULL;
3183 }
3184 
3185 /**
3186  * Update flow template table.
3187  *
3188  * @param[in] dev
3189  *   Pointer to the rte_eth_dev structure.
3190  * @param[out] error
3191  *   Pointer to error structure.
3192  *
3193  * @return
3194  *    0 on success, negative value otherwise and rte_errno is set.
3195  */
3196 int
3197 flow_hw_table_update(struct rte_eth_dev *dev,
3198 		     struct rte_flow_error *error)
3199 {
3200 	struct mlx5_priv *priv = dev->data->dev_private;
3201 	struct rte_flow_template_table *tbl;
3202 
3203 	while ((tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo)) != NULL) {
3204 		if (flow_hw_actions_translate(dev, tbl, error))
3205 			return -1;
3206 		LIST_REMOVE(tbl, next);
3207 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
3208 	}
3209 	return 0;
3210 }
3211 
3212 /**
3213  * Translates group index specified by the user in @p attr to internal
3214  * group index.
3215  *
3216  * Translation is done by incrementing group index, so group n becomes n + 1.
3217  *
3218  * @param[in] dev
3219  *   Pointer to Ethernet device.
3220  * @param[in] cfg
3221  *   Pointer to the template table configuration.
3222  * @param[in] group
3223  *   Currently used group index (table group or jump destination).
3224  * @param[out] table_group
3225  *   Pointer to output group index.
3226  * @param[out] error
3227  *   Pointer to error structure.
3228  *
3229  * @return
3230  *   0 on success. Otherwise, returns negative error code, rte_errno is set
3231  *   and error structure is filled.
3232  */
3233 static int
3234 flow_hw_translate_group(struct rte_eth_dev *dev,
3235 			const struct mlx5_flow_template_table_cfg *cfg,
3236 			uint32_t group,
3237 			uint32_t *table_group,
3238 			struct rte_flow_error *error)
3239 {
3240 	struct mlx5_priv *priv = dev->data->dev_private;
3241 	struct mlx5_sh_config *config = &priv->sh->config;
3242 	const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
3243 
3244 	if (config->dv_esw_en &&
3245 	    priv->fdb_def_rule &&
3246 	    cfg->external &&
3247 	    flow_attr->transfer) {
3248 		if (group > MLX5_HW_MAX_TRANSFER_GROUP)
3249 			return rte_flow_error_set(error, EINVAL,
3250 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3251 						  NULL,
3252 						  "group index not supported");
3253 		*table_group = group + 1;
3254 	} else if (config->dv_esw_en &&
3255 		   !(config->repr_matching && config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) &&
3256 		   cfg->external &&
3257 		   flow_attr->egress) {
3258 		/*
3259 		 * On E-Switch setups, egress group translation is not done if and only if
3260 		 * representor matching is disabled and legacy metadata mode is selected.
3261 		 * In all other cases, egress group 0 is reserved for representor tagging flows
3262 		 * and metadata copy flows.
3263 		 */
3264 		if (group > MLX5_HW_MAX_EGRESS_GROUP)
3265 			return rte_flow_error_set(error, EINVAL,
3266 						  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3267 						  NULL,
3268 						  "group index not supported");
3269 		*table_group = group + 1;
3270 	} else {
3271 		*table_group = group;
3272 	}
3273 	return 0;
3274 }
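
/*
 * Illustrative sketch (not part of the driver; dev, cfg and err are assumed
 * to be in scope): with E-Switch enabled, the FDB default rule active and an
 * external transfer table, application group N is remapped to hardware
 * group N + 1, e.g.:
 *
 *	uint32_t hw_group;
 *
 *	if (flow_hw_translate_group(dev, &cfg, 0, &hw_group, &err) == 0)
 *		MLX5_ASSERT(hw_group == 1);
 */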
3275 
3276 /**
3277  * Create flow table.
3278  *
3279  * This function is a wrapper over @ref flow_hw_table_create(). It translates the group
3280  * index provided by the user to the internal group value before creating the table.
3281  *
3282  * @param[in] dev
3283  *   Pointer to Ethernet device.
3284  * @param[in] attr
3285  *   Pointer to the table attributes.
3286  * @param[in] item_templates
3287  *   Item template array to be bound to the table.
3288  * @param[in] nb_item_templates
3289  *   Number of item templates.
3290  * @param[in] action_templates
3291  *   Action template array to be bound to the table.
3292  * @param[in] nb_action_templates
3293  *   Number of action templates.
3294  * @param[out] error
3295  *   Pointer to error structure.
3296  *
3297  * @return
3298  *   Table pointer on success. Otherwise, NULL is returned, rte_errno is set
3299  *   and error structure is filled.
3300  */
3301 static struct rte_flow_template_table *
3302 flow_hw_template_table_create(struct rte_eth_dev *dev,
3303 			      const struct rte_flow_template_table_attr *attr,
3304 			      struct rte_flow_pattern_template *item_templates[],
3305 			      uint8_t nb_item_templates,
3306 			      struct rte_flow_actions_template *action_templates[],
3307 			      uint8_t nb_action_templates,
3308 			      struct rte_flow_error *error)
3309 {
3310 	struct mlx5_flow_template_table_cfg cfg = {
3311 		.attr = *attr,
3312 		.external = true,
3313 	};
3314 	uint32_t group = attr->flow_attr.group;
3315 
3316 	if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
3317 		return NULL;
3318 	return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
3319 				    action_templates, nb_action_templates, error);
3320 }
3321 
3322 /**
3323  * Destroy flow table.
3324  *
3325  * @param[in] dev
3326  *   Pointer to the rte_eth_dev structure.
3327  * @param[in] table
3328  *   Pointer to the table to be destroyed.
3329  * @param[out] error
3330  *   Pointer to error structure.
3331  *
3332  * @return
3333  *   0 on success, a negative errno value otherwise and rte_errno is set.
3334  */
3335 static int
3336 flow_hw_table_destroy(struct rte_eth_dev *dev,
3337 		      struct rte_flow_template_table *table,
3338 		      struct rte_flow_error *error)
3339 {
3340 	struct mlx5_priv *priv = dev->data->dev_private;
3341 	int i;
3342 	uint32_t fidx = 1;
3343 
3344 	/* Build ipool allocated object bitmap. */
3345 	mlx5_ipool_flush_cache(table->flow);
3346 	/* Check if ipool has allocated objects. */
3347 	if (table->refcnt || mlx5_ipool_get_next(table->flow, &fidx)) {
3348 		DRV_LOG(WARNING, "Table %p is still in using.", (void *)table);
3349 		return rte_flow_error_set(error, EBUSY,
3350 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3351 				   NULL,
3352 				   "table in using");
3353 	}
3354 	LIST_REMOVE(table, next);
3355 	for (i = 0; i < table->nb_item_templates; i++)
3356 		__atomic_sub_fetch(&table->its[i]->refcnt,
3357 				   1, __ATOMIC_RELAXED);
3358 	for (i = 0; i < table->nb_action_templates; i++) {
3359 		__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
3360 		__atomic_sub_fetch(&table->ats[i].action_template->refcnt,
3361 				   1, __ATOMIC_RELAXED);
3362 	}
3363 	mlx5dr_matcher_destroy(table->matcher);
3364 	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
3365 	mlx5_ipool_destroy(table->flow);
3366 	mlx5_free(table);
3367 	return 0;
3368 }
3369 
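/* Check whether the modify_field action uses the given field as source or destination. */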
3370 static bool
3371 flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
3372 			     enum rte_flow_field_id field)
3373 {
3374 	return action->src.field == field || action->dst.field == field;
3375 }
3376 
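/**
 * Validate modify_field action and mask of an actions template.
 *
 * @param[in] action
 *   Pointer to the template modify_field action.
 * @param[in] mask
 *   Pointer to the template modify_field mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */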
3377 static int
3378 flow_hw_validate_action_modify_field(const struct rte_flow_action *action,
3379 				     const struct rte_flow_action *mask,
3380 				     struct rte_flow_error *error)
3381 {
3382 	const struct rte_flow_action_modify_field *action_conf =
3383 		action->conf;
3384 	const struct rte_flow_action_modify_field *mask_conf =
3385 		mask->conf;
3386 
3387 	if (action_conf->operation != mask_conf->operation)
3388 		return rte_flow_error_set(error, EINVAL,
3389 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3390 				"modify_field operation mask and template are not equal");
3391 	if (action_conf->dst.field != mask_conf->dst.field)
3392 		return rte_flow_error_set(error, EINVAL,
3393 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3394 				"destination field mask and template are not equal");
3395 	if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
3396 	    action_conf->dst.field == RTE_FLOW_FIELD_VALUE)
3397 		return rte_flow_error_set(error, EINVAL,
3398 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3399 				"immediate value and pointer cannot be used as destination");
3400 	if (mask_conf->dst.level != UINT32_MAX)
3401 		return rte_flow_error_set(error, EINVAL,
3402 			RTE_FLOW_ERROR_TYPE_ACTION, action,
3403 			"destination encapsulation level must be fully masked");
3404 	if (mask_conf->dst.offset != UINT32_MAX)
3405 		return rte_flow_error_set(error, EINVAL,
3406 			RTE_FLOW_ERROR_TYPE_ACTION, action,
3407 			"destination offset level must be fully masked");
3408 	if (action_conf->src.field != mask_conf->src.field)
3409 		return rte_flow_error_set(error, EINVAL,
3410 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3411 				"destination field mask and template are not equal");
3412 	if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
3413 	    action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
3414 		if (mask_conf->src.level != UINT32_MAX)
3415 			return rte_flow_error_set(error, EINVAL,
3416 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3417 				"source encapsulation level must be fully masked");
3418 		if (mask_conf->src.offset != UINT32_MAX)
3419 			return rte_flow_error_set(error, EINVAL,
3420 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3421 				"source offset level must be fully masked");
3422 	}
3423 	if (mask_conf->width != UINT32_MAX)
3424 		return rte_flow_error_set(error, EINVAL,
3425 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3426 				"modify_field width field must be fully masked");
3427 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
3428 		return rte_flow_error_set(error, EINVAL,
3429 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3430 				"modifying arbitrary place in a packet is not supported");
3431 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
3432 		return rte_flow_error_set(error, EINVAL,
3433 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3434 				"modifying vlan_type is not supported");
3435 	if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI))
3436 		return rte_flow_error_set(error, EINVAL,
3437 				RTE_FLOW_ERROR_TYPE_ACTION, action,
3438 				"modifying Geneve VNI is not supported");
3439 	return 0;
3440 }
3441 
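/**
 * Validate represented_port action and mask of an actions template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action
 *   Pointer to the template represented_port action.
 * @param[in] mask
 *   Pointer to the template represented_port mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */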
3442 static int
3443 flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
3444 					 const struct rte_flow_action *action,
3445 					 const struct rte_flow_action *mask,
3446 					 struct rte_flow_error *error)
3447 {
3448 	const struct rte_flow_action_ethdev *action_conf = action->conf;
3449 	const struct rte_flow_action_ethdev *mask_conf = mask->conf;
3450 	struct mlx5_priv *priv = dev->data->dev_private;
3451 
3452 	if (!priv->sh->config.dv_esw_en)
3453 		return rte_flow_error_set(error, EINVAL,
3454 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3455 					  "cannot use represented_port actions"
3456 					  " without an E-Switch");
3457 	if (mask_conf && mask_conf->port_id) {
3458 		struct mlx5_priv *port_priv;
3459 		struct mlx5_priv *dev_priv;
3460 
3461 		if (!action_conf)
3462 			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3463 						  action, "port index was not provided");
3464 		port_priv = mlx5_port_to_eswitch_info(action_conf->port_id, false);
3465 		if (!port_priv)
3466 			return rte_flow_error_set(error, rte_errno,
3467 						  RTE_FLOW_ERROR_TYPE_ACTION,
3468 						  action,
3469 						  "failed to obtain E-Switch"
3470 						  " info for port");
3471 		dev_priv = mlx5_dev_to_eswitch_info(dev);
3472 		if (!dev_priv)
3473 			return rte_flow_error_set(error, rte_errno,
3474 						  RTE_FLOW_ERROR_TYPE_ACTION,
3475 						  action,
3476 						  "failed to obtain E-Switch"
3477 						  " info for transfer proxy");
3478 		if (port_priv->domain_id != dev_priv->domain_id)
3479 			return rte_flow_error_set(error, rte_errno,
3480 						  RTE_FLOW_ERROR_TYPE_ACTION,
3481 						  action,
3482 						  "cannot forward to port from"
3483 						  " a different E-Switch");
3484 	}
3485 	return 0;
3486 }
3487 
3488 /**
3489  * Validate AGE action.
3490  *
3491  * @param[in] dev
3492  *   Pointer to rte_eth_dev structure.
3493  * @param[in] action
3494  *   Pointer to the indirect action.
3495  * @param[in] action_flags
3496  *   Holds the actions detected until now.
3497  * @param[in] fixed_cnt
3498  *   Indicator if this list has a fixed COUNT action.
3499  * @param[out] error
3500  *   Pointer to error structure.
3501  *
3502  * @return
3503  *   0 on success, a negative errno value otherwise and rte_errno is set.
3504  */
3505 static int
3506 flow_hw_validate_action_age(struct rte_eth_dev *dev,
3507 			    const struct rte_flow_action *action,
3508 			    uint64_t action_flags, bool fixed_cnt,
3509 			    struct rte_flow_error *error)
3510 {
3511 	struct mlx5_priv *priv = dev->data->dev_private;
3512 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
3513 
3514 	if (!priv->sh->cdev->config.devx)
3515 		return rte_flow_error_set(error, ENOTSUP,
3516 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3517 					  NULL, "AGE action not supported");
3518 	if (age_info->ages_ipool == NULL)
3519 		return rte_flow_error_set(error, EINVAL,
3520 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3521 					  "aging pool not initialized");
3522 	if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
3523 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
3524 		return rte_flow_error_set(error, EINVAL,
3525 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3526 					  "duplicate AGE actions set");
3527 	if (fixed_cnt)
3528 		return rte_flow_error_set(error, EINVAL,
3529 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3530 					  "AGE and fixed COUNT combination is not supported");
3531 	return 0;
3532 }
3533 
3534 /**
3535  * Validate count action.
3536  *
3537  * @param[in] dev
3538  *   Pointer to rte_eth_dev structure.
3539  * @param[in] action
3540  *   Pointer to the indirect action.
3541  * @param[in] mask
3542  *   Pointer to the indirect action mask.
3543  * @param[in] action_flags
3544  *   Holds the actions detected until now.
3545  * @param[out] error
3546  *   Pointer to error structure.
3547  *
3548  * @return
3549  *   0 on success, a negative errno value otherwise and rte_errno is set.
3550  */
3551 static int
3552 flow_hw_validate_action_count(struct rte_eth_dev *dev,
3553 			      const struct rte_flow_action *action,
3554 			      const struct rte_flow_action *mask,
3555 			      uint64_t action_flags,
3556 			      struct rte_flow_error *error)
3557 {
3558 	struct mlx5_priv *priv = dev->data->dev_private;
3559 	const struct rte_flow_action_count *count = mask->conf;
3560 
3561 	if (!priv->sh->cdev->config.devx)
3562 		return rte_flow_error_set(error, ENOTSUP,
3563 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3564 					  "count action not supported");
3565 	if (!priv->hws_cpool)
3566 		return rte_flow_error_set(error, EINVAL,
3567 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3568 					  "counters pool not initialized");
3569 	if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
3570 	    (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
3571 		return rte_flow_error_set(error, EINVAL,
3572 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3573 					  "duplicate count actions set");
3574 	if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
3575 		return rte_flow_error_set(error, EINVAL,
3576 					  RTE_FLOW_ERROR_TYPE_ACTION, mask,
3577 					  "AGE and COUNT action shared by mask combination is not supported");
3578 	return 0;
3579 }
3580 
3581 /**
3582  * Validate meter_mark action.
3583  *
3584  * @param[in] dev
3585  *   Pointer to rte_eth_dev structure.
3586  * @param[in] action
3587  *   Pointer to the indirect action.
3588  * @param[out] error
3589  *   Pointer to error structure.
3590  *
3591  * @return
3592  *   0 on success, a negative errno value otherwise and rte_errno is set.
3593  */
3594 static int
3595 flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
3596 			      const struct rte_flow_action *action,
3597 			      struct rte_flow_error *error)
3598 {
3599 	struct mlx5_priv *priv = dev->data->dev_private;
3600 
3601 	RTE_SET_USED(action);
3602 
3603 	if (!priv->sh->cdev->config.devx)
3604 		return rte_flow_error_set(error, ENOTSUP,
3605 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3606 					  "meter_mark action not supported");
3607 	if (!priv->hws_mpool)
3608 		return rte_flow_error_set(error, EINVAL,
3609 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3610 					  "meter_mark pool not initialized");
3611 	return 0;
3612 }
3613 
3614 /**
3615  * Validate indirect action.
3616  *
3617  * @param[in] dev
3618  *   Pointer to rte_eth_dev structure.
3619  * @param[in] action
3620  *   Pointer to the indirect action.
3621  * @param[in] mask
3622  *   Pointer to the indirect action mask.
3623  * @param[in, out] action_flags
3624  *   Holds the actions detected until now.
3625  * @param[in, out] fixed_cnt
3626  *   Pointer to indicator if this list has a fixed COUNT action.
3627  * @param[out] error
3628  *   Pointer to error structure.
3629  *
3630  * @return
3631  *   0 on success, a negative errno value otherwise and rte_errno is set.
3632  */
3633 static int
3634 flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
3635 				 const struct rte_flow_action *action,
3636 				 const struct rte_flow_action *mask,
3637 				 uint64_t *action_flags, bool *fixed_cnt,
3638 				 struct rte_flow_error *error)
3639 {
3640 	uint32_t type;
3641 	int ret;
3642 
3643 	if (!mask)
3644 		return rte_flow_error_set(error, EINVAL,
3645 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3646 					  "Unable to determine indirect action type without a mask specified");
3647 	type = mask->type;
3648 	switch (type) {
3649 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
3650 		ret = flow_hw_validate_action_meter_mark(dev, mask, error);
3651 		if (ret < 0)
3652 			return ret;
3653 		*action_flags |= MLX5_FLOW_ACTION_METER;
3654 		break;
3655 	case RTE_FLOW_ACTION_TYPE_RSS:
3656 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
3657 		*action_flags |= MLX5_FLOW_ACTION_RSS;
3658 		break;
3659 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3660 		/* TODO: Validation logic (same as flow_hw_actions_validate) */
3661 		*action_flags |= MLX5_FLOW_ACTION_CT;
3662 		break;
3663 	case RTE_FLOW_ACTION_TYPE_COUNT:
3664 		if (action->conf && mask->conf) {
3665 			if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
3666 			    (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
3667 				/*
3668 				 * AGE cannot use indirect counter which is
3669 				 * shared with other flow rules.
3670 				 */
3671 				return rte_flow_error_set(error, EINVAL,
3672 						  RTE_FLOW_ERROR_TYPE_ACTION,
3673 						  NULL,
3674 						  "AGE and fixed COUNT combination is not supported");
3675 			*fixed_cnt = true;
3676 		}
3677 		ret = flow_hw_validate_action_count(dev, action, mask,
3678 						    *action_flags, error);
3679 		if (ret < 0)
3680 			return ret;
3681 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
3682 		break;
3683 	case RTE_FLOW_ACTION_TYPE_AGE:
3684 		ret = flow_hw_validate_action_age(dev, action, *action_flags,
3685 						  *fixed_cnt, error);
3686 		if (ret < 0)
3687 			return ret;
3688 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
3689 		break;
3690 	default:
3691 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
3692 		return rte_flow_error_set(error, ENOTSUP,
3693 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
3694 					  "Unsupported indirect action type");
3695 	}
3696 	return 0;
3697 }
3698 
3699 /**
3700  * Validate raw_encap action.
3701  *
3702  * @param[in] dev
3703  *   Pointer to rte_eth_dev structure.
3704  * @param[in] action
3705  *   Pointer to the indirect action.
3706  * @param[out] error
3707  *   Pointer to error structure.
3708  *
3709  * @return
3710  *   0 on success, a negative errno value otherwise and rte_errno is set.
3711  */
3712 static int
3713 flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
3714 				  const struct rte_flow_action *action,
3715 				  struct rte_flow_error *error)
3716 {
3717 	const struct rte_flow_action_raw_encap *raw_encap_data = action->conf;
3718 
3719 	if (!raw_encap_data || !raw_encap_data->size || !raw_encap_data->data)
3720 		return rte_flow_error_set(error, EINVAL,
3721 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3722 					  "invalid raw_encap_data");
3723 	return 0;
3724 }
3725 
3726 static inline uint16_t
3727 flow_hw_template_expand_modify_field(const struct rte_flow_action actions[],
3728 				     const struct rte_flow_action masks[],
3729 				     const struct rte_flow_action *mf_action,
3730 				     const struct rte_flow_action *mf_mask,
3731 				     struct rte_flow_action *new_actions,
3732 				     struct rte_flow_action *new_masks,
3733 				     uint64_t flags, uint32_t act_num)
3734 {
3735 	uint32_t i, tail;
3736 
3737 	MLX5_ASSERT(actions && masks);
3738 	MLX5_ASSERT(new_actions && new_masks);
3739 	MLX5_ASSERT(mf_action && mf_mask);
3740 	if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
3741 		/*
3742 		 * Application action template already has Modify Field.
3743 		 * Its location will be used in DR.
3744 		 * Expanded MF action can be added before the END.
3745 		 */
3746 		i = act_num - 1;
3747 		goto insert;
3748 	}
3749 	/*
3750 	 * Locate the first action positioned BEFORE the new MF.
3751 	 *
3752 	 * Search for a place to insert modify header
3753 	 * from the END action backwards:
3754 	 * 1. END is always present in actions array
3755 	 * 2. END location is always at action[act_num - 1]
3756 	 * 3. END always positioned AFTER modify field location
3757 	 *
3758 	 * Relative actions order is the same for RX, TX and FDB.
3759 	 *
3760 	 * Current actions order (draft-3)
3761 	 * @see action_order_arr[]
3762 	 */
3763 	for (i = act_num - 2; (int)i >= 0; i--) {
3764 		enum rte_flow_action_type type = actions[i].type;
3765 
3766 		if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
3767 			type = masks[i].type;
3768 		switch (type) {
3769 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3770 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3771 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3772 		case RTE_FLOW_ACTION_TYPE_DROP:
3773 		case RTE_FLOW_ACTION_TYPE_JUMP:
3774 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3775 		case RTE_FLOW_ACTION_TYPE_RSS:
3776 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3777 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3778 		case RTE_FLOW_ACTION_TYPE_VOID:
3779 		case RTE_FLOW_ACTION_TYPE_END:
3780 			break;
3781 		default:
3782 			i++; /* new MF inserted AFTER actions[i] */
3783 			goto insert;
3784 			break;
3785 		}
3786 	}
3787 	i = 0;
3788 insert:
3789 	tail = act_num - i; /* num action to move */
3790 	memcpy(new_actions, actions, sizeof(actions[0]) * i);
3791 	new_actions[i] = *mf_action;
3792 	memcpy(new_actions + i + 1, actions + i, sizeof(actions[0]) * tail);
3793 	memcpy(new_masks, masks, sizeof(masks[0]) * i);
3794 	new_masks[i] = *mf_mask;
3795 	memcpy(new_masks + i + 1, masks + i, sizeof(masks[0]) * tail);
3796 	return i;
3797 }
3798 
3799 static int
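/*
 * Validate the OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
 * action sequence and the consistency of its masks.
 */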
3800 flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
3801 				  const
3802 				  struct rte_flow_actions_template_attr *attr,
3803 				  const struct rte_flow_action *action,
3804 				  const struct rte_flow_action *mask,
3805 				  struct rte_flow_error *error)
3806 {
3807 #define X_FIELD(ptr, t, f) (((ptr)->conf) && ((t *)((ptr)->conf))->f)
3808 
3809 	const bool masked_push =
3810 		X_FIELD(mask + MLX5_HW_VLAN_PUSH_TYPE_IDX,
3811 			const struct rte_flow_action_of_push_vlan, ethertype);
3812 	bool masked_param;
3813 
3814 	/*
3815 	 * Mandatory actions order:
3816 	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
3817 	 */
3818 	RTE_SET_USED(dev);
3819 	RTE_SET_USED(attr);
3820 	/* Check that mask matches OF_PUSH_VLAN */
3821 	if (mask[MLX5_HW_VLAN_PUSH_TYPE_IDX].type !=
3822 	    RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
3823 		return rte_flow_error_set(error, EINVAL,
3824 					  RTE_FLOW_ERROR_TYPE_ACTION,
3825 					  action, "OF_PUSH_VLAN: mask does not match");
3826 	/* Check that the second template and mask items are SET_VLAN_VID */
3827 	if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
3828 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID ||
3829 	    mask[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
3830 	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3831 		return rte_flow_error_set(error, EINVAL,
3832 					  RTE_FLOW_ERROR_TYPE_ACTION,
3833 					  action, "OF_PUSH_VLAN: invalid actions order");
3834 	masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_VID_IDX,
3835 			       const struct rte_flow_action_of_set_vlan_vid,
3836 			       vlan_vid);
3837 	/*
3838 	 * PMD requires the OF_SET_VLAN_VID mask to match OF_PUSH_VLAN.
3839 	 */
3840 	if (masked_push ^ masked_param)
3841 		return rte_flow_error_set(error, EINVAL,
3842 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
3843 					  "OF_SET_VLAN_VID: mask does not match OF_PUSH_VLAN");
3844 	if (is_of_vlan_pcp_present(action)) {
3845 		if (mask[MLX5_HW_VLAN_PUSH_PCP_IDX].type !=
3846 		     RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
3847 			return rte_flow_error_set(error, EINVAL,
3848 						  RTE_FLOW_ERROR_TYPE_ACTION,
3849 						  action, "OF_SET_VLAN_PCP: missing mask configuration");
3850 		masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_PCP_IDX,
3851 				       const struct
3852 				       rte_flow_action_of_set_vlan_pcp,
3853 				       vlan_pcp);
3854 		/*
3855 		 * PMD requires the OF_SET_VLAN_PCP mask to match OF_PUSH_VLAN.
3856 		 */
3857 		if (masked_push ^ masked_param)
3858 			return rte_flow_error_set(error, EINVAL,
3859 						  RTE_FLOW_ERROR_TYPE_ACTION, action,
3860 						  "OF_SET_VLAN_PCP: mask does not match OF_PUSH_VLAN");
3861 	}
3862 	return 0;
3863 #undef X_FIELD
3864 }
3865 
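/**
 * Validate flow actions and masks of an actions template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the actions template attributes.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[in] masks
 *   List of actions that marks which of the action's members are constant.
 * @param[out] act_flags
 *   Optional pointer to store the detected MLX5_FLOW_ACTION_* flags.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */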
3866 static int
3867 mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
3868 			      const struct rte_flow_actions_template_attr *attr,
3869 			      const struct rte_flow_action actions[],
3870 			      const struct rte_flow_action masks[],
3871 			      uint64_t *act_flags,
3872 			      struct rte_flow_error *error)
3873 {
3874 	struct mlx5_priv *priv = dev->data->dev_private;
3875 	const struct rte_flow_action_count *count_mask = NULL;
3876 	bool fixed_cnt = false;
3877 	uint64_t action_flags = 0;
3878 	uint16_t i;
3879 	bool actions_end = false;
3880 	int ret;
3881 
3882 	/* FDB actions are only valid on the transfer proxy port. */
3883 	if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
3884 		return rte_flow_error_set(error, EINVAL,
3885 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3886 					  NULL,
3887 					  "transfer actions are only valid to proxy port");
3888 	for (i = 0; !actions_end; ++i) {
3889 		const struct rte_flow_action *action = &actions[i];
3890 		const struct rte_flow_action *mask = &masks[i];
3891 
3892 		MLX5_ASSERT(i < MLX5_HW_MAX_ACTS);
3893 		if (action->type != RTE_FLOW_ACTION_TYPE_INDIRECT &&
3894 		    action->type != mask->type)
3895 			return rte_flow_error_set(error, ENOTSUP,
3896 						  RTE_FLOW_ERROR_TYPE_ACTION,
3897 						  action,
3898 						  "mask type does not match action type");
3899 		switch (action->type) {
3900 		case RTE_FLOW_ACTION_TYPE_VOID:
3901 			break;
3902 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
3903 			ret = flow_hw_validate_action_indirect(dev, action,
3904 							       mask,
3905 							       &action_flags,
3906 							       &fixed_cnt,
3907 							       error);
3908 			if (ret < 0)
3909 				return ret;
3910 			break;
3911 		case RTE_FLOW_ACTION_TYPE_MARK:
3912 			/* TODO: Validation logic */
3913 			action_flags |= MLX5_FLOW_ACTION_MARK;
3914 			break;
3915 		case RTE_FLOW_ACTION_TYPE_DROP:
3916 			/* TODO: Validation logic */
3917 			action_flags |= MLX5_FLOW_ACTION_DROP;
3918 			break;
3919 		case RTE_FLOW_ACTION_TYPE_JUMP:
3920 			/* TODO: Validation logic */
3921 			action_flags |= MLX5_FLOW_ACTION_JUMP;
3922 			break;
3923 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3924 			/* TODO: Validation logic */
3925 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
3926 			break;
3927 		case RTE_FLOW_ACTION_TYPE_RSS:
3928 			/* TODO: Validation logic */
3929 			action_flags |= MLX5_FLOW_ACTION_RSS;
3930 			break;
3931 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3932 			/* TODO: Validation logic */
3933 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
3934 			break;
3935 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3936 			/* TODO: Validation logic */
3937 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
3938 			break;
3939 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3940 			/* TODO: Validation logic */
3941 			action_flags |= MLX5_FLOW_ACTION_DECAP;
3942 			break;
3943 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3944 			/* TODO: Validation logic */
3945 			action_flags |= MLX5_FLOW_ACTION_DECAP;
3946 			break;
3947 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3948 			ret = flow_hw_validate_action_raw_encap(dev, action, error);
3949 			if (ret < 0)
3950 				return ret;
3951 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
3952 			break;
3953 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3954 			/* TODO: Validation logic */
3955 			action_flags |= MLX5_FLOW_ACTION_DECAP;
3956 			break;
3957 		case RTE_FLOW_ACTION_TYPE_METER:
3958 			/* TODO: Validation logic */
3959 			action_flags |= MLX5_FLOW_ACTION_METER;
3960 			break;
3961 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
3962 			ret = flow_hw_validate_action_meter_mark(dev, action,
3963 								 error);
3964 			if (ret < 0)
3965 				return ret;
3966 			action_flags |= MLX5_FLOW_ACTION_METER;
3967 			break;
3968 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3969 			ret = flow_hw_validate_action_modify_field(action,
3970 									mask,
3971 									error);
3972 			if (ret < 0)
3973 				return ret;
3974 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
3975 			break;
3976 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3977 			ret = flow_hw_validate_action_represented_port
3978 					(dev, action, mask, error);
3979 			if (ret < 0)
3980 				return ret;
3981 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3982 			break;
3983 		case RTE_FLOW_ACTION_TYPE_AGE:
3984 			if (count_mask && count_mask->id)
3985 				fixed_cnt = true;
3986 			ret = flow_hw_validate_action_age(dev, action,
3987 							  action_flags,
3988 							  fixed_cnt, error);
3989 			if (ret < 0)
3990 				return ret;
3991 			action_flags |= MLX5_FLOW_ACTION_AGE;
3992 			break;
3993 		case RTE_FLOW_ACTION_TYPE_COUNT:
3994 			ret = flow_hw_validate_action_count(dev, action, mask,
3995 							    action_flags,
3996 							    error);
3997 			if (ret < 0)
3998 				return ret;
3999 			count_mask = mask->conf;
4000 			action_flags |= MLX5_FLOW_ACTION_COUNT;
4001 			break;
4002 		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
4003 			/* TODO: Validation logic */
4004 			action_flags |= MLX5_FLOW_ACTION_CT;
4005 			break;
4006 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4007 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
4008 			break;
4009 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4010 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
4011 			break;
4012 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4013 			ret = flow_hw_validate_action_push_vlan
4014 					(dev, attr, action, mask, error);
4015 			if (ret != 0)
4016 				return ret;
4017 			i += is_of_vlan_pcp_present(action) ?
4018 				MLX5_HW_VLAN_PUSH_PCP_IDX :
4019 				MLX5_HW_VLAN_PUSH_VID_IDX;
4020 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
4021 			break;
4022 		case RTE_FLOW_ACTION_TYPE_END:
4023 			actions_end = true;
4024 			break;
4025 		default:
4026 			return rte_flow_error_set(error, ENOTSUP,
4027 						  RTE_FLOW_ERROR_TYPE_ACTION,
4028 						  action,
4029 						  "action not supported in template API");
4030 		}
4031 	}
4032 	if (act_flags != NULL)
4033 		*act_flags = action_flags;
4034 	return 0;
4035 }
4036 
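/* Wrapper of mlx5_flow_hw_actions_validate() which does not report detected action flags. */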
4037 static int
4038 flow_hw_actions_validate(struct rte_eth_dev *dev,
4039 			 const struct rte_flow_actions_template_attr *attr,
4040 			 const struct rte_flow_action actions[],
4041 			 const struct rte_flow_action masks[],
4042 			 struct rte_flow_error *error)
4043 {
4044 	return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL,
4045 					     error);
4046 }
4047 
4048 
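/* Mapping of supported rte_flow action types to DR action types. */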
4049 static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
4050 	[RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
4051 	[RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
4052 	[RTE_FLOW_ACTION_TYPE_JUMP] = MLX5DR_ACTION_TYP_FT,
4053 	[RTE_FLOW_ACTION_TYPE_QUEUE] = MLX5DR_ACTION_TYP_TIR,
4054 	[RTE_FLOW_ACTION_TYPE_RSS] = MLX5DR_ACTION_TYP_TIR,
4055 	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = MLX5DR_ACTION_TYP_L2_TO_TNL_L2,
4056 	[RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = MLX5DR_ACTION_TYP_L2_TO_TNL_L2,
4057 	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = MLX5DR_ACTION_TYP_TNL_L2_TO_L2,
4058 	[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_TNL_L2_TO_L2,
4059 	[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
4060 	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
4061 	[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
4062 	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
4063 	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
4064 };
4065 
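/**
 * Fill DR action template slots for an indirect (shared) action.
 *
 * @param[in] mask
 *   Pointer to the action mask determining the indirect action type.
 * @param[in] action_src
 *   Offset of the action in the flow actions template.
 * @param[in, out] action_types
 *   Array of DR action types to be filled in.
 * @param[in, out] curr_off
 *   Pointer to the current offset in @p action_types.
 * @param[in, out] cnt_off
 *   Pointer to the counter action offset, shared by AGE and COUNT actions.
 * @param[in, out] at
 *   Pointer to the flow actions template storing the DR action offsets.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */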
4066 static int
4067 flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
4068 					  unsigned int action_src,
4069 					  enum mlx5dr_action_type *action_types,
4070 					  uint16_t *curr_off, uint16_t *cnt_off,
4071 					  struct rte_flow_actions_template *at)
4072 {
4073 	uint32_t type;
4074 
4075 	if (!mask) {
4076 		DRV_LOG(WARNING, "Unable to determine indirect action type "
4077 			"without a mask specified");
4078 		return -EINVAL;
4079 	}
4080 	type = mask->type;
4081 	switch (type) {
4082 	case RTE_FLOW_ACTION_TYPE_RSS:
4083 		at->actions_off[action_src] = *curr_off;
4084 		action_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;
4085 		*curr_off = *curr_off + 1;
4086 		break;
4087 	case RTE_FLOW_ACTION_TYPE_AGE:
4088 	case RTE_FLOW_ACTION_TYPE_COUNT:
4089 		/*
4090 		 * Both the AGE and COUNT actions need a counter; the first one fills
4091 		 * the action_types array, and the second one only records the shared offset.
4092 		 */
4093 		if (*cnt_off == UINT16_MAX) {
4094 			*cnt_off = *curr_off;
4095 			action_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;
4096 			*curr_off = *curr_off + 1;
4097 		}
4098 		at->actions_off[action_src] = *cnt_off;
4099 		break;
4100 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
4101 		at->actions_off[action_src] = *curr_off;
4102 		action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
4103 		*curr_off = *curr_off + 1;
4104 		break;
4105 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
4106 		at->actions_off[action_src] = *curr_off;
4107 		action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_METER;
4108 		*curr_off = *curr_off + 1;
4109 		break;
4110 	default:
4111 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
4112 		return -EINVAL;
4113 	}
4114 	return 0;
4115 }
4116 
4117 /**
4118  * Create DR action template based on a provided sequence of flow actions.
4119  *
4120  * @param[in] at
4121  *   Pointer to flow actions template to be updated.
4122  *
4123  * @return
4124  *   DR action template pointer on success and action offsets in @p at are updated.
4125  *   NULL otherwise.
4126  */
4127 static struct mlx5dr_action_template *
4128 flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
4129 {
4130 	struct mlx5dr_action_template *dr_template;
4131 	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { MLX5DR_ACTION_TYP_LAST };
4132 	unsigned int i;
4133 	uint16_t curr_off;
4134 	enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_TNL_L2_TO_L2;
4135 	uint16_t reformat_off = UINT16_MAX;
4136 	uint16_t mhdr_off = UINT16_MAX;
4137 	uint16_t cnt_off = UINT16_MAX;
4138 	int ret;

4139 	for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
4140 		const struct rte_flow_action_raw_encap *raw_encap_data;
4141 		size_t data_size;
4142 		enum mlx5dr_action_type type;
4143 
4144 		if (curr_off >= MLX5_HW_MAX_ACTS)
4145 			goto err_actions_num;
4146 		switch (at->actions[i].type) {
4147 		case RTE_FLOW_ACTION_TYPE_VOID:
4148 			break;
4149 		case RTE_FLOW_ACTION_TYPE_INDIRECT:
4150 			ret = flow_hw_dr_actions_template_handle_shared
4151 								 (&at->masks[i],
4152 								  i,
4153 								  action_types,
4154 								  &curr_off,
4155 								  &cnt_off, at);
4156 			if (ret)
4157 				return NULL;
4158 			break;
4159 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4160 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4161 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4162 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4163 			MLX5_ASSERT(reformat_off == UINT16_MAX);
4164 			reformat_off = curr_off++;
4165 			reformat_act_type = mlx5_hw_dr_action_types[at->actions[i].type];
4166 			break;
4167 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4168 			raw_encap_data = at->actions[i].conf;
4169 			data_size = raw_encap_data->size;
4170 			if (reformat_off != UINT16_MAX) {
4171 				reformat_act_type = data_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
4172 					MLX5DR_ACTION_TYP_TNL_L3_TO_L2 :
4173 					MLX5DR_ACTION_TYP_L2_TO_TNL_L3;
4174 			} else {
4175 				reformat_off = curr_off++;
4176 				reformat_act_type = MLX5DR_ACTION_TYP_L2_TO_TNL_L2;
4177 			}
4178 			break;
4179 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4180 			reformat_off = curr_off++;
4181 			reformat_act_type = MLX5DR_ACTION_TYP_TNL_L2_TO_L2;
4182 			break;
4183 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
4184 			if (mhdr_off == UINT16_MAX) {
4185 				mhdr_off = curr_off++;
4186 				type = mlx5_hw_dr_action_types[at->actions[i].type];
4187 				action_types[mhdr_off] = type;
4188 			}
4189 			break;
4190 		case RTE_FLOW_ACTION_TYPE_METER:
4191 			at->actions_off[i] = curr_off;
4192 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
4193 			if (curr_off >= MLX5_HW_MAX_ACTS)
4194 				goto err_actions_num;
4195 			action_types[curr_off++] = MLX5DR_ACTION_TYP_FT;
4196 			break;
4197 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4198 			type = mlx5_hw_dr_action_types[at->actions[i].type];
4199 			at->actions_off[i] = curr_off;
4200 			action_types[curr_off++] = type;
4201 			i += is_of_vlan_pcp_present(at->actions + i) ?
4202 				MLX5_HW_VLAN_PUSH_PCP_IDX :
4203 				MLX5_HW_VLAN_PUSH_VID_IDX;
4204 			break;
4205 		case RTE_FLOW_ACTION_TYPE_METER_MARK:
4206 			at->actions_off[i] = curr_off;
4207 			action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
4208 			if (curr_off >= MLX5_HW_MAX_ACTS)
4209 				goto err_actions_num;
4210 			break;
4211 		case RTE_FLOW_ACTION_TYPE_AGE:
4212 		case RTE_FLOW_ACTION_TYPE_COUNT:
4213 			/*
4214 			 * Both the AGE and COUNT actions need a counter; the first
4215 			 * one encountered fills the action_types array, and the
4216 			 * second one only records the shared offset.
4217 			 */
4218 			if (cnt_off == UINT16_MAX) {
4219 				cnt_off = curr_off++;
4220 				action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
4221 			}
4222 			at->actions_off[i] = cnt_off;
4223 			break;
4224 		default:
4225 			type = mlx5_hw_dr_action_types[at->actions[i].type];
4226 			at->actions_off[i] = curr_off;
4227 			action_types[curr_off++] = type;
4228 			break;
4229 		}
4230 	}
4231 	if (curr_off >= MLX5_HW_MAX_ACTS)
4232 		goto err_actions_num;
4233 	if (mhdr_off != UINT16_MAX)
4234 		at->mhdr_off = mhdr_off;
4235 	if (reformat_off != UINT16_MAX) {
4236 		at->reformat_off = reformat_off;
4237 		action_types[reformat_off] = reformat_act_type;
4238 	}
4239 	dr_template = mlx5dr_action_template_create(action_types);
4240 	if (dr_template)
4241 		at->dr_actions_num = curr_off;
4242 	else
4243 		DRV_LOG(ERR, "Failed to create DR action template: %d", rte_errno);
4244 	return dr_template;
4245 err_actions_num:
4246 	DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed in template",
4247 		curr_off, MLX5_HW_MAX_ACTS);
4248 	return NULL;
4249 }
4250 
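/*
 * Replace the OF_SET_VLAN_VID action and mask at index set_vlan_vid_ix with
 * the equivalent MODIFY_FIELD action and mask writing the VLAN ID field.
 */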
4251 static void
4252 flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
4253 		     struct rte_flow_action *ra,
4254 		     struct rte_flow_action *rm,
4255 		     struct rte_flow_action_modify_field *spec,
4256 		     struct rte_flow_action_modify_field *mask,
4257 		     int set_vlan_vid_ix)
4258 {
4259 	struct rte_flow_error error;
4260 	const bool masked = rm[set_vlan_vid_ix].conf &&
4261 		(((const struct rte_flow_action_of_set_vlan_vid *)
4262 			rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
4263 	const struct rte_flow_action_of_set_vlan_vid *conf =
4264 		ra[set_vlan_vid_ix].conf;
4265 	rte_be16_t vid = masked ? conf->vlan_vid : 0;
4266 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
4267 					       NULL, &error);
4268 	*spec = (typeof(*spec)) {
4269 		.operation = RTE_FLOW_MODIFY_SET,
4270 		.dst = {
4271 			.field = RTE_FLOW_FIELD_VLAN_ID,
4272 			.level = 0, .offset = 0,
4273 		},
4274 		.src = {
4275 			.field = RTE_FLOW_FIELD_VALUE,
4276 			.level = vid,
4277 			.offset = 0,
4278 		},
4279 		.width = width,
4280 	};
4281 	*mask = (typeof(*mask)) {
4282 		.operation = RTE_FLOW_MODIFY_SET,
4283 		.dst = {
4284 			.field = RTE_FLOW_FIELD_VLAN_ID,
4285 			.level = 0xffffffff, .offset = 0xffffffff,
4286 		},
4287 		.src = {
4288 			.field = RTE_FLOW_FIELD_VALUE,
4289 			.level = masked ? (1U << width) - 1 : 0,
4290 			.offset = 0,
4291 		},
4292 		.width = 0xffffffff,
4293 	};
4294 	ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
4295 	ra[set_vlan_vid_ix].conf = spec;
4296 	rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
4297 	rm[set_vlan_vid_ix].conf = mask;
4298 }
4299 
4300 static __rte_always_inline int
4301 flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
4302 			       struct mlx5_hw_q_job *job,
4303 			       struct mlx5_action_construct_data *act_data,
4304 			       const struct mlx5_hw_actions *hw_acts,
4305 			       const struct rte_flow_action *action)
4306 {
4307 	struct rte_flow_error error;
4308 	rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
4309 			   action->conf)->vlan_vid;
4310 	int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
4311 					       NULL, &error);
4312 	struct rte_flow_action_modify_field conf = {
4313 		.operation = RTE_FLOW_MODIFY_SET,
4314 		.dst = {
4315 			.field = RTE_FLOW_FIELD_VLAN_ID,
4316 			.level = 0, .offset = 0,
4317 		},
4318 		.src = {
4319 			.field = RTE_FLOW_FIELD_VALUE,
4320 			.level = vid,
4321 			.offset = 0,
4322 		},
4323 		.width = width,
4324 	};
4325 	struct rte_flow_action modify_action = {
4326 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
4327 		.conf = &conf
4328 	};
4329 
4330 	return flow_hw_modify_field_construct(job, act_data, hw_acts,
4331 					      &modify_action);
4332 }
4333 
4334 /**
4335  * Create flow action template.
4336  *
4337  * @param[in] dev
4338  *   Pointer to the rte_eth_dev structure.
4339  * @param[in] attr
4340  *   Pointer to the action template attributes.
4341  * @param[in] actions
4342  *   Associated actions (list terminated by the END action).
4343  * @param[in] masks
4344  * List of actions that marks which of the action's members are constant.
4345  * @param[out] error
4346  *   Pointer to error structure.
4347  *
4348  * @return
4349  *   Action template pointer on success, NULL otherwise and rte_errno is set.
4350  */
4351 static struct rte_flow_actions_template *
4352 flow_hw_actions_template_create(struct rte_eth_dev *dev,
4353 			const struct rte_flow_actions_template_attr *attr,
4354 			const struct rte_flow_action actions[],
4355 			const struct rte_flow_action masks[],
4356 			struct rte_flow_error *error)
4357 {
4358 	struct mlx5_priv *priv = dev->data->dev_private;
4359 	int len, act_len, mask_len;
4360 	unsigned int act_num;
4361 	unsigned int i;
4362 	struct rte_flow_actions_template *at = NULL;
4363 	uint16_t pos = UINT16_MAX;
4364 	uint64_t action_flags = 0;
4365 	struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
4366 	struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
4367 	struct rte_flow_action *ra = (void *)(uintptr_t)actions;
4368 	struct rte_flow_action *rm = (void *)(uintptr_t)masks;
4369 	int set_vlan_vid_ix = -1;
4370 	struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
4371 	struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
4372 	const struct rte_flow_action_modify_field rx_mreg = {
4373 		.operation = RTE_FLOW_MODIFY_SET,
4374 		.dst = {
4375 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
4376 			.level = REG_B,
4377 		},
4378 		.src = {
4379 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
4380 			.level = REG_C_1,
4381 		},
4382 		.width = 32,
4383 	};
4384 	const struct rte_flow_action_modify_field rx_mreg_mask = {
4385 		.operation = RTE_FLOW_MODIFY_SET,
4386 		.dst = {
4387 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
4388 			.level = UINT32_MAX,
4389 			.offset = UINT32_MAX,
4390 		},
4391 		.src = {
4392 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
4393 			.level = UINT32_MAX,
4394 			.offset = UINT32_MAX,
4395 		},
4396 		.width = UINT32_MAX,
4397 	};
4398 	const struct rte_flow_action rx_cpy = {
4399 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
4400 		.conf = &rx_mreg,
4401 	};
4402 	const struct rte_flow_action rx_cpy_mask = {
4403 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
4404 		.conf = &rx_mreg_mask,
4405 	};
4406 
4407 	if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
4408 					  &action_flags, error))
4409 		return NULL;
4410 	for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
4411 		switch (ra[i].type) {
4412 		/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
4413 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4414 			i += is_of_vlan_pcp_present(ra + i) ?
4415 				MLX5_HW_VLAN_PUSH_PCP_IDX :
4416 				MLX5_HW_VLAN_PUSH_VID_IDX;
4417 			break;
4418 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4419 			set_vlan_vid_ix = i;
4420 			break;
4421 		default:
4422 			break;
4423 		}
4424 	}
4425 	/*
4426 	 * Count flow actions to allocate required space for storing DR offsets and to check
4427 	 * that the temporary buffers will not be overrun.
4428 	 */
4429 	act_num = i + 1;
4430 	if (act_num >= MLX5_HW_MAX_ACTS) {
4431 		rte_flow_error_set(error, EINVAL,
4432 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
4433 		return NULL;
4434 	}
4435 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
4436 	    priv->sh->config.dv_esw_en &&
4437 	    (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
4438 		/* Insert META copy */
4439 		if (act_num + 1 > MLX5_HW_MAX_ACTS) {
4440 			rte_flow_error_set(error, E2BIG,
4441 					   RTE_FLOW_ERROR_TYPE_ACTION,
4442 					   NULL, "cannot expand: too many actions");
4443 			return NULL;
4444 		}
4445 		/* Application should make sure only one Q/RSS exists in one rule. */
4446 		pos = flow_hw_template_expand_modify_field(actions, masks,
4447 							   &rx_cpy,
4448 							   &rx_cpy_mask,
4449 							   tmp_action, tmp_mask,
4450 							   action_flags,
4451 							   act_num);
4452 		ra = tmp_action;
4453 		rm = tmp_mask;
4454 		act_num++;
4455 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
4456 	}
4457 	if (set_vlan_vid_ix != -1) {
4458 		/* If temporary action buffer was not used, copy template actions to it */
4459 		if (ra == actions && rm == masks) {
4460 			for (i = 0; i < act_num; ++i) {
4461 				tmp_action[i] = actions[i];
4462 				tmp_mask[i] = masks[i];
4463 				if (actions[i].type == RTE_FLOW_ACTION_TYPE_END)
4464 					break;
4465 			}
4466 			ra = tmp_action;
4467 			rm = tmp_mask;
4468 		}
4469 		flow_hw_set_vlan_vid(dev, ra, rm,
4470 				     &set_vlan_vid_spec, &set_vlan_vid_mask,
4471 				     set_vlan_vid_ix);
4472 	}
4473 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
4474 	if (act_len <= 0)
4475 		return NULL;
4476 	len = RTE_ALIGN(act_len, 16);
4477 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);
4478 	if (mask_len <= 0)
4479 		return NULL;
4480 	len += RTE_ALIGN(mask_len, 16);
4481 	len += RTE_ALIGN(act_num * sizeof(*at->actions_off), 16);
4482 	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
4483 			 RTE_CACHE_LINE_SIZE, rte_socket_id());
4484 	if (!at) {
4485 		rte_flow_error_set(error, ENOMEM,
4486 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4487 				   NULL,
4488 				   "cannot allocate action template");
4489 		return NULL;
4490 	}
4491 	/* Actions part is in the first part. */
4492 	at->attr = *attr;
4493 	at->actions = (struct rte_flow_action *)(at + 1);
4494 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,
4495 				len, ra, error);
4496 	if (act_len <= 0)
4497 		goto error;
4498 	/* Masks part is in the second part. */
4499 	at->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);
4500 	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
4501 				 len - act_len, rm, error);
4502 	if (mask_len <= 0)
4503 		goto error;
4504 	/* DR actions offsets in the third part. */
4505 	at->actions_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
4506 	at->actions_num = act_num;
4507 	for (i = 0; i < at->actions_num; ++i)
4508 		at->actions_off[i] = UINT16_MAX;
4509 	at->reformat_off = UINT16_MAX;
4510 	at->mhdr_off = UINT16_MAX;
4511 	at->rx_cpy_pos = pos;
4512 	/*
4513 	 * The mlx5 PMD stores the indirect action index directly in the action conf.
4514 	 * The rte_flow_conv() function copies the content the conf pointer refers to,
4515 	 * so the indirect action index must be restored from the action conf here.
4516 	 */
4517 	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
4518 	     actions++, masks++, i++) {
4519 		if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
4520 			at->actions[i].conf = actions->conf;
4521 			at->masks[i].conf = masks->conf;
4522 		}
4523 	}
4524 	at->tmpl = flow_hw_dr_actions_template_create(at);
4525 	if (!at->tmpl)
4526 		goto error;
4527 	at->action_flags = action_flags;
4528 	__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
4529 	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
4530 	return at;
4531 error:
4532 	if (at) {
4533 		if (at->tmpl)
4534 			mlx5dr_action_template_destroy(at->tmpl);
4535 		mlx5_free(at);
4536 	}
4537 	return NULL;
4538 }
4539 
4540 /**
4541  * Destroy flow action template.
4542  *
4543  * @param[in] dev
4544  *   Pointer to the rte_eth_dev structure.
4545  * @param[in] template
4546  *   Pointer to the action template to be destroyed.
4547  * @param[out] error
4548  *   Pointer to error structure.
4549  *
4550  * @return
4551  *   0 on success, a negative errno value otherwise and rte_errno is set.
4552  */
4553 static int
4554 flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
4555 				 struct rte_flow_actions_template *template,
4556 				 struct rte_flow_error *error __rte_unused)
4557 {
4558 	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
4559 		DRV_LOG(WARNING, "Action template %p is still in use.",
4560 			(void *)template);
4561 		return rte_flow_error_set(error, EBUSY,
4562 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4563 				   NULL,
4564 				   "action template in using");
4565 	}
4566 	LIST_REMOVE(template, next);
4567 	if (template->tmpl)
4568 		mlx5dr_action_template_destroy(template->tmpl);
4569 	mlx5_free(template);
4570 	return 0;
4571 }
4572 
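/* Count pattern items, including the trailing END item. */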
4573 static uint32_t
4574 flow_hw_count_items(const struct rte_flow_item *items)
4575 {
4576 	const struct rte_flow_item *curr_item;
4577 	uint32_t nb_items;
4578 
4579 	nb_items = 0;
4580 	for (curr_item = items; curr_item->type != RTE_FLOW_ITEM_TYPE_END; ++curr_item)
4581 		++nb_items;
4582 	return ++nb_items;
4583 }
4584 
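/*
 * Allocate a new item array with the new item placed in front of the original
 * items. The caller is responsible for freeing the array with mlx5_free().
 */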
4585 static struct rte_flow_item *
4586 flow_hw_prepend_item(const struct rte_flow_item *items,
4587 		     const uint32_t nb_items,
4588 		     const struct rte_flow_item *new_item,
4589 		     struct rte_flow_error *error)
4590 {
4591 	struct rte_flow_item *copied_items;
4592 	size_t size;
4593 
4594 	/* Allocate new array of items. */
4595 	size = sizeof(*copied_items) * (nb_items + 1);
4596 	copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());
4597 	if (!copied_items) {
4598 		rte_flow_error_set(error, ENOMEM,
4599 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4600 				   NULL,
4601 				   "cannot allocate item template");
4602 		return NULL;
4603 	}
4604 	/* Put new item at the beginning and copy the rest. */
4605 	copied_items[0] = *new_item;
4606 	rte_memcpy(&copied_items[1], items, sizeof(*items) * nb_items);
4607 	return copied_items;
4608 }
4609 
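/**
 * Validate flow pattern template attributes and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the pattern template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */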
4610 static int
4611 flow_hw_pattern_validate(struct rte_eth_dev *dev,
4612 			 const struct rte_flow_pattern_template_attr *attr,
4613 			 const struct rte_flow_item items[],
4614 			 struct rte_flow_error *error)
4615 {
4616 	struct mlx5_priv *priv = dev->data->dev_private;
4617 	int i;
4618 	bool items_end = false;
4619 
4620 	if (!attr->ingress && !attr->egress && !attr->transfer)
4621 		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4622 					  "at least one of the direction attributes"
4623 					  " must be specified");
4624 	if (priv->sh->config.dv_esw_en) {
4625 		MLX5_ASSERT(priv->master || priv->representor);
4626 		if (priv->master) {
4627 			if ((attr->ingress && attr->egress) ||
4628 			    (attr->ingress && attr->transfer) ||
4629 			    (attr->egress && attr->transfer))
4630 				return rte_flow_error_set(error, EINVAL,
4631 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4632 							  "only one direction attribute at once"
4633 							  " can be used on transfer proxy port");
4634 		} else {
4635 			if (attr->transfer)
4636 				return rte_flow_error_set(error, EINVAL,
4637 							  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
4638 							  "transfer attribute cannot be used with"
4639 							  " port representors");
4640 			if (attr->ingress && attr->egress)
4641 				return rte_flow_error_set(error, EINVAL,
4642 							  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4643 							  "ingress and egress direction attributes"
4644 							  " cannot be used at the same time on"
4645 							  " port representors");
4646 		}
4647 	} else {
4648 		if (attr->transfer)
4649 			return rte_flow_error_set(error, EINVAL,
4650 						  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
4651 						  "transfer attribute cannot be used when"
4652 						  " E-Switch is disabled");
4653 	}
4654 	for (i = 0; !items_end; i++) {
4655 		int type = items[i].type;
4656 
4657 		switch (type) {
4658 		case RTE_FLOW_ITEM_TYPE_TAG:
4659 		{
4660 			int reg;
4661 			const struct rte_flow_item_tag *tag =
4662 				(const struct rte_flow_item_tag *)items[i].spec;
4663 
4664 			reg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG, tag->index);
4665 			if (reg == REG_NON)
4666 				return rte_flow_error_set(error, EINVAL,
4667 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4668 							  NULL,
4669 							  "Unsupported tag index");
4670 			break;
4671 		}
4672 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
4673 		{
4674 			const struct rte_flow_item_tag *tag =
4675 				(const struct rte_flow_item_tag *)items[i].spec;
4676 			uint8_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
4677 
4678 			if (!((1 << (tag->index - REG_C_0)) & regcs))
4679 				return rte_flow_error_set(error, EINVAL,
4680 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4681 							  NULL,
4682 							  "Unsupported internal tag index");
4683 			break;
4684 		}
4685 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
4686 			if (attr->ingress && priv->sh->config.repr_matching)
4687 				return rte_flow_error_set(error, EINVAL,
4688 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
4689 						  "represented port item cannot be used"
4690 						  " when ingress attribute is set");
4691 			if (attr->egress)
4692 				return rte_flow_error_set(error, EINVAL,
4693 						  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
4694 						  "represented port item cannot be used"
4695 						  " when egress attribute is set");
4696 			break;
4697 		case RTE_FLOW_ITEM_TYPE_META:
4698 			if (!priv->sh->config.dv_esw_en ||
4699 			    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS) {
4700 				if (attr->ingress)
4701 					return rte_flow_error_set(error, EINVAL,
4702 								  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
4703 								  "META item is not supported"
4704 								  " on current FW with ingress"
4705 								  " attribute");
4706 			}
4707 			break;
4708 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
4709 		{
4710 			int reg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_METER_COLOR, 0);
4711 			if (reg == REG_NON)
4712 				return rte_flow_error_set(error, EINVAL,
4713 							  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4714 							  NULL,
4715 							  "Unsupported meter color register");
4716 			break;
4717 		}
4718 		case RTE_FLOW_ITEM_TYPE_VOID:
4719 		case RTE_FLOW_ITEM_TYPE_ETH:
4720 		case RTE_FLOW_ITEM_TYPE_VLAN:
4721 		case RTE_FLOW_ITEM_TYPE_IPV4:
4722 		case RTE_FLOW_ITEM_TYPE_IPV6:
4723 		case RTE_FLOW_ITEM_TYPE_UDP:
4724 		case RTE_FLOW_ITEM_TYPE_TCP:
4725 		case RTE_FLOW_ITEM_TYPE_GTP:
4726 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
4727 		case RTE_FLOW_ITEM_TYPE_VXLAN:
4728 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
4729 		case RTE_FLOW_ITEM_TYPE_GRE:
4730 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4731 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
4732 		case RTE_FLOW_ITEM_TYPE_ICMP:
4733 		case RTE_FLOW_ITEM_TYPE_ICMP6:
4734 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
4735 			break;
4736 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
4737 			/*
4738 			 * Integrity flow item validation requires access to
4739 			 * both item mask and spec.
4740 			 * Current HWS model allows item mask in pattern
4741 			 * template and item spec in flow rule.
4742 			 */
4743 			break;
4744 		case RTE_FLOW_ITEM_TYPE_END:
4745 			items_end = true;
4746 			break;
4747 		default:
4748 			return rte_flow_error_set(error, EINVAL,
4749 						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4750 						  NULL,
4751 						  "Unsupported item type");
4752 		}
4753 	}
4754 	return 0;
4755 }
4756 
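/* Check whether the pattern has an implicit SQ match item. */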
4757 static bool
4758 flow_hw_pattern_has_sq_match(const struct rte_flow_item *items)
4759 {
4760 	unsigned int i;
4761 
4762 	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i)
4763 		if (items[i].type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ)
4764 			return true;
4765 	return false;
4766 }
4767 
4768 /**
4769  * Create flow item template.
4770  *
4771  * @param[in] dev
4772  *   Pointer to the rte_eth_dev structure.
4773  * @param[in] attr
4774  *   Pointer to the item template attributes.
4775  * @param[in] items
4776  *   The template item pattern.
4777  * @param[out] error
4778  *   Pointer to error structure.
4779  *
4780  * @return
4781  *  Item template pointer on success, NULL otherwise and rte_errno is set.
4782  */
4783 static struct rte_flow_pattern_template *
4784 flow_hw_pattern_template_create(struct rte_eth_dev *dev,
4785 			     const struct rte_flow_pattern_template_attr *attr,
4786 			     const struct rte_flow_item items[],
4787 			     struct rte_flow_error *error)
4788 {
4789 	struct mlx5_priv *priv = dev->data->dev_private;
4790 	struct rte_flow_pattern_template *it;
4791 	struct rte_flow_item *copied_items = NULL;
4792 	const struct rte_flow_item *tmpl_items;
4793 	uint64_t orig_item_nb;
4794 	struct rte_flow_item port = {
4795 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
4796 		.mask = &rte_flow_item_ethdev_mask,
4797 	};
4798 	struct rte_flow_item_tag tag_v = {
4799 		.data = 0,
4800 		.index = REG_C_0,
4801 	};
4802 	struct rte_flow_item_tag tag_m = {
4803 		.data = flow_hw_tx_tag_regc_mask(dev),
4804 		.index = 0xff,
4805 	};
4806 	struct rte_flow_item tag = {
4807 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4808 		.spec = &tag_v,
4809 		.mask = &tag_m,
4810 		.last = NULL
4811 	};
4812 
4813 	if (flow_hw_pattern_validate(dev, attr, items, error))
4814 		return NULL;
4815 	orig_item_nb = flow_hw_count_items(items);
4816 	if (priv->sh->config.dv_esw_en &&
4817 	    priv->sh->config.repr_matching &&
4818 	    attr->ingress && !attr->egress && !attr->transfer) {
4819 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
4820 		if (!copied_items)
4821 			return NULL;
4822 		tmpl_items = copied_items;
4823 	} else if (priv->sh->config.dv_esw_en &&
4824 		   priv->sh->config.repr_matching &&
4825 		   !attr->ingress && attr->egress && !attr->transfer) {
4826 		if (flow_hw_pattern_has_sq_match(items)) {
4827 			DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
4828 				       "pattern template", dev->data->port_id);
4829 			tmpl_items = items;
4830 			goto setup_pattern_template;
4831 		}
4832 		copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
4833 		if (!copied_items)
4834 			return NULL;
4835 		tmpl_items = copied_items;
4836 	} else {
4837 		tmpl_items = items;
4838 	}
4839 setup_pattern_template:
4840 	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
4841 	if (!it) {
4842 		if (copied_items)
4843 			mlx5_free(copied_items);
4844 		rte_flow_error_set(error, ENOMEM,
4845 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4846 				   NULL,
4847 				   "cannot allocate item template");
4848 		return NULL;
4849 	}
4850 	it->attr = *attr;
4851 	it->orig_item_nb = orig_item_nb;
4852 	it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
4853 	if (!it->mt) {
4854 		if (copied_items)
4855 			mlx5_free(copied_items);
4856 		mlx5_free(it);
4857 		rte_flow_error_set(error, rte_errno,
4858 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4859 				   NULL,
4860 				   "cannot create match template");
4861 		return NULL;
4862 	}
4863 	it->item_flags = flow_hw_rss_item_flags_get(tmpl_items);
4864 	if (copied_items) {
4865 		if (attr->ingress)
4866 			it->implicit_port = true;
4867 		else if (attr->egress)
4868 			it->implicit_tag = true;
4869 		mlx5_free(copied_items);
4870 	}
4871 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
4872 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
4873 	return it;
4874 }
4875 
4876 /**
4877  * Destroy flow item template.
4878  *
4879  * @param[in] dev
4880  *   Pointer to the rte_eth_dev structure.
4881  * @param[in] template
4882  *   Pointer to the item template to be destroyed.
4883  * @param[out] error
4884  *   Pointer to error structure.
4885  *
4886  * @return
4887  *   0 on success, a negative errno value otherwise and rte_errno is set.
4888  */
4889 static int
4890 flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
4891 			      struct rte_flow_pattern_template *template,
4892 			      struct rte_flow_error *error __rte_unused)
4893 {
4894 	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
4895 		DRV_LOG(WARNING, "Item template %p is still in use.",
4896 			(void *)template);
4897 		return rte_flow_error_set(error, EBUSY,
4898 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4899 				   NULL,
4900 				   "item template is in use");
4901 	}
4902 	LIST_REMOVE(template, next);
4903 	claim_zero(mlx5dr_match_template_destroy(template->mt));
4904 	mlx5_free(template);
4905 	return 0;
4906 }
4907 
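
/*
 * Illustrative sketch only (guarded by a documentation macro that is never
 * defined): how an application would exercise the create/destroy pair above
 * through the public rte_flow template API, which dispatches here when HW
 * steering is enabled. The port number and error policy are assumptions.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static int
example_pattern_template(uint16_t port_id)
{
	static const struct rte_flow_pattern_template_attr attr = {
		.relaxed_matching = 0,
		.ingress = 1,
	};
	static const struct rte_flow_item_eth eth_mask = {
		.hdr.ether_type = RTE_BE16(0xffff), /* Match EtherType only. */
	};
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow_pattern_template *pt;

	pt = rte_flow_pattern_template_create(port_id, &attr, items, &error);
	if (!pt)
		return -rte_errno;
	return rte_flow_pattern_template_destroy(port_id, pt, &error);
}
#endif /* MLX5_HW_DOC_EXAMPLE */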
4908 /*
4909  * Get information about HWS pre-configurable resources.
4910  *
4911  * @param[in] dev
4912  *   Pointer to the rte_eth_dev structure.
4913  * @param[out] port_info
4914  *   Pointer to port information.
4915  * @param[out] queue_info
4916  *   Pointer to queue information.
4917  * @param[out] error
4918  *   Pointer to error structure.
4919  *
4920  * @return
4921  *   0 on success, a negative errno value otherwise and rte_errno is set.
4922  */
4923 static int
4924 flow_hw_info_get(struct rte_eth_dev *dev,
4925 		 struct rte_flow_port_info *port_info,
4926 		 struct rte_flow_queue_info *queue_info,
4927 		 struct rte_flow_error *error __rte_unused)
4928 {
4929 	struct mlx5_priv *priv = dev->data->dev_private;
4930 	uint16_t port_id = dev->data->port_id;
4931 	struct rte_mtr_capabilities mtr_cap;
4932 	int ret;
4933 
4934 	memset(port_info, 0, sizeof(*port_info));
4935 	/* Queue number and size are not limited by the low-level layer. */
4936 	port_info->max_nb_queues = UINT32_MAX;
4937 	queue_info->max_size = UINT32_MAX;
4938 
4939 	memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
4940 	ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
4941 	if (!ret)
4942 		port_info->max_nb_meters = mtr_cap.n_max;
4943 	port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
4944 	port_info->max_nb_aging_objects = port_info->max_nb_counters;
4945 	return 0;
4946 }
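
/*
 * Illustrative sketch (documentation macro never defined): querying the
 * limits reported by flow_hw_info_get() above via the public entry point
 * before configuring the port with rte_flow_configure().
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static void
example_flow_info(uint16_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	struct rte_flow_error error;

	if (!rte_flow_info_get(port_id, &port_info, &queue_info, &error))
		DRV_LOG(INFO, "port %u max_nb_queues=%u max_nb_counters=%u",
			port_id, port_info.max_nb_queues,
			port_info.max_nb_counters);
}
#endif /* MLX5_HW_DOC_EXAMPLE */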
4947 
4948 /**
4949  * Create group callback.
4950  *
4951  * @param[in] tool_ctx
4952  *   Pointer to the hash list related context.
4953  * @param[in] cb_ctx
4954  *   Pointer to the group creation context.
4955  *
4956  * @return
4957  *   Group entry on success, NULL otherwise and rte_errno is set.
4958  */
4959 struct mlx5_list_entry *
4960 flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
4961 {
4962 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
4963 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4964 	struct rte_eth_dev *dev = ctx->dev;
4965 	struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
4966 	struct mlx5_priv *priv = dev->data->dev_private;
4967 	struct mlx5dr_table_attr dr_tbl_attr = {0};
4968 	struct rte_flow_error *error = ctx->error;
4969 	struct mlx5_flow_group *grp_data;
4970 	struct mlx5dr_table *tbl = NULL;
4971 	struct mlx5dr_action *jump;
4972 	uint32_t idx = 0;
4973 
4974 	grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
4975 	if (!grp_data) {
4976 		rte_flow_error_set(error, ENOMEM,
4977 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4978 				   NULL,
4979 				   "cannot allocate flow table data entry");
4980 		return NULL;
4981 	}
4982 	dr_tbl_attr.level = attr->group;
4983 	if (attr->transfer)
4984 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
4985 	else if (attr->egress)
4986 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
4987 	else
4988 		dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
4989 	tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
4990 	if (!tbl)
4991 		goto error;
4992 	grp_data->tbl = tbl;
4993 	if (attr->group) {
4994 		/* Jump action to be used by non-root tables. */
4995 		jump = mlx5dr_action_create_dest_table
4996 			(priv->dr_ctx, tbl,
4997 			 mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
4998 		if (!jump)
4999 			goto error;
5000 		grp_data->jump.hws_action = jump;
5001 		/* Jump action to be used by the root table. */
5002 		jump = mlx5dr_action_create_dest_table
5003 			(priv->dr_ctx, tbl,
5004 			 mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
5005 					 [dr_tbl_attr.type]);
5006 		if (!jump)
5007 			goto error;
5008 		grp_data->jump.root_action = jump;
5009 	}
5010 	grp_data->dev = dev;
5011 	grp_data->idx = idx;
5012 	grp_data->group_id = attr->group;
5013 	grp_data->type = dr_tbl_attr.type;
5014 	return &grp_data->entry;
5015 error:
5016 	if (grp_data->jump.root_action)
5017 		mlx5dr_action_destroy(grp_data->jump.root_action);
5018 	if (grp_data->jump.hws_action)
5019 		mlx5dr_action_destroy(grp_data->jump.hws_action);
5020 	if (tbl)
5021 		mlx5dr_table_destroy(tbl);
5022 	if (idx)
5023 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
5024 	rte_flow_error_set(error, ENOMEM,
5025 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5026 			   NULL,
5027 			   "cannot allocate flow dr table");
5028 	return NULL;
5029 }
5030 
5031 /**
5032  * Remove group callback.
5033  *
5034  * @param[in] tool_ctx
5035  *   Pointer to the hash list related context.
5036  * @param[in] entry
5037  *   Pointer to the entry to be removed.
5038  */
5039 void
5040 flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5041 {
5042 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
5043 	struct mlx5_flow_group *grp_data =
5044 		    container_of(entry, struct mlx5_flow_group, entry);
5045 
5046 	MLX5_ASSERT(entry && sh);
5047 	/* TODO: use the wrapper glue functions instead. */
5048 	if (grp_data->jump.hws_action)
5049 		mlx5dr_action_destroy(grp_data->jump.hws_action);
5050 	if (grp_data->jump.root_action)
5051 		mlx5dr_action_destroy(grp_data->jump.root_action);
5052 	mlx5dr_table_destroy(grp_data->tbl);
5053 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
5054 }
5055 
5056 /**
5057  * Match group callback.
5058  *
5059  * @param[in] tool_ctx
5060  *   Pointer to the hash list related context.
5061  * @param[in] entry
5062  *   Pointer to the group to be matched.
5063  * @param[in] cb_ctx
5064  *   Pointer to the group matching context.
5065  *
5066  * @return
5067  *   0 on match, 1 on mismatch.
5068  */
5069 int
5070 flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
5071 		     void *cb_ctx)
5072 {
5073 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5074 	struct mlx5_flow_group *grp_data =
5075 		container_of(entry, struct mlx5_flow_group, entry);
5076 	struct rte_flow_attr *attr =
5077 			(struct rte_flow_attr *)ctx->data;
5078 
5079 	return (grp_data->dev != ctx->dev) ||
5080 		(grp_data->group_id != attr->group) ||
5081 		((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
5082 		attr->transfer) ||
5083 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
5084 		attr->egress) ||
5085 		((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
5086 		attr->ingress);
5087 }
5088 
5089 /**
5090  * Clone group entry callback.
5091  *
5092  * @param[in] tool_ctx
5093  *   Pointer to the hash list related context.
5094  * @param[in] oentry
5095  *   Pointer to the group entry to be cloned.
5096  * @param[in] cb_ctx
5097  *   Pointer to the group clone context.
5098  *
5099  * @return
5100  *   Cloned group entry on success, NULL otherwise and rte_errno is set.
5101  */
5102 struct mlx5_list_entry *
5103 flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
5104 		     void *cb_ctx)
5105 {
5106 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
5107 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
5108 	struct mlx5_flow_group *grp_data;
5109 	struct rte_flow_error *error = ctx->error;
5110 	uint32_t idx = 0;
5111 
5112 	grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
5113 	if (!grp_data) {
5114 		rte_flow_error_set(error, ENOMEM,
5115 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5116 				   NULL,
5117 				   "cannot allocate flow table data entry");
5118 		return NULL;
5119 	}
5120 	memcpy(grp_data, oentry, sizeof(*grp_data));
5121 	grp_data->idx = idx;
5122 	return &grp_data->entry;
5123 }
5124 
5125 /**
5126  * Free cloned group entry callback.
5127  *
5128  * @param[in] tool_ctx
5129  *   Pointer to the hash list related context.
5130  * @param[in] entry
5131  *   Pointer to the group to be freed.
5132  */
5133 void
5134 flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
5135 {
5136 	struct mlx5_dev_ctx_shared *sh = tool_ctx;
5137 	struct mlx5_flow_group *grp_data =
5138 		    container_of(entry, struct mlx5_flow_group, entry);
5139 
5140 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
5141 }
5142 
5143 /**
5144  * Create and cache a vport action for the given @p dev port. The vport
5145  * actions cache is used in HWS with FDB flows.
5146  *
5147  * This function does not create any action if the proxy port for @p dev port
5148  * was not configured for HW Steering.
5149  *
5150  * This function assumes that E-Switch is enabled and PMD is running with
5151  * HW Steering configured.
5152  *
5153  * @param dev
5154  *   Pointer to Ethernet device which will be the action destination.
5155  *
5156  * @return
5157  *   0 on success, a negative errno value otherwise.
5158  */
5159 int
5160 flow_hw_create_vport_action(struct rte_eth_dev *dev)
5161 {
5162 	struct mlx5_priv *priv = dev->data->dev_private;
5163 	struct rte_eth_dev *proxy_dev;
5164 	struct mlx5_priv *proxy_priv;
5165 	uint16_t port_id = dev->data->port_id;
5166 	uint16_t proxy_port_id = port_id;
5167 	int ret;
5168 
5169 	ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
5170 	if (ret)
5171 		return ret;
5172 	proxy_dev = &rte_eth_devices[proxy_port_id];
5173 	proxy_priv = proxy_dev->data->dev_private;
5174 	if (!proxy_priv->hw_vport)
5175 		return 0;
5176 	if (proxy_priv->hw_vport[port_id]) {
5177 		DRV_LOG(ERR, "port %u HWS vport action already created",
5178 			port_id);
5179 		return -EINVAL;
5180 	}
5181 	proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
5182 			(proxy_priv->dr_ctx, priv->dev_port,
5183 			 MLX5DR_ACTION_FLAG_HWS_FDB);
5184 	if (!proxy_priv->hw_vport[port_id]) {
5185 		DRV_LOG(ERR, "port %u unable to create HWS vport action",
5186 			port_id);
5187 		return -EINVAL;
5188 	}
5189 	return 0;
5190 }
5191 
5192 /**
5193  * Destroys the vport action associated with @p dev device
5194  * from actions' cache.
5195  *
5196  * This function does not destroy any action if there is no action cached
5197  * for @p dev or proxy port was not configured for HW Steering.
5198  *
5199  * This function assumes that E-Switch is enabled and PMD is running with
5200  * HW Steering configured.
5201  *
5202  * @param dev
5203  *   Pointer to Ethernet device which will be the action destination.
5204  */
5205 void
5206 flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
5207 {
5208 	struct rte_eth_dev *proxy_dev;
5209 	struct mlx5_priv *proxy_priv;
5210 	uint16_t port_id = dev->data->port_id;
5211 	uint16_t proxy_port_id = port_id;
5212 
5213 	if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
5214 		return;
5215 	proxy_dev = &rte_eth_devices[proxy_port_id];
5216 	proxy_priv = proxy_dev->data->dev_private;
5217 	if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
5218 		return;
5219 	mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
5220 	proxy_priv->hw_vport[port_id] = NULL;
5221 }
5222 
5223 static int
5224 flow_hw_create_vport_actions(struct mlx5_priv *priv)
5225 {
5226 	uint16_t port_id;
5227 
5228 	MLX5_ASSERT(!priv->hw_vport);
5229 	priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
5230 				     sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
5231 				     0, SOCKET_ID_ANY);
5232 	if (!priv->hw_vport)
5233 		return -ENOMEM;
5234 	DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
5235 	DRV_LOG(DEBUG, "port %u ::    domain_id=%u", priv->dev_data->port_id, priv->domain_id);
5236 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
5237 		struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;
5238 
5239 		if (!port_priv ||
5240 		    port_priv->domain_id != priv->domain_id)
5241 			continue;
5242 		DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
5243 			priv->dev_data->port_id, port_id, port_priv->dev_port);
5244 		priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
5245 				(priv->dr_ctx, port_priv->dev_port,
5246 				 MLX5DR_ACTION_FLAG_HWS_FDB);
5247 		DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
5248 			priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
5249 		if (!priv->hw_vport[port_id])
5250 			return -EINVAL;
5251 	}
5252 	return 0;
5253 }
5254 
5255 static void
5256 flow_hw_free_vport_actions(struct mlx5_priv *priv)
5257 {
5258 	uint16_t port_id;
5259 
5260 	if (!priv->hw_vport)
5261 		return;
5262 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
5263 		if (priv->hw_vport[port_id])
5264 			mlx5dr_action_destroy(priv->hw_vport[port_id]);
5265 	mlx5_free(priv->hw_vport);
5266 	priv->hw_vport = NULL;
5267 }
5268 
5269 /**
5270  * Create an egress pattern template matching on source SQ.
5271  *
5272  * @param dev
5273  *   Pointer to Ethernet device.
5274  *
5275  * @return
5276  *   Pointer to pattern template on success. NULL otherwise, and rte_errno is set.
5277  */
5278 static struct rte_flow_pattern_template *
5279 flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev)
5280 {
5281 	struct rte_flow_pattern_template_attr attr = {
5282 		.relaxed_matching = 0,
5283 		.egress = 1,
5284 	};
5285 	struct mlx5_rte_flow_item_sq sq_mask = {
5286 		.queue = UINT32_MAX,
5287 	};
5288 	struct rte_flow_item items[] = {
5289 		{
5290 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
5291 			.mask = &sq_mask,
5292 		},
5293 		{
5294 			.type = RTE_FLOW_ITEM_TYPE_END,
5295 		},
5296 	};
5297 
5298 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
5299 }
5300 
5301 static __rte_always_inline uint32_t
5302 flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
5303 {
5304 	struct mlx5_priv *priv = dev->data->dev_private;
5305 	uint32_t mask = priv->sh->dv_regc0_mask;
5306 
5307 	/* Mask is verified during device initialization. Sanity checking here. */
5308 	MLX5_ASSERT(mask != 0);
5309 	/*
5310 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
5311 	 * Sanity checking here.
5312 	 */
5313 	MLX5_ASSERT(__builtin_popcount(mask) >= __builtin_popcount(priv->vport_meta_mask));
5314 	return mask;
5315 }
5316 
5317 static __rte_always_inline uint32_t
5318 flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
5319 {
5320 	struct mlx5_priv *priv = dev->data->dev_private;
5321 	uint32_t tag;
5322 
5323 	/* Mask is verified during device initialization. Sanity checking here. */
5324 	MLX5_ASSERT(priv->vport_meta_mask != 0);
5325 	tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
5326 	/*
5327 	 * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
5328 	 * Sanity checking here.
5329 	 */
5330 	MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
5331 	return tag;
5332 }
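
/*
 * Worked example for the two helpers above, with illustrative values:
 * given dv_regc0_mask = 0x0000ffff, vport_meta_mask = 0x0000f000 and
 * vport_meta_tag = 0x00003000, the returned match mask is 0x0000ffff and
 * the tag value is 0x00003000 >> rte_bsf32(0x0000f000) =
 * 0x00003000 >> 12 = 0x3, which indeed fits into dv_regc0_mask.
 */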
5333 
5334 static void
5335 flow_hw_update_action_mask(struct rte_flow_action *action,
5336 			   struct rte_flow_action *mask,
5337 			   enum rte_flow_action_type type,
5338 			   void *conf_v,
5339 			   void *conf_m)
5340 {
5341 	action->type = type;
5342 	action->conf = conf_v;
5343 	mask->type = type;
5344 	mask->conf = conf_m;
5345 }
5346 
5347 /**
5348  * Create an egress actions template with MODIFY_FIELD action for setting unused REG_C_0 bits
5349  * to vport tag and JUMP action to group 1.
5350  *
5351  * If extended metadata mode is enabled, then MODIFY_FIELD action for copying software metadata
5352  * to REG_C_1 is added as well.
5353  *
5354  * @param dev
5355  *   Pointer to Ethernet device.
5356  *
5357  * @return
5358  *   Pointer to actions template on success. NULL otherwise, and rte_errno is set.
5359  */
5360 static struct rte_flow_actions_template *
5361 flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev)
5362 {
5363 	uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev);
5364 	uint32_t tag_value = flow_hw_tx_tag_regc_value(dev);
5365 	struct rte_flow_actions_template_attr attr = {
5366 		.egress = 1,
5367 	};
5368 	struct rte_flow_action_modify_field set_tag_v = {
5369 		.operation = RTE_FLOW_MODIFY_SET,
5370 		.dst = {
5371 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5372 			.level = REG_C_0,
5373 			.offset = rte_bsf32(tag_mask),
5374 		},
5375 		.src = {
5376 			.field = RTE_FLOW_FIELD_VALUE,
5377 		},
5378 		.width = __builtin_popcount(tag_mask),
5379 	};
5380 	struct rte_flow_action_modify_field set_tag_m = {
5381 		.operation = RTE_FLOW_MODIFY_SET,
5382 		.dst = {
5383 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5384 			.level = UINT32_MAX,
5385 			.offset = UINT32_MAX,
5386 		},
5387 		.src = {
5388 			.field = RTE_FLOW_FIELD_VALUE,
5389 		},
5390 		.width = UINT32_MAX,
5391 	};
5392 	struct rte_flow_action_modify_field copy_metadata_v = {
5393 		.operation = RTE_FLOW_MODIFY_SET,
5394 		.dst = {
5395 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5396 			.level = REG_C_1,
5397 		},
5398 		.src = {
5399 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5400 			.level = REG_A,
5401 		},
5402 		.width = 32,
5403 	};
5404 	struct rte_flow_action_modify_field copy_metadata_m = {
5405 		.operation = RTE_FLOW_MODIFY_SET,
5406 		.dst = {
5407 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5408 			.level = UINT32_MAX,
5409 			.offset = UINT32_MAX,
5410 		},
5411 		.src = {
5412 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5413 			.level = UINT32_MAX,
5414 			.offset = UINT32_MAX,
5415 		},
5416 		.width = UINT32_MAX,
5417 	};
5418 	struct rte_flow_action_jump jump_v = {
5419 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
5420 	};
5421 	struct rte_flow_action_jump jump_m = {
5422 		.group = UINT32_MAX,
5423 	};
5424 	struct rte_flow_action actions_v[4] = { { 0 } };
5425 	struct rte_flow_action actions_m[4] = { { 0 } };
5426 	unsigned int idx = 0;
5427 
5428 	rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
5429 	rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
5430 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
5431 				   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
5432 				   &set_tag_v, &set_tag_m);
5433 	idx++;
5434 	if (MLX5_SH(dev)->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
5435 		flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
5436 					   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
5437 					   &copy_metadata_v, &copy_metadata_m);
5438 		idx++;
5439 	}
5440 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_JUMP,
5441 				   &jump_v, &jump_m);
5442 	idx++;
5443 	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_END,
5444 				   NULL, NULL);
5445 	idx++;
5446 	MLX5_ASSERT(idx <= RTE_DIM(actions_v));
5447 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, NULL);
5448 }
5449 
5450 static void
5451 flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev)
5452 {
5453 	struct mlx5_priv *priv = dev->data->dev_private;
5454 
5455 	if (priv->hw_tx_repr_tagging_tbl) {
5456 		flow_hw_table_destroy(dev, priv->hw_tx_repr_tagging_tbl, NULL);
5457 		priv->hw_tx_repr_tagging_tbl = NULL;
5458 	}
5459 	if (priv->hw_tx_repr_tagging_at) {
5460 		flow_hw_actions_template_destroy(dev, priv->hw_tx_repr_tagging_at, NULL);
5461 		priv->hw_tx_repr_tagging_at = NULL;
5462 	}
5463 	if (priv->hw_tx_repr_tagging_pt) {
5464 		flow_hw_pattern_template_destroy(dev, priv->hw_tx_repr_tagging_pt, NULL);
5465 		priv->hw_tx_repr_tagging_pt = NULL;
5466 	}
5467 }
5468 
5469 /**
5470  * Setup templates and table used to create default Tx flow rules. These default rules
5471  * allow for matching Tx representor traffic using a vport tag placed in unused bits of
5472  * REG_C_0 register.
5473  *
5474  * @param dev
5475  *   Pointer to Ethernet device.
5476  *
5477  * @return
5478  *   0 on success, negative errno value otherwise.
5479  */
5480 static int
5481 flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev)
5482 {
5483 	struct mlx5_priv *priv = dev->data->dev_private;
5484 	struct rte_flow_template_table_attr attr = {
5485 		.flow_attr = {
5486 			.group = 0,
5487 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
5488 			.egress = 1,
5489 		},
5490 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
5491 	};
5492 	struct mlx5_flow_template_table_cfg cfg = {
5493 		.attr = attr,
5494 		.external = false,
5495 	};
5496 
5497 	MLX5_ASSERT(priv->sh->config.dv_esw_en);
5498 	MLX5_ASSERT(priv->sh->config.repr_matching);
5499 	priv->hw_tx_repr_tagging_pt = flow_hw_create_tx_repr_sq_pattern_tmpl(dev);
5500 	if (!priv->hw_tx_repr_tagging_pt)
5501 		goto error;
5502 	priv->hw_tx_repr_tagging_at = flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev);
5503 	if (!priv->hw_tx_repr_tagging_at)
5504 		goto error;
5505 	priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg,
5506 							    &priv->hw_tx_repr_tagging_pt, 1,
5507 							    &priv->hw_tx_repr_tagging_at, 1,
5508 							    NULL);
5509 	if (!priv->hw_tx_repr_tagging_tbl)
5510 		goto error;
5511 	return 0;
5512 error:
5513 	flow_hw_cleanup_tx_repr_tagging(dev);
5514 	return -rte_errno;
5515 }
5516 
5517 static uint32_t
5518 flow_hw_esw_mgr_regc_marker_mask(struct rte_eth_dev *dev)
5519 {
5520 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
5521 
5522 	/* Mask is verified during device initialization. */
5523 	MLX5_ASSERT(mask != 0);
5524 	return mask;
5525 }
5526 
5527 static uint32_t
5528 flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev)
5529 {
5530 	uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
5531 
5532 	/* Mask is verified during device initialization. */
5533 	MLX5_ASSERT(mask != 0);
5534 	return RTE_BIT32(rte_bsf32(mask));
5535 }
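
/*
 * Worked example with an illustrative value: for dv_regc0_mask = 0x0000ff00,
 * the marker mask covers all usable REG_C_0 bits while the marker itself is
 * RTE_BIT32(rte_bsf32(0x0000ff00)) = RTE_BIT32(8) = 0x00000100, i.e. only
 * the least significant usable bit is set.
 */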
5536 
5537 /**
5538  * Creates a flow pattern template used to match on E-Switch Manager.
5539  * This template is used to set up a table for SQ miss default flow.
5540  *
5541  * @param dev
5542  *   Pointer to Ethernet device.
5543  *
5544  * @return
5545  *   Pointer to flow pattern template on success, NULL otherwise.
5546  */
5547 static struct rte_flow_pattern_template *
5548 flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev)
5549 {
5550 	struct rte_flow_pattern_template_attr attr = {
5551 		.relaxed_matching = 0,
5552 		.transfer = 1,
5553 	};
5554 	struct rte_flow_item_ethdev port_spec = {
5555 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
5556 	};
5557 	struct rte_flow_item_ethdev port_mask = {
5558 		.port_id = UINT16_MAX,
5559 	};
5560 	struct mlx5_rte_flow_item_sq sq_mask = {
5561 		.queue = UINT32_MAX,
5562 	};
5563 	struct rte_flow_item items[] = {
5564 		{
5565 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
5566 			.spec = &port_spec,
5567 			.mask = &port_mask,
5568 		},
5569 		{
5570 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
5571 			.mask = &sq_mask,
5572 		},
5573 		{
5574 			.type = RTE_FLOW_ITEM_TYPE_END,
5575 		},
5576 	};
5577 
5578 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
5579 }
5580 
5581 /**
5582  * Creates a flow pattern template used to match REG_C_0 and a SQ.
5583  * Matching on REG_C_0 is set up to match on all bits usable by user-space.
5584  * If traffic was sent from E-Switch Manager, then all usable bits will be set to 0,
5585  * except the least significant bit, which will be set to 1.
5586  *
5587  * This template is used to set up a table for SQ miss default flow.
5588  *
5589  * @param dev
5590  *   Pointer to Ethernet device.
5591  *
5592  * @return
5593  *   Pointer to flow pattern template on success, NULL otherwise.
5594  */
5595 static struct rte_flow_pattern_template *
5596 flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev)
5597 {
5598 	struct rte_flow_pattern_template_attr attr = {
5599 		.relaxed_matching = 0,
5600 		.transfer = 1,
5601 	};
5602 	struct rte_flow_item_tag reg_c0_spec = {
5603 		.index = (uint8_t)REG_C_0,
5604 	};
5605 	struct rte_flow_item_tag reg_c0_mask = {
5606 		.index = 0xff,
5607 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
5608 	};
5609 	struct mlx5_rte_flow_item_sq queue_mask = {
5610 		.queue = UINT32_MAX,
5611 	};
5612 	struct rte_flow_item items[] = {
5613 		{
5614 			.type = (enum rte_flow_item_type)
5615 				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
5616 			.spec = &reg_c0_spec,
5617 			.mask = &reg_c0_mask,
5618 		},
5619 		{
5620 			.type = (enum rte_flow_item_type)
5621 				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
5622 			.mask = &queue_mask,
5623 		},
5624 		{
5625 			.type = RTE_FLOW_ITEM_TYPE_END,
5626 		},
5627 	};
5628 
5629 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
5630 }
5631 
5632 /**
5633  * Creates a flow pattern template with unmasked represented port matching.
5634  * This template is used to set up a table for default transfer flows
5635  * directing packets to group 1.
5636  *
5637  * @param dev
5638  *   Pointer to Ethernet device.
5639  *
5640  * @return
5641  *   Pointer to flow pattern template on success, NULL otherwise.
5642  */
5643 static struct rte_flow_pattern_template *
5644 flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev)
5645 {
5646 	struct rte_flow_pattern_template_attr attr = {
5647 		.relaxed_matching = 0,
5648 		.transfer = 1,
5649 	};
5650 	struct rte_flow_item_ethdev port_mask = {
5651 		.port_id = UINT16_MAX,
5652 	};
5653 	struct rte_flow_item items[] = {
5654 		{
5655 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
5656 			.mask = &port_mask,
5657 		},
5658 		{
5659 			.type = RTE_FLOW_ITEM_TYPE_END,
5660 		},
5661 	};
5662 
5663 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
5664 }
5665 
5666 /*
5667  * Creates a flow pattern template matching all Ethernet packets.
5668  * This template is used to set up a table for default Tx copy (Tx metadata
5669  * to REG_C_1) flow rule usage.
5670  *
5671  * @param dev
5672  *   Pointer to Ethernet device.
5673  *
5674  * @return
5675  *   Pointer to flow pattern template on success, NULL otherwise.
5676  */
5677 static struct rte_flow_pattern_template *
5678 flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev)
5679 {
5680 	struct rte_flow_pattern_template_attr tx_pa_attr = {
5681 		.relaxed_matching = 0,
5682 		.egress = 1,
5683 	};
5684 	struct rte_flow_item_eth promisc = {
5685 		.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
5686 		.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
5687 		.hdr.ether_type = 0,
5688 	};
5689 	struct rte_flow_item eth_all[] = {
5690 		[0] = {
5691 			.type = RTE_FLOW_ITEM_TYPE_ETH,
5692 			.spec = &promisc,
5693 			.mask = &promisc,
5694 		},
5695 		[1] = {
5696 			.type = RTE_FLOW_ITEM_TYPE_END,
5697 		},
5698 	};
5699 	struct rte_flow_error drop_err;
5700 
5701 	RTE_SET_USED(drop_err);
5702 	return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, &drop_err);
5703 }
5704 
5705 /**
5706  * Creates a flow actions template with modify field action and masked jump action.
5707  * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)
5708  * to 1, meaning that packet was originated from E-Switch Manager. Jump action
5709  * transfers steering to group 1.
5710  *
5711  * @param dev
5712  *   Pointer to Ethernet device.
5713  *
5714  * @return
5715  *   Pointer to flow actions template on success, NULL otherwise.
5716  */
5717 static struct rte_flow_actions_template *
5718 flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev)
5719 {
5720 	uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev);
5721 	uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev);
5722 	struct rte_flow_actions_template_attr attr = {
5723 		.transfer = 1,
5724 	};
5725 	struct rte_flow_action_modify_field set_reg_v = {
5726 		.operation = RTE_FLOW_MODIFY_SET,
5727 		.dst = {
5728 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5729 			.level = REG_C_0,
5730 		},
5731 		.src = {
5732 			.field = RTE_FLOW_FIELD_VALUE,
5733 		},
5734 		.width = __builtin_popcount(marker_mask),
5735 	};
5736 	struct rte_flow_action_modify_field set_reg_m = {
5737 		.operation = RTE_FLOW_MODIFY_SET,
5738 		.dst = {
5739 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5740 			.level = UINT32_MAX,
5741 			.offset = UINT32_MAX,
5742 		},
5743 		.src = {
5744 			.field = RTE_FLOW_FIELD_VALUE,
5745 		},
5746 		.width = UINT32_MAX,
5747 	};
5748 	struct rte_flow_action_jump jump_v = {
5749 		.group = MLX5_HW_LOWEST_USABLE_GROUP,
5750 	};
5751 	struct rte_flow_action_jump jump_m = {
5752 		.group = UINT32_MAX,
5753 	};
5754 	struct rte_flow_action actions_v[] = {
5755 		{
5756 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
5757 			.conf = &set_reg_v,
5758 		},
5759 		{
5760 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5761 			.conf = &jump_v,
5762 		},
5763 		{
5764 			.type = RTE_FLOW_ACTION_TYPE_END,
5765 		}
5766 	};
5767 	struct rte_flow_action actions_m[] = {
5768 		{
5769 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
5770 			.conf = &set_reg_m,
5771 		},
5772 		{
5773 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5774 			.conf = &jump_m,
5775 		},
5776 		{
5777 			.type = RTE_FLOW_ACTION_TYPE_END,
5778 		}
5779 	};
5780 
5781 	set_reg_v.dst.offset = rte_bsf32(marker_mask);
5782 	rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
5783 	rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
5784 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, NULL);
5785 }
5786 
5787 /**
5788  * Creates a flow actions template with an unmasked JUMP action. Flows
5789  * based on this template will perform a jump to some group. This template
5790  * is used to set up tables for control flows.
5791  *
5792  * @param dev
5793  *   Pointer to Ethernet device.
5794  * @param group
5795  *   Destination group for this action template.
5796  *
5797  * @return
5798  *   Pointer to flow actions template on success, NULL otherwise.
5799  */
5800 static struct rte_flow_actions_template *
5801 flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
5802 					  uint32_t group)
5803 {
5804 	struct rte_flow_actions_template_attr attr = {
5805 		.transfer = 1,
5806 	};
5807 	struct rte_flow_action_jump jump_v = {
5808 		.group = group,
5809 	};
5810 	struct rte_flow_action_jump jump_m = {
5811 		.group = UINT32_MAX,
5812 	};
5813 	struct rte_flow_action actions_v[] = {
5814 		{
5815 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5816 			.conf = &jump_v,
5817 		},
5818 		{
5819 			.type = RTE_FLOW_ACTION_TYPE_END,
5820 		}
5821 	};
5822 	struct rte_flow_action actions_m[] = {
5823 		{
5824 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5825 			.conf = &jump_m,
5826 		},
5827 		{
5828 			.type = RTE_FLOW_ACTION_TYPE_END,
5829 		}
5830 	};
5831 
5832 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m,
5833 					       NULL);
5834 }
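
/*
 * Illustrative application-side sketch (documentation macro never defined):
 * the same fully masked JUMP pattern through the public API. A mask with
 * group = UINT32_MAX fixes the destination group at template creation time,
 * so flow rules created from this template need not supply it per rule.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static struct rte_flow_actions_template *
example_jump_actions_template(uint16_t port_id, uint32_t group)
{
	static const struct rte_flow_actions_template_attr attr = {
		.ingress = 1,
	};
	const struct rte_flow_action_jump jump_v = { .group = group };
	const struct rte_flow_action_jump jump_m = { .group = UINT32_MAX };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump_v },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump_m },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_actions_template_create(port_id, &attr,
						actions, masks, NULL);
}
#endif /* MLX5_HW_DOC_EXAMPLE */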
5835 
5836 /**
5837  * Creates a flow actions template with an unmasked REPRESENTED_PORT action.
5838  * It is used to create control flow tables.
5839  *
5840  * @param dev
5841  *   Pointer to Ethernet device.
5842  *
5843  * @return
5844  *   Pointer to flow actions template on success, NULL otherwise.
5845  */
5846 static struct rte_flow_actions_template *
5847 flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev)
5848 {
5849 	struct rte_flow_actions_template_attr attr = {
5850 		.transfer = 1,
5851 	};
5852 	struct rte_flow_action_ethdev port_v = {
5853 		.port_id = 0,
5854 	};
5855 	struct rte_flow_action actions_v[] = {
5856 		{
5857 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
5858 			.conf = &port_v,
5859 		},
5860 		{
5861 			.type = RTE_FLOW_ACTION_TYPE_END,
5862 		}
5863 	};
5864 	struct rte_flow_action_ethdev port_m = {
5865 		.port_id = 0,
5866 	};
5867 	struct rte_flow_action actions_m[] = {
5868 		{
5869 			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
5870 			.conf = &port_m,
5871 		},
5872 		{
5873 			.type = RTE_FLOW_ACTION_TYPE_END,
5874 		}
5875 	};
5876 
5877 	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m,
5878 					       NULL);
5879 }
5880 
5881 /*
5882  * Creates an actions template which uses a header modify action for register
5883  * copying. This template is used to set up a table for the copy flow.
5884  *
5885  * @param dev
5886  *   Pointer to Ethernet device.
5887  *
5888  * @return
5889  *   Pointer to flow actions template on success, NULL otherwise.
5890  */
5891 static struct rte_flow_actions_template *
5892 flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev)
5893 {
5894 	struct rte_flow_actions_template_attr tx_act_attr = {
5895 		.egress = 1,
5896 	};
5897 	const struct rte_flow_action_modify_field mreg_action = {
5898 		.operation = RTE_FLOW_MODIFY_SET,
5899 		.dst = {
5900 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5901 			.level = REG_C_1,
5902 		},
5903 		.src = {
5904 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5905 			.level = REG_A,
5906 		},
5907 		.width = 32,
5908 	};
5909 	const struct rte_flow_action_modify_field mreg_mask = {
5910 		.operation = RTE_FLOW_MODIFY_SET,
5911 		.dst = {
5912 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5913 			.level = UINT32_MAX,
5914 			.offset = UINT32_MAX,
5915 		},
5916 		.src = {
5917 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
5918 			.level = UINT32_MAX,
5919 			.offset = UINT32_MAX,
5920 		},
5921 		.width = UINT32_MAX,
5922 	};
5923 	const struct rte_flow_action_jump jump_action = {
5924 		.group = 1,
5925 	};
5926 	const struct rte_flow_action_jump jump_mask = {
5927 		.group = UINT32_MAX,
5928 	};
5929 	const struct rte_flow_action actions[] = {
5930 		[0] = {
5931 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
5932 			.conf = &mreg_action,
5933 		},
5934 		[1] = {
5935 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5936 			.conf = &jump_action,
5937 		},
5938 		[2] = {
5939 			.type = RTE_FLOW_ACTION_TYPE_END,
5940 		},
5941 	};
5942 	const struct rte_flow_action masks[] = {
5943 		[0] = {
5944 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
5945 			.conf = &mreg_mask,
5946 		},
5947 		[1] = {
5948 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
5949 			.conf = &jump_mask,
5950 		},
5951 		[2] = {
5952 			.type = RTE_FLOW_ACTION_TYPE_END,
5953 		},
5954 	};
5955 	struct rte_flow_error drop_err;
5956 
5957 	RTE_SET_USED(drop_err);
5958 	return flow_hw_actions_template_create(dev, &tx_act_attr, actions,
5959 					       masks, &drop_err);
5960 }
5961 
5962 /**
5963  * Creates a control flow table used to transfer traffic from E-Switch Manager
5964  * and TX queues from group 0 to group 1.
5965  *
5966  * @param dev
5967  *   Pointer to Ethernet device.
5968  * @param it
5969  *   Pointer to flow pattern template.
5970  * @param at
5971  *   Pointer to flow actions template.
5972  *
5973  * @return
5974  *   Pointer to flow table on success, NULL otherwise.
5975  */
5976 static struct rte_flow_template_table*
5977 flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
5978 				       struct rte_flow_pattern_template *it,
5979 				       struct rte_flow_actions_template *at)
5980 {
5981 	struct rte_flow_template_table_attr attr = {
5982 		.flow_attr = {
5983 			.group = 0,
5984 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
5985 			.ingress = 0,
5986 			.egress = 0,
5987 			.transfer = 1,
5988 		},
5989 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
5990 	};
5991 	struct mlx5_flow_template_table_cfg cfg = {
5992 		.attr = attr,
5993 		.external = false,
5994 	};
5995 
5996 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL);
5997 }
5998 
6000 /**
6001  * Creates a non-root control flow table used to forward SQ miss traffic
6002  * from TX queues in group 1 to the destination represented port.
6003  *
6004  * @param dev
6005  *   Pointer to Ethernet device.
6006  * @param it
6007  *   Pointer to flow pattern template.
6008  * @param at
6009  *   Pointer to flow actions template.
6010  *
6011  * @return
6012  *   Pointer to flow table on success, NULL otherwise.
6013  */
6014 static struct rte_flow_template_table*
6015 flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
6016 				  struct rte_flow_pattern_template *it,
6017 				  struct rte_flow_actions_template *at)
6018 {
6019 	struct rte_flow_template_table_attr attr = {
6020 		.flow_attr = {
6021 			.group = 1,
6022 			.priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,
6023 			.ingress = 0,
6024 			.egress = 0,
6025 			.transfer = 1,
6026 		},
6027 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
6028 	};
6029 	struct mlx5_flow_template_table_cfg cfg = {
6030 		.attr = attr,
6031 		.external = false,
6032 	};
6033 
6034 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL);
6035 }
6036 
6037 /*
6038  * Creates the default Tx metadata copy table on NIC Tx group 0.
6039  *
6040  * @param dev
6041  *   Pointer to Ethernet device.
6042  * @param pt
6043  *   Pointer to flow pattern template.
6044  * @param at
6045  *   Pointer to flow actions template.
6046  *
6047  * @return
6048  *   Pointer to flow table on success, NULL otherwise.
6049  */
6050 static struct rte_flow_template_table*
6051 flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
6052 					  struct rte_flow_pattern_template *pt,
6053 					  struct rte_flow_actions_template *at)
6054 {
6055 	struct rte_flow_template_table_attr tx_tbl_attr = {
6056 		.flow_attr = {
6057 			.group = 0, /* Root */
6058 			.priority = MLX5_HW_LOWEST_PRIO_ROOT,
6059 			.egress = 1,
6060 		},
6061 		.nb_flows = 1, /* One default flow rule for all. */
6062 	};
6063 	struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
6064 		.attr = tx_tbl_attr,
6065 		.external = false,
6066 	};
6067 	struct rte_flow_error drop_err = {
6068 		.type = RTE_FLOW_ERROR_TYPE_NONE,
6069 		.cause = NULL,
6070 		.message = NULL,
6071 	};
6072 
6073 	RTE_SET_USED(drop_err);
6074 	return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, &drop_err);
6075 }
6076 
6077 /**
6078  * Creates a control flow table used to transfer traffic
6079  * from group 0 to group 1.
6080  *
6081  * @param dev
6082  *   Pointer to Ethernet device.
6083  * @param it
6084  *   Pointer to flow pattern template.
6085  * @param at
6086  *   Pointer to flow actions template.
6087  *
6088  * @return
6089  *   Pointer to flow table on success, NULL otherwise.
6090  */
6091 static struct rte_flow_template_table *
6092 flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
6093 			       struct rte_flow_pattern_template *it,
6094 			       struct rte_flow_actions_template *at)
6095 {
6096 	struct rte_flow_template_table_attr attr = {
6097 		.flow_attr = {
6098 			.group = 0,
6099 			.priority = 0,
6100 			.ingress = 0,
6101 			.egress = 0,
6102 			.transfer = 1,
6103 		},
6104 		.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
6105 	};
6106 	struct mlx5_flow_template_table_cfg cfg = {
6107 		.attr = attr,
6108 		.external = false,
6109 	};
6110 
6111 	return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL);
6112 }
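
/*
 * Illustrative application-side sketch (documentation macro never defined):
 * combining one pattern template and one actions template into a template
 * table through the public API, mirroring the internal flow_hw_table_create()
 * calls above. Group, priority and capacity are assumptions.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static struct rte_flow_template_table *
example_table_create(uint16_t port_id,
		     struct rte_flow_pattern_template *pt,
		     struct rte_flow_actions_template *at)
{
	const struct rte_flow_template_table_attr attr = {
		.flow_attr = {
			.group = 1,
			.priority = 0,
			.ingress = 1,
		},
		.nb_flows = 64,
	};

	return rte_flow_template_table_create(port_id, &attr,
					      &pt, 1, &at, 1, NULL);
}
#endif /* MLX5_HW_DOC_EXAMPLE */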
6113 
6114 /**
6115  * Creates a set of flow tables used to create control flows used
6116  * when E-Switch is engaged.
6117  *
6118  * @param dev
6119  *   Pointer to Ethernet device.
6120  *
6121  * @return
6122  *   0 on success, -EINVAL otherwise.
6123  */
6124 static __rte_unused int
6125 flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)
6126 {
6127 	struct mlx5_priv *priv = dev->data->dev_private;
6128 	struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL;
6129 	struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL;
6130 	struct rte_flow_pattern_template *port_items_tmpl = NULL;
6131 	struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL;
6132 	struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL;
6133 	struct rte_flow_actions_template *port_actions_tmpl = NULL;
6134 	struct rte_flow_actions_template *jump_one_actions_tmpl = NULL;
6135 	struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL;
6136 	uint32_t xmeta = priv->sh->config.dv_xmeta_en;
6137 	uint32_t repr_matching = priv->sh->config.repr_matching;
6138 
6139 	/* Create templates and table for default SQ miss flow rules - root table. */
6140 	esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev);
6141 	if (!esw_mgr_items_tmpl) {
6142 		DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
6143 			" template for control flows", dev->data->port_id);
6144 		goto error;
6145 	}
6146 	regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev);
6147 	if (!regc_jump_actions_tmpl) {
6148 		DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
6149 			" for control flows", dev->data->port_id);
6150 		goto error;
6151 	}
6152 	MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL);
6153 	priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table
6154 			(dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl);
6155 	if (!priv->hw_esw_sq_miss_root_tbl) {
6156 		DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
6157 			" for control flows", dev->data->port_id);
6158 		goto error;
6159 	}
6160 	/* Create templates and table for default SQ miss flow rules - non-root table. */
6161 	regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev);
6162 	if (!regc_sq_items_tmpl) {
6163 		DRV_LOG(ERR, "port %u failed to create SQ item template for"
6164 			" control flows", dev->data->port_id);
6165 		goto error;
6166 	}
6167 	port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev);
6168 	if (!port_actions_tmpl) {
6169 		DRV_LOG(ERR, "port %u failed to create port action template"
6170 			" for control flows", dev->data->port_id);
6171 		goto error;
6172 	}
6173 	MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL);
6174 	priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl,
6175 								     port_actions_tmpl);
6176 	if (!priv->hw_esw_sq_miss_tbl) {
6177 		DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
6178 			" for control flows", dev->data->port_id);
6179 		goto error;
6180 	}
6181 	/* Create templates and table for default FDB jump flow rules. */
6182 	port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev);
6183 	if (!port_items_tmpl) {
6184 		DRV_LOG(ERR, "port %u failed to create port item template for"
6185 			" control flows", dev->data->port_id);
6186 		goto error;
6187 	}
6188 	jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template
6189 			(dev, MLX5_HW_LOWEST_USABLE_GROUP);
6190 	if (!jump_one_actions_tmpl) {
6191 		DRV_LOG(ERR, "port %u failed to create jump action template"
6192 			" for control flows", dev->data->port_id);
6193 		goto error;
6194 	}
6195 	MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL);
6196 	priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl,
6197 							       jump_one_actions_tmpl);
6198 	if (!priv->hw_esw_zero_tbl) {
6199 		DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
6200 			" for control flows", dev->data->port_id);
6201 		goto error;
6202 	}
6203 	/* Create templates and table for default Tx metadata copy flow rule. */
6204 	if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) {
6205 		tx_meta_items_tmpl = flow_hw_create_tx_default_mreg_copy_pattern_template(dev);
6206 		if (!tx_meta_items_tmpl) {
6207 			DRV_LOG(ERR, "port %u failed to Tx metadata copy pattern"
6208 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy pattern"
6209 			goto error;
6210 		}
6211 		tx_meta_actions_tmpl = flow_hw_create_tx_default_mreg_copy_actions_template(dev);
6212 		if (!tx_meta_actions_tmpl) {
6213 			DRV_LOG(ERR, "port %u failed to Tx metadata copy actions"
6214 			DRV_LOG(ERR, "port %u failed to create Tx metadata copy actions"
6215 			goto error;
6216 		}
6217 		MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL);
6218 		priv->hw_tx_meta_cpy_tbl = flow_hw_create_tx_default_mreg_copy_table(dev,
6219 					tx_meta_items_tmpl, tx_meta_actions_tmpl);
6220 		if (!priv->hw_tx_meta_cpy_tbl) {
6221 			DRV_LOG(ERR, "port %u failed to create table for default"
6222 				" Tx metadata copy flow rule", dev->data->port_id);
6223 			goto error;
6224 		}
6225 	}
6226 	return 0;
6227 error:
6228 	if (priv->hw_esw_zero_tbl) {
6229 		flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL);
6230 		priv->hw_esw_zero_tbl = NULL;
6231 	}
6232 	if (priv->hw_esw_sq_miss_tbl) {
6233 		flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL);
6234 		priv->hw_esw_sq_miss_tbl = NULL;
6235 	}
6236 	if (priv->hw_esw_sq_miss_root_tbl) {
6237 		flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL);
6238 		priv->hw_esw_sq_miss_root_tbl = NULL;
6239 	}
6240 	if (tx_meta_actions_tmpl)
6241 		flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL);
6242 	if (jump_one_actions_tmpl)
6243 		flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL);
6244 	if (port_actions_tmpl)
6245 		flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL);
6246 	if (regc_jump_actions_tmpl)
6247 		flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL);
6248 	if (tx_meta_items_tmpl)
6249 		flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL);
6250 	if (port_items_tmpl)
6251 		flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL);
6252 	if (regc_sq_items_tmpl)
6253 		flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL);
6254 	if (esw_mgr_items_tmpl)
6255 		flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL);
6256 	return -EINVAL;
6257 }
6258 
6259 static void
6260 flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,
6261 		       struct mlx5_aso_ct_pools_mng *ct_mng)
6262 {
6263 	struct mlx5_priv *priv = dev->data->dev_private;
6264 
6265 	mlx5_aso_ct_queue_uninit(priv->sh, ct_mng);
6266 	mlx5_free(ct_mng);
6267 }
6268 
6269 static void
6270 flow_hw_ct_pool_destroy(struct rte_eth_dev *dev __rte_unused,
6271 			struct mlx5_aso_ct_pool *pool)
6272 {
6273 	if (pool->dr_action)
6274 		mlx5dr_action_destroy(pool->dr_action);
6275 	if (pool->devx_obj)
6276 		claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
6277 	if (pool->cts)
6278 		mlx5_ipool_destroy(pool->cts);
6279 	mlx5_free(pool);
6280 }
6281 
6282 static struct mlx5_aso_ct_pool *
6283 flow_hw_ct_pool_create(struct rte_eth_dev *dev,
6284 		       const struct rte_flow_port_attr *port_attr)
6285 {
6286 	struct mlx5_priv *priv = dev->data->dev_private;
6287 	struct mlx5_aso_ct_pool *pool;
6288 	struct mlx5_devx_obj *obj;
6289 	uint32_t nb_cts = rte_align32pow2(port_attr->nb_conn_tracks);
6290 	uint32_t log_obj_size = rte_log2_u32(nb_cts);
6291 	struct mlx5_indexed_pool_config cfg = {
6292 		.size = sizeof(struct mlx5_aso_ct_action),
6293 		.trunk_size = 1 << 12,
6294 		.per_core_cache = 1 << 13,
6295 		.need_lock = 1,
6296 		.release_mem_en = !!priv->sh->config.reclaim_mode,
6297 		.malloc = mlx5_malloc,
6298 		.free = mlx5_free,
6299 		.type = "mlx5_hw_ct_action",
6300 	};
6301 	int reg_id;
6302 	uint32_t flags;
6303 
6304 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
6305 	if (!pool) {
6306 		rte_errno = ENOMEM;
6307 		return NULL;
6308 	}
6309 	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
6310 							  priv->sh->cdev->pdn,
6311 							  log_obj_size);
6312 	if (!obj) {
6313 		rte_errno = ENODATA;
6314 		DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
6315 		goto err;
6316 	}
6317 	pool->devx_obj = obj;
6318 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
6319 	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
6320 	if (priv->sh->config.dv_esw_en && priv->master)
6321 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
6322 	pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
6323 						      (struct mlx5dr_devx_obj *)obj,
6324 						      reg_id - REG_C_0, flags);
6325 	if (!pool->dr_action)
6326 		goto err;
6327 	/*
6328 	 * No need for a local cache if the CT number is small, since the
6329 	 * flow insertion rate will be very limited in that case. Keep the
6330 	 * trunk size below the default 4K in such a case.
6331 	 */
6332 	if (nb_cts <= cfg.trunk_size) {
6333 		cfg.per_core_cache = 0;
6334 		cfg.trunk_size = nb_cts;
6335 	} else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
6336 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
6337 	}
6338 	pool->cts = mlx5_ipool_create(&cfg);
6339 	if (!pool->cts)
6340 		goto err;
6341 	pool->sq = priv->ct_mng->aso_sqs;
6342 	/* Assign the last extra ASO SQ as public SQ. */
6343 	pool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];
6344 	return pool;
6345 err:
6346 	flow_hw_ct_pool_destroy(dev, pool);
6347 	return NULL;
6348 }
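
/*
 * Sizing example for the pool above, with an illustrative request:
 * nb_conn_tracks = 1000 is rounded up to nb_cts = rte_align32pow2(1000) =
 * 1024, the DevX object is created with log_obj_size = rte_log2_u32(1024) =
 * 10, and since 1024 <= the 4K trunk size the ipool per-core cache stays
 * disabled with trunk_size trimmed to 1024.
 */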
6349 
6350 static void
6351 flow_hw_destroy_vlan(struct rte_eth_dev *dev)
6352 {
6353 	struct mlx5_priv *priv = dev->data->dev_private;
6354 	enum mlx5dr_table_type i;
6355 
6356 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
6357 		if (priv->hw_pop_vlan[i]) {
6358 			mlx5dr_action_destroy(priv->hw_pop_vlan[i]);
6359 			priv->hw_pop_vlan[i] = NULL;
6360 		}
6361 		if (priv->hw_push_vlan[i]) {
6362 			mlx5dr_action_destroy(priv->hw_push_vlan[i]);
6363 			priv->hw_push_vlan[i] = NULL;
6364 		}
6365 	}
6366 }
6367 
6368 static int
6369 flow_hw_create_vlan(struct rte_eth_dev *dev)
6370 {
6371 	struct mlx5_priv *priv = dev->data->dev_private;
6372 	enum mlx5dr_table_type i;
6373 	const enum mlx5dr_action_flags flags[MLX5DR_TABLE_TYPE_MAX] = {
6374 		MLX5DR_ACTION_FLAG_HWS_RX,
6375 		MLX5DR_ACTION_FLAG_HWS_TX,
6376 		MLX5DR_ACTION_FLAG_HWS_FDB
6377 	};
6378 
6379 	for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++) {
6380 		priv->hw_pop_vlan[i] =
6381 			mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]);
6382 		if (!priv->hw_pop_vlan[i])
6383 			return -ENOENT;
6384 		priv->hw_push_vlan[i] =
6385 			mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]);
6386 		if (!priv->hw_push_vlan[i])
6387 			return -ENOENT;
6388 	}
6389 	if (priv->sh->config.dv_esw_en && priv->master) {
6390 		priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB] =
6391 			mlx5dr_action_create_pop_vlan
6392 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
6393 		if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB])
6394 			return -ENOENT;
6395 		priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB] =
6396 			mlx5dr_action_create_push_vlan
6397 				(priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
6398 		if (!priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB])
6399 			return -ENOENT;
6400 	}
6401 	return 0;
6402 }
6403 
6404 static void
6405 flow_hw_cleanup_ctrl_rx_tables(struct rte_eth_dev *dev)
6406 {
6407 	struct mlx5_priv *priv = dev->data->dev_private;
6408 	unsigned int i;
6409 	unsigned int j;
6410 
6411 	if (!priv->hw_ctrl_rx)
6412 		return;
6413 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
6414 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
6415 			struct rte_flow_template_table *tbl = priv->hw_ctrl_rx->tables[i][j].tbl;
6416 			struct rte_flow_pattern_template *pt = priv->hw_ctrl_rx->tables[i][j].pt;
6417 
6418 			if (tbl)
6419 				claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
6420 			if (pt)
6421 				claim_zero(flow_hw_pattern_template_destroy(dev, pt, NULL));
6422 		}
6423 	}
6424 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++i) {
6425 		struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[i];
6426 
6427 		if (at)
6428 			claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
6429 	}
6430 	mlx5_free(priv->hw_ctrl_rx);
6431 	priv->hw_ctrl_rx = NULL;
6432 }
6433 
6434 static uint64_t
6435 flow_hw_ctrl_rx_rss_type_hash_types(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
6436 {
6437 	switch (rss_type) {
6438 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP:
6439 		return 0;
6440 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
6441 		return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
6442 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
6443 		return RTE_ETH_RSS_NONFRAG_IPV4_UDP;
6444 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
6445 		return RTE_ETH_RSS_NONFRAG_IPV4_TCP;
6446 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
6447 		return RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
6448 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
6449 		return RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX;
6450 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
6451 		return RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX;
6452 	default:
6453 		/* Should not reach here. */
6454 		MLX5_ASSERT(false);
6455 		return 0;
6456 	}
6457 }
6458 
6459 static struct rte_flow_actions_template *
6460 flow_hw_create_ctrl_rx_rss_template(struct rte_eth_dev *dev,
6461 				    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
6462 {
6463 	struct mlx5_priv *priv = dev->data->dev_private;
6464 	struct rte_flow_actions_template_attr attr = {
6465 		.ingress = 1,
6466 	};
6467 	uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
6468 	struct rte_flow_action_rss rss_conf = {
6469 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
6470 		.level = 0,
6471 		.types = 0,
6472 		.key_len = priv->rss_conf.rss_key_len,
6473 		.key = priv->rss_conf.rss_key,
6474 		.queue_num = priv->reta_idx_n,
6475 		.queue = queue,
6476 	};
6477 	struct rte_flow_action actions[] = {
6478 		{
6479 			.type = RTE_FLOW_ACTION_TYPE_RSS,
6480 			.conf = &rss_conf,
6481 		},
6482 		{
6483 			.type = RTE_FLOW_ACTION_TYPE_END,
6484 		}
6485 	};
6486 	struct rte_flow_action masks[] = {
6487 		{
6488 			.type = RTE_FLOW_ACTION_TYPE_RSS,
6489 			.conf = &rss_conf,
6490 		},
6491 		{
6492 			.type = RTE_FLOW_ACTION_TYPE_END,
6493 		}
6494 	};
6495 	struct rte_flow_actions_template *at;
6496 	struct rte_flow_error error;
6497 	unsigned int i;
6498 
6499 	MLX5_ASSERT(priv->reta_idx_n > 0 && priv->reta_idx);
6500 	/* Select the proper RSS hash types and configure the actions template accordingly. */
6501 	rss_conf.types = flow_hw_ctrl_rx_rss_type_hash_types(rss_type);
6502 	if (rss_conf.types) {
6503 		for (i = 0; i < priv->reta_idx_n; ++i)
6504 			queue[i] = (*priv->reta_idx)[i];
6505 	} else {
6506 		rss_conf.queue_num = 1;
6507 		queue[0] = (*priv->reta_idx)[0];
6508 	}
6509 	at = flow_hw_actions_template_create(dev, &attr, actions, masks, &error);
6510 	if (!at)
6511 		DRV_LOG(ERR,
6512 			"Failed to create ctrl flow actions template: rte_errno(%d), type(%d): %s",
6513 			rte_errno, error.type,
6514 			error.message ? error.message : "(no stated reason)");
6515 	return at;
6516 }
6517 
6518 static uint32_t ctrl_rx_rss_priority_map[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX] = {
6519 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP] = MLX5_HW_CTRL_RX_PRIO_L2,
6520 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4] = MLX5_HW_CTRL_RX_PRIO_L3,
6521 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
6522 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
6523 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6] = MLX5_HW_CTRL_RX_PRIO_L3,
6524 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
6525 	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
6526 };
6527 
6528 static uint32_t ctrl_rx_nb_flows_map[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] = {
6529 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL] = 1,
6530 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST] = 1,
6531 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST] = 1,
6532 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN] = MLX5_MAX_VLAN_IDS,
6533 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST] = 1,
6534 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
6535 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST] = 1,
6536 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
6537 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC] = MLX5_MAX_UC_MAC_ADDRESSES,
6538 	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN] =
6539 			MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
6540 };
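
/*
 * The two maps above combine into the per-table attributes returned below:
 * the priority is taken from the RSS expansion type (L2, L3 or L4 match
 * depth) and the table capacity from the Ethernet pattern type, e.g. a
 * DMAC_VLAN table must hold one rule per unicast MAC address per VLAN ID.
 */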
6541 
6542 static struct rte_flow_template_table_attr
6543 flow_hw_get_ctrl_rx_table_attr(enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
6544 			       const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
6545 {
6546 	return (struct rte_flow_template_table_attr){
6547 		.flow_attr = {
6548 			.group = 0,
6549 			.priority = ctrl_rx_rss_priority_map[rss_type],
6550 			.ingress = 1,
6551 		},
6552 		.nb_flows = ctrl_rx_nb_flows_map[eth_pattern_type],
6553 	};
6554 }
6555 
6556 static struct rte_flow_item
6557 flow_hw_get_ctrl_rx_eth_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
6558 {
6559 	struct rte_flow_item item = {
6560 		.type = RTE_FLOW_ITEM_TYPE_ETH,
6561 		.mask = NULL,
6562 	};
6563 
6564 	switch (eth_pattern_type) {
6565 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
6566 		item.mask = &ctrl_rx_eth_promisc_mask;
6567 		break;
6568 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
6569 		item.mask = &ctrl_rx_eth_mcast_mask;
6570 		break;
6571 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
6572 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
6573 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
6574 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
6575 		item.mask = &ctrl_rx_eth_dmac_mask;
6576 		break;
6577 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
6578 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
6579 		item.mask = &ctrl_rx_eth_ipv4_mcast_mask;
6580 		break;
6581 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
6582 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
6583 		item.mask = &ctrl_rx_eth_ipv6_mcast_mask;
6584 		break;
6585 	default:
6586 		/* Should not reach here - ETH mask must be present. */
6587 		item.type = RTE_FLOW_ITEM_TYPE_END;
6588 		MLX5_ASSERT(false);
6589 		break;
6590 	}
6591 	return item;
6592 }
6593 
6594 static struct rte_flow_item
6595 flow_hw_get_ctrl_rx_vlan_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
6596 {
6597 	struct rte_flow_item item = {
6598 		.type = RTE_FLOW_ITEM_TYPE_VOID,
6599 		.mask = NULL,
6600 	};
6601 
6602 	switch (eth_pattern_type) {
6603 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
6604 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
6605 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
6606 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
6607 		item.type = RTE_FLOW_ITEM_TYPE_VLAN;
6608 		item.mask = &rte_flow_item_vlan_mask;
6609 		break;
6610 	default:
6611 		/* Nothing to update. */
6612 		break;
6613 	}
6614 	return item;
6615 }
6616 
6617 static struct rte_flow_item
6618 flow_hw_get_ctrl_rx_l3_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
6619 {
6620 	struct rte_flow_item item = {
6621 		.type = RTE_FLOW_ITEM_TYPE_VOID,
6622 		.mask = NULL,
6623 	};
6624 
6625 	switch (rss_type) {
6626 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
6627 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
6628 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
6629 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
6630 		break;
6631 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
6632 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
6633 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
6634 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
6635 		break;
6636 	default:
6637 		/* Nothing to update. */
6638 		break;
6639 	}
6640 	return item;
6641 }
6642 
6643 static struct rte_flow_item
6644 flow_hw_get_ctrl_rx_l4_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
6645 {
6646 	struct rte_flow_item item = {
6647 		.type = RTE_FLOW_ITEM_TYPE_VOID,
6648 		.mask = NULL,
6649 	};
6650 
6651 	switch (rss_type) {
6652 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
6653 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
6654 		item.type = RTE_FLOW_ITEM_TYPE_UDP;
6655 		break;
6656 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
6657 	case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
6658 		item.type = RTE_FLOW_ITEM_TYPE_TCP;
6659 		break;
6660 	default:
6661 		/* Nothing to update. */
6662 		break;
6663 	}
6664 	return item;
6665 }
6666 
6667 static struct rte_flow_pattern_template *
6668 flow_hw_create_ctrl_rx_pattern_template
6669 		(struct rte_eth_dev *dev,
6670 		 const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
6671 		 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
6672 {
6673 	const struct rte_flow_pattern_template_attr attr = {
6674 		.relaxed_matching = 0,
6675 		.ingress = 1,
6676 	};
6677 	struct rte_flow_item items[] = {
6678 		/* Matching patterns */
6679 		flow_hw_get_ctrl_rx_eth_item(eth_pattern_type),
6680 		flow_hw_get_ctrl_rx_vlan_item(eth_pattern_type),
6681 		flow_hw_get_ctrl_rx_l3_item(rss_type),
6682 		flow_hw_get_ctrl_rx_l4_item(rss_type),
6683 		/* Terminate pattern */
6684 		{ .type = RTE_FLOW_ITEM_TYPE_END }
6685 	};
6686 
6687 	return flow_hw_pattern_template_create(dev, &attr, items, NULL);
6688 }
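
/*
 * For example, the (BCAST_VLAN, IPV4_UDP) combination above yields the
 * pattern ETH (DMAC mask) / VLAN / IPV4 / UDP / END, while (ALL, NON_IP)
 * degenerates to ETH (promiscuous mask) / VOID / VOID / VOID / END.
 */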
6689 
6690 static int
6691 flow_hw_create_ctrl_rx_tables(struct rte_eth_dev *dev)
6692 {
6693 	struct mlx5_priv *priv = dev->data->dev_private;
6694 	unsigned int i;
6695 	unsigned int j;
6696 	int ret;
6697 
6698 	MLX5_ASSERT(!priv->hw_ctrl_rx);
6699 	priv->hw_ctrl_rx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*priv->hw_ctrl_rx),
6700 				       RTE_CACHE_LINE_SIZE, rte_socket_id());
6701 	if (!priv->hw_ctrl_rx) {
6702 		DRV_LOG(ERR, "Failed to allocate memory for Rx control flow tables");
6703 		rte_errno = ENOMEM;
6704 		return -rte_errno;
6705 	}
6706 	/* Create all pattern template variants. */
6707 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
6708 		enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
6709 
6710 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
6711 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
6712 			struct rte_flow_template_table_attr attr;
6713 			struct rte_flow_pattern_template *pt;
6714 
6715 			attr = flow_hw_get_ctrl_rx_table_attr(eth_pattern_type, rss_type);
6716 			pt = flow_hw_create_ctrl_rx_pattern_template(dev, eth_pattern_type,
6717 								     rss_type);
6718 			if (!pt)
6719 				goto err;
6720 			priv->hw_ctrl_rx->tables[i][j].attr = attr;
6721 			priv->hw_ctrl_rx->tables[i][j].pt = pt;
6722 		}
6723 	}
6724 	return 0;
6725 err:
6726 	ret = rte_errno;
6727 	flow_hw_cleanup_ctrl_rx_tables(dev);
6728 	rte_errno = ret;
6729 	return -ret;
6730 }
6731 
6732 void
6733 mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev)
6734 {
6735 	struct mlx5_priv *priv = dev->data->dev_private;
6736 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
6737 	unsigned int i;
6738 	unsigned int j;
6739 
6740 	if (!priv->dr_ctx)
6741 		return;
6742 	if (!priv->hw_ctrl_rx)
6743 		return;
6744 	hw_ctrl_rx = priv->hw_ctrl_rx;
6745 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
6746 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
6747 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
6748 
6749 			if (tmpls->tbl) {
6750 				claim_zero(flow_hw_table_destroy(dev, tmpls->tbl, NULL));
6751 				tmpls->tbl = NULL;
6752 			}
6753 		}
6754 	}
6755 	for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
6756 		if (hw_ctrl_rx->rss[j]) {
6757 			claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_rx->rss[j], NULL));
6758 			hw_ctrl_rx->rss[j] = NULL;
6759 		}
6760 	}
6761 }
6762 
6763 /**
6764  * Configure port HWS resources.
6765  *
6766  * @param[in] dev
6767  *   Pointer to the rte_eth_dev structure.
6768  * @param[in] port_attr
6769  *   Port configuration attributes.
6770  * @param[in] nb_queue
6771  *   Number of queues.
6772  * @param[in] queue_attr
6773  *   Array that holds attributes for each flow queue.
6774  * @param[out] error
6775  *   Pointer to error structure.
6776  *
6777  * @return
6778  *   0 on success, a negative errno value otherwise and rte_errno is set.
6779  */
6780 static int
6781 flow_hw_configure(struct rte_eth_dev *dev,
6782 		  const struct rte_flow_port_attr *port_attr,
6783 		  uint16_t nb_queue,
6784 		  const struct rte_flow_queue_attr *queue_attr[],
6785 		  struct rte_flow_error *error)
6786 {
6787 	struct mlx5_priv *priv = dev->data->dev_private;
6788 	struct mlx5dr_context *dr_ctx = NULL;
6789 	struct mlx5dr_context_attr dr_ctx_attr = {0};
6790 	struct mlx5_hw_q *hw_q;
6791 	struct mlx5_hw_q_job *job = NULL;
6792 	uint32_t mem_size, i, j;
6793 	struct mlx5_indexed_pool_config cfg = {
6794 		.size = sizeof(struct mlx5_action_construct_data),
6795 		.trunk_size = 4096,
6796 		.need_lock = 1,
6797 		.release_mem_en = !!priv->sh->config.reclaim_mode,
6798 		.malloc = mlx5_malloc,
6799 		.free = mlx5_free,
6800 		.type = "mlx5_hw_action_construct_data",
6801 	};
6802 	/* Add one extra queue to be used by the PMD.
6803 	 * The last queue is reserved for internal PMD usage.
6804 	 */
6805 	uint16_t nb_q_updated = 0;
6806 	struct rte_flow_queue_attr **_queue_attr = NULL;
6807 	struct rte_flow_queue_attr ctrl_queue_attr = {0};
6808 	bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
6809 	int ret = 0;
6810 
6811 	if (!port_attr || !nb_queue || !queue_attr) {
6812 		rte_errno = EINVAL;
6813 		goto err;
6814 	}
6815 	/* In case re-configuring, release existing context at first. */
6816 	if (priv->dr_ctx) {
6817 		/* Verify all queue jobs were returned before release. */
6818 		for (i = 0; i < priv->nb_queue; i++) {
6819 			hw_q = &priv->hw_q[i];
6820 			/* Make sure all queues are empty. */
6821 			if (hw_q->size != hw_q->job_idx) {
6822 				rte_errno = EBUSY;
6823 				goto err;
6824 			}
6825 		}
6826 		flow_hw_resource_release(dev);
6827 	}
6828 	ctrl_queue_attr.size = queue_attr[0]->size;
6829 	nb_q_updated = nb_queue + 1;
6830 	_queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
6831 				  nb_q_updated *
6832 				  sizeof(struct rte_flow_queue_attr *),
6833 				  64, SOCKET_ID_ANY);
6834 	if (!_queue_attr) {
6835 		rte_errno = ENOMEM;
6836 		goto err;
6837 	}
6838 
6839 	memcpy(_queue_attr, queue_attr,
6840 	       sizeof(void *) * nb_queue);
6841 	_queue_attr[nb_queue] = &ctrl_queue_attr;
6842 	priv->acts_ipool = mlx5_ipool_create(&cfg);
6843 	if (!priv->acts_ipool)
6844 		goto err;
6845 	/* Allocate the queue job descriptor LIFO. */
6846 	mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
6847 	for (i = 0; i < nb_q_updated; i++) {
6848 		/*
6849 		 * Check that all queue sizes are equal; this is a
6850 		 * limitation imposed by the HWS layer.
6851 		 */
6852 		if (_queue_attr[i]->size != _queue_attr[0]->size) {
6853 			rte_errno = EINVAL;
6854 			goto err;
6855 		}
6856 		mem_size += (sizeof(struct mlx5_hw_q_job *) +
6857 			    sizeof(struct mlx5_hw_q_job) +
6858 			    sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
6859 			    sizeof(struct mlx5_modification_cmd) *
6860 			    MLX5_MHDR_MAX_CMD +
6861 			    sizeof(struct rte_flow_item) *
6862 			    MLX5_HW_MAX_ITEMS) *
6863 			    _queue_attr[i]->size;
6864 	}
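	/*
	 * Sketch of the single allocation carved out below (illustrative,
	 * derived from the size computation above, assuming queue size S):
	 *
	 *   hw_q[0] .. hw_q[nb_q_updated - 1]
	 *   then, per queue:
	 *     S x (struct mlx5_hw_q_job *)      -- job pointer LIFO
	 *     S x (struct mlx5_hw_q_job)        -- job descriptors
	 *     S x MLX5_MHDR_MAX_CMD commands    -- modify-header commands
	 *     S x MLX5_ENCAP_MAX_LEN bytes      -- encap data buffers
	 *     S x MLX5_HW_MAX_ITEMS items       -- flow items
	 */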
6865 	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
6866 				 64, SOCKET_ID_ANY);
6867 	if (!priv->hw_q) {
6868 		rte_errno = ENOMEM;
6869 		goto err;
6870 	}
6871 	for (i = 0; i < nb_q_updated; i++) {
6872 		char mz_name[RTE_MEMZONE_NAMESIZE];
6873 		uint8_t *encap = NULL;
6874 		struct mlx5_modification_cmd *mhdr_cmd = NULL;
6875 		struct rte_flow_item *items = NULL;
6876 
6877 		priv->hw_q[i].job_idx = _queue_attr[i]->size;
6878 		priv->hw_q[i].size = _queue_attr[i]->size;
6879 		if (i == 0)
6880 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
6881 					    &priv->hw_q[nb_q_updated];
6882 		else
6883 			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
6884 				&job[_queue_attr[i - 1]->size - 1].items
6885 				 [MLX5_HW_MAX_ITEMS];
6886 		job = (struct mlx5_hw_q_job *)
6887 		      &priv->hw_q[i].job[_queue_attr[i]->size];
6888 		mhdr_cmd = (struct mlx5_modification_cmd *)
6889 			   &job[_queue_attr[i]->size];
6890 		encap = (uint8_t *)
6891 			 &mhdr_cmd[_queue_attr[i]->size * MLX5_MHDR_MAX_CMD];
6892 		items = (struct rte_flow_item *)
6893 			 &encap[_queue_attr[i]->size * MLX5_ENCAP_MAX_LEN];
6894 		for (j = 0; j < _queue_attr[i]->size; j++) {
6895 			job[j].mhdr_cmd = &mhdr_cmd[j * MLX5_MHDR_MAX_CMD];
6896 			job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];
6897 			job[j].items = &items[j * MLX5_HW_MAX_ITEMS];
6898 			priv->hw_q[i].job[j] = &job[j];
6899 		}
6900 		snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_cq_%u",
6901 			 dev->data->port_id, i);
6902 		priv->hw_q[i].indir_cq = rte_ring_create(mz_name,
6903 				_queue_attr[i]->size, SOCKET_ID_ANY,
6904 				RING_F_SP_ENQ | RING_F_SC_DEQ |
6905 				RING_F_EXACT_SZ);
6906 		if (!priv->hw_q[i].indir_cq)
6907 			goto err;
6908 		snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_iq_%u",
6909 			 dev->data->port_id, i);
6910 		priv->hw_q[i].indir_iq = rte_ring_create(mz_name,
6911 				_queue_attr[i]->size, SOCKET_ID_ANY,
6912 				RING_F_SP_ENQ | RING_F_SC_DEQ |
6913 				RING_F_EXACT_SZ);
6914 		if (!priv->hw_q[i].indir_iq)
6915 			goto err;
6916 	}
6917 	dr_ctx_attr.pd = priv->sh->cdev->pd;
6918 	dr_ctx_attr.queues = nb_q_updated;
6919 	/* All queue sizes must be the same; take the first one. */
6920 	dr_ctx_attr.queue_size = _queue_attr[0]->size;
6921 	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
6922 	/* rte_errno has been updated by HWS layer. */
6923 	if (!dr_ctx)
6924 		goto err;
6925 	priv->dr_ctx = dr_ctx;
6926 	priv->nb_queue = nb_q_updated;
6927 	rte_spinlock_init(&priv->hw_ctrl_lock);
6928 	LIST_INIT(&priv->hw_ctrl_flows);
6929 	ret = flow_hw_create_ctrl_rx_tables(dev);
6930 	if (ret) {
6931 		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6932 				   "Failed to set up Rx control flow templates");
6933 		goto err;
6934 	}
6935 	/* Initialize the meter library. */
6936 	if (port_attr->nb_meters)
6937 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 1, 1, nb_q_updated))
6938 			goto err;
6939 	/* Add global actions. */
6940 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
6941 		uint32_t act_flags = 0;
6942 
6943 		act_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];
6944 		if (is_proxy)
6945 			act_flags |= mlx5_hw_act_flag[i][2];
6946 		priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
6947 		if (!priv->hw_drop[i])
6948 			goto err;
6949 		priv->hw_tag[i] = mlx5dr_action_create_tag
6950 			(priv->dr_ctx, mlx5_hw_act_flag[i][0]);
6951 		if (!priv->hw_tag[i])
6952 			goto err;
6953 	}
6954 	if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) {
6955 		ret = flow_hw_setup_tx_repr_tagging(dev);
6956 		if (ret) {
6957 			rte_errno = -ret;
6958 			goto err;
6959 		}
6960 	}
6961 	if (is_proxy) {
6962 		ret = flow_hw_create_vport_actions(priv);
6963 		if (ret) {
6964 			rte_errno = -ret;
6965 			goto err;
6966 		}
6967 		ret = flow_hw_create_ctrl_tables(dev);
6968 		if (ret) {
6969 			rte_errno = -ret;
6970 			goto err;
6971 		}
6972 	}
6973 	if (port_attr->nb_conn_tracks) {
6974 		mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
6975 			   sizeof(*priv->ct_mng);
6976 		priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
6977 					   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
6978 		if (!priv->ct_mng)
6979 			goto err;
6980 		if (mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng, nb_q_updated))
6981 			goto err;
6982 		priv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);
6983 		if (!priv->hws_ctpool)
6984 			goto err;
6985 		priv->sh->ct_aso_en = 1;
6986 	}
6987 	if (port_attr->nb_counters) {
6988 		priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
6989 							   nb_queue);
6990 		if (priv->hws_cpool == NULL)
6991 			goto err;
6992 	}
6993 	if (port_attr->nb_aging_objects) {
6994 		if (port_attr->nb_counters == 0) {
6995 			/*
6996 			 * Aging management uses counters. The number of
6997 			 * requested counters should account for one counter
6998 			 * per flow rule containing AGE without an explicit counter.
6999 			 */
7000 			DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
7001 				"but no counters were requested.",
7002 				dev->data->port_id,
7003 				port_attr->nb_aging_objects);
7004 			rte_errno = EINVAL;
7005 			goto err;
7006 		}
7007 		ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue);
7008 		if (ret < 0)
7009 			goto err;
7010 	}
7011 	ret = flow_hw_create_vlan(dev);
7012 	if (ret)
7013 		goto err;
7014 	if (_queue_attr)
7015 		mlx5_free(_queue_attr);
7016 	if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
7017 		priv->hws_strict_queue = 1;
7018 	return 0;
7019 err:
7020 	if (priv->hws_ctpool) {
7021 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
7022 		priv->hws_ctpool = NULL;
7023 	}
7024 	if (priv->ct_mng) {
7025 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
7026 		priv->ct_mng = NULL;
7027 	}
7028 	if (priv->hws_age_req)
7029 		mlx5_hws_age_pool_destroy(priv);
7030 	if (priv->hws_cpool) {
7031 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
7032 		priv->hws_cpool = NULL;
7033 	}
7034 	flow_hw_free_vport_actions(priv);
7035 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
7036 		if (priv->hw_drop[i])
7037 			mlx5dr_action_destroy(priv->hw_drop[i]);
7038 		if (priv->hw_tag[i])
7039 			mlx5dr_action_destroy(priv->hw_tag[i]);
7040 	}
7041 	flow_hw_destroy_vlan(dev);
7042 	if (dr_ctx)
7043 		claim_zero(mlx5dr_context_close(dr_ctx));
7044 	for (i = 0; i < nb_q_updated; i++) {
7045 		rte_ring_free(priv->hw_q[i].indir_iq);
7046 		rte_ring_free(priv->hw_q[i].indir_cq);
7047 	}
7048 	mlx5_free(priv->hw_q);
7049 	priv->hw_q = NULL;
7050 	if (priv->acts_ipool) {
7051 		mlx5_ipool_destroy(priv->acts_ipool);
7052 		priv->acts_ipool = NULL;
7053 	}
7054 	if (_queue_attr)
7055 		mlx5_free(_queue_attr);
7056 	/* Do not overwrite the internal errno information. */
7057 	if (ret)
7058 		return ret;
7059 	return rte_flow_error_set(error, rte_errno,
7060 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7061 				  "failed to configure port");
7062 }
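
/*
 * Usage sketch (illustrative only, not part of the driver): how an
 * application reaches flow_hw_configure() through the generic rte_flow
 * API. The function name and attribute values below are arbitrary
 * examples; the port must be stopped when rte_flow_configure() is called.
 */
static __rte_unused int
example_hws_configure(uint16_t port_id)
{
	const struct rte_flow_port_attr port_attr = {
		.nb_counters = 1 << 13,
		/* AGE needs counters - see the nb_counters check above. */
		.nb_aging_objects = 1 << 10,
	};
	const struct rte_flow_queue_attr qattr = { .size = 64 };
	/* All queues must share one size; the PMD adds a control queue. */
	const struct rte_flow_queue_attr *queue_attr[] = { &qattr, &qattr };
	struct rte_flow_error error;

	return rte_flow_configure(port_id, &port_attr,
				  RTE_DIM(queue_attr), queue_attr, &error);
}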
7063 
7064 /**
7065  * Release HWS resources.
7066  *
7067  * @param[in] dev
7068  *   Pointer to the rte_eth_dev structure.
7069  */
7070 void
7071 flow_hw_resource_release(struct rte_eth_dev *dev)
7072 {
7073 	struct mlx5_priv *priv = dev->data->dev_private;
7074 	struct rte_flow_template_table *tbl;
7075 	struct rte_flow_pattern_template *it;
7076 	struct rte_flow_actions_template *at;
7077 	uint32_t i;
7078 
7079 	if (!priv->dr_ctx)
7080 		return;
7081 	flow_hw_rxq_flag_set(dev, false);
7082 	flow_hw_flush_all_ctrl_flows(dev);
7083 	flow_hw_cleanup_tx_repr_tagging(dev);
7084 	flow_hw_cleanup_ctrl_rx_tables(dev);
7085 	while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) {
7086 		tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
7087 		flow_hw_table_destroy(dev, tbl, NULL);
7088 	}
7089 	while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
7090 		tbl = LIST_FIRST(&priv->flow_hw_tbl);
7091 		flow_hw_table_destroy(dev, tbl, NULL);
7092 	}
7093 	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
7094 		it = LIST_FIRST(&priv->flow_hw_itt);
7095 		flow_hw_pattern_template_destroy(dev, it, NULL);
7096 	}
7097 	while (!LIST_EMPTY(&priv->flow_hw_at)) {
7098 		at = LIST_FIRST(&priv->flow_hw_at);
7099 		flow_hw_actions_template_destroy(dev, at, NULL);
7100 	}
7101 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
7102 		if (priv->hw_drop[i])
7103 			mlx5dr_action_destroy(priv->hw_drop[i]);
7104 		if (priv->hw_tag[i])
7105 			mlx5dr_action_destroy(priv->hw_tag[i]);
7106 	}
7107 	flow_hw_destroy_vlan(dev);
7108 	flow_hw_free_vport_actions(priv);
7109 	if (priv->acts_ipool) {
7110 		mlx5_ipool_destroy(priv->acts_ipool);
7111 		priv->acts_ipool = NULL;
7112 	}
7113 	if (priv->hws_age_req)
7114 		mlx5_hws_age_pool_destroy(priv);
7115 	if (priv->hws_cpool) {
7116 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
7117 		priv->hws_cpool = NULL;
7118 	}
7119 	if (priv->hws_ctpool) {
7120 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
7121 		priv->hws_ctpool = NULL;
7122 	}
7123 	if (priv->ct_mng) {
7124 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
7125 		priv->ct_mng = NULL;
7126 	}
7127 	for (i = 0; i < priv->nb_queue; i++) {
7128 		rte_ring_free(priv->hw_q[i].indir_iq);
7129 		rte_ring_free(priv->hw_q[i].indir_cq);
7130 	}
7131 	mlx5_free(priv->hw_q);
7132 	priv->hw_q = NULL;
7133 	claim_zero(mlx5dr_context_close(priv->dr_ctx));
7134 	priv->dr_ctx = NULL;
7135 	priv->nb_queue = 0;
7136 }
7137 
7138 /* Sets vport tag and mask, for given port, used in HWS rules. */
7139 void
7140 flow_hw_set_port_info(struct rte_eth_dev *dev)
7141 {
7142 	struct mlx5_priv *priv = dev->data->dev_private;
7143 	uint16_t port_id = dev->data->port_id;
7144 	struct flow_hw_port_info *info;
7145 
7146 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
7147 	info = &mlx5_flow_hw_port_infos[port_id];
7148 	info->regc_mask = priv->vport_meta_mask;
7149 	info->regc_value = priv->vport_meta_tag;
7150 	info->is_wire = priv->master;
7151 }
7152 
7153 /* Clears vport tag and mask used for HWS rules. */
7154 void
7155 flow_hw_clear_port_info(struct rte_eth_dev *dev)
7156 {
7157 	uint16_t port_id = dev->data->port_id;
7158 	struct flow_hw_port_info *info;
7159 
7160 	MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
7161 	info = &mlx5_flow_hw_port_infos[port_id];
7162 	info->regc_mask = 0;
7163 	info->regc_value = 0;
7164 	info->is_wire = 0;
7165 }
7166 
7167 /*
7168  * Initialize the information of available tag registers and an intersection
7169  * of all the probed devices' REG_C_Xs.
7170  * Note: the steering layer has no port concept, so right now this cannot be done at per-port level.
7171  *
7172  * @param[in] dev
7173  *   Pointer to the rte_eth_dev structure.
7174  */
7175 void flow_hw_init_tags_set(struct rte_eth_dev *dev)
7176 {
7177 	struct mlx5_priv *priv = dev->data->dev_private;
7178 	uint32_t meta_mode = priv->sh->config.dv_xmeta_en;
7179 	uint8_t masks = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
7180 	uint32_t i, j;
7181 	enum modify_reg copy[MLX5_FLOW_HW_TAGS_MAX] = {REG_NON};
7182 	uint8_t unset = 0;
7183 	uint8_t copy_masks = 0;
7184 
7185 	/*
7186 	 * The capability is global for the common device but only used
7187 	 * by the net driver. It is shared per E-Switch domain.
7188 	 */
7189 	if (!!priv->sh->hws_tags)
7190 		return;
7191 	unset |= 1 << (priv->mtr_color_reg - REG_C_0);
7192 	unset |= 1 << (REG_C_6 - REG_C_0);
7193 	if (priv->sh->config.dv_esw_en)
7194 		unset |= 1 << (REG_C_0 - REG_C_0);
7195 	if (meta_mode == MLX5_XMETA_MODE_META32_HWS)
7196 		unset |= 1 << (REG_C_1 - REG_C_0);
7197 	masks &= ~unset;
7198 	if (mlx5_flow_hw_avl_tags_init_cnt) {
7199 		MLX5_ASSERT(mlx5_flow_hw_aso_tag == priv->mtr_color_reg);
7200 		for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) {
7201 			if (mlx5_flow_hw_avl_tags[i] != REG_NON && !!((1 << i) & masks)) {
7202 				copy[mlx5_flow_hw_avl_tags[i] - REG_C_0] =
7203 						mlx5_flow_hw_avl_tags[i];
7204 				copy_masks |= (1 << (mlx5_flow_hw_avl_tags[i] - REG_C_0));
7205 			}
7206 		}
7207 		if (copy_masks != masks) {
7208 			j = 0;
7209 			for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++)
7210 				if (!!((1 << i) & copy_masks))
7211 					mlx5_flow_hw_avl_tags[j++] = copy[i];
7212 		}
7213 	} else {
7214 		j = 0;
7215 		for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) {
7216 			if (!!((1 << i) & masks))
7217 				mlx5_flow_hw_avl_tags[j++] =
7218 					(enum modify_reg)(i + (uint32_t)REG_C_0);
7219 		}
7220 	}
7221 	priv->sh->hws_tags = 1;
7222 	mlx5_flow_hw_aso_tag = (enum modify_reg)priv->mtr_color_reg;
7223 	mlx5_flow_hw_avl_tags_init_cnt++;
7224 }
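
/*
 * Worked example for the masking logic above (numbers are hypothetical):
 * with set_reg_c = 0xff, mtr_color_reg = REG_C_3, dv_esw_en set and
 * dv_xmeta_en = MLX5_XMETA_MODE_META32_HWS,
 * unset = (1 << 3) | (1 << 6) | (1 << 0) | (1 << 1) = 0x4b, so
 * masks = 0xff & ~0x4b = 0xb4 and REG_C_2, REG_C_4, REG_C_5 and REG_C_7
 * remain available as tag registers.
 */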
7225 
7226 /*
7227  * Reset the available tag registers information to NONE.
7228  *
7229  * @param[in] dev
7230  *   Pointer to the rte_eth_dev structure.
7231  */
7232 void flow_hw_clear_tags_set(struct rte_eth_dev *dev)
7233 {
7234 	struct mlx5_priv *priv = dev->data->dev_private;
7235 
7236 	if (!priv->sh->hws_tags)
7237 		return;
7238 	priv->sh->hws_tags = 0;
7239 	mlx5_flow_hw_avl_tags_init_cnt--;
7240 	if (!mlx5_flow_hw_avl_tags_init_cnt)
7241 		memset(mlx5_flow_hw_avl_tags, REG_NON,
7242 		       sizeof(enum modify_reg) * MLX5_FLOW_HW_TAGS_MAX);
7243 }
7244 
7245 uint32_t mlx5_flow_hw_flow_metadata_config_refcnt;
7246 uint8_t mlx5_flow_hw_flow_metadata_esw_en;
7247 uint8_t mlx5_flow_hw_flow_metadata_xmeta_en;
7248 
7249 /**
7250  * Initializes static configuration of META flow items.
7251  *
7252  * As a temporary workaround, META flow item is translated to a register,
7253  * based on statically saved dv_esw_en and dv_xmeta_en device arguments.
7254  * It is a workaround for flow_hw_get_reg_id() where port specific information
7255  * is not available at runtime.
7256  *
7257  * Values of dv_esw_en and dv_xmeta_en device arguments are taken from the first opened port.
7258  * This means that each mlx5 port will use the same configuration for translation
7259  * of META flow items.
7260  *
7261  * @param[in] dev
7262  *    Pointer to Ethernet device.
7263  */
7264 void
7265 flow_hw_init_flow_metadata_config(struct rte_eth_dev *dev)
7266 {
7267 	uint32_t refcnt;
7268 
7269 	refcnt = __atomic_fetch_add(&mlx5_flow_hw_flow_metadata_config_refcnt, 1,
7270 				    __ATOMIC_RELAXED);
7271 	if (refcnt > 0)
7272 		return;
7273 	mlx5_flow_hw_flow_metadata_esw_en = MLX5_SH(dev)->config.dv_esw_en;
7274 	mlx5_flow_hw_flow_metadata_xmeta_en = MLX5_SH(dev)->config.dv_xmeta_en;
7275 }
7276 
7277 /**
7278  * Clears statically stored configuration related to META flow items.
7279  */
7280 void
7281 flow_hw_clear_flow_metadata_config(void)
7282 {
7283 	uint32_t refcnt;
7284 
7285 	refcnt = __atomic_sub_fetch(&mlx5_flow_hw_flow_metadata_config_refcnt, 1,
7286 				    __ATOMIC_RELAXED);
7287 	if (refcnt > 0)
7288 		return;
7289 	mlx5_flow_hw_flow_metadata_esw_en = 0;
7290 	mlx5_flow_hw_flow_metadata_xmeta_en = 0;
7291 }
7292 
7293 static int
7294 flow_hw_conntrack_destroy(struct rte_eth_dev *dev __rte_unused,
7295 			  uint32_t idx,
7296 			  struct rte_flow_error *error)
7297 {
7298 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
7299 	uint32_t ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
7300 	struct rte_eth_dev *owndev = &rte_eth_devices[owner];
7301 	struct mlx5_priv *priv = owndev->data->dev_private;
7302 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
7303 	struct mlx5_aso_ct_action *ct;
7304 
7305 	ct = mlx5_ipool_get(pool->cts, ct_idx);
7306 	if (!ct) {
7307 		return rte_flow_error_set(error, EINVAL,
7308 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7309 				NULL,
7310 				"Invalid CT destruction index");
7311 	}
7312 	__atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
7313 				 __ATOMIC_RELAXED);
7314 	mlx5_ipool_free(pool->cts, ct_idx);
7315 	return 0;
7316 }
7317 
7318 static int
7319 flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,
7320 			struct rte_flow_action_conntrack *profile,
7321 			void *user_data, bool push,
7322 			struct rte_flow_error *error)
7323 {
7324 	struct mlx5_priv *priv = dev->data->dev_private;
7325 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
7326 	struct mlx5_aso_ct_action *ct;
7327 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
7328 	uint32_t ct_idx;
7329 
7330 	if (owner != PORT_ID(priv))
7331 		return rte_flow_error_set(error, EACCES,
7332 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7333 				NULL,
7334 				"Can't query CT object owned by another port");
7335 	ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
7336 	ct = mlx5_ipool_get(pool->cts, ct_idx);
7337 	if (!ct) {
7338 		return rte_flow_error_set(error, EINVAL,
7339 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7340 				NULL,
7341 				"Invalid CT query index");
7342 	}
7343 	profile->peer_port = ct->peer;
7344 	profile->is_original_dir = ct->is_original;
7345 	if (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))
7346 		return rte_flow_error_set(error, EIO,
7347 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7348 				NULL,
7349 				"Failed to query CT context");
7350 	return 0;
7351 }
7352 
7353 
7354 static int
7355 flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
7356 			 const struct rte_flow_modify_conntrack *action_conf,
7357 			 uint32_t idx, void *user_data, bool push,
7358 			 struct rte_flow_error *error)
7359 {
7360 	struct mlx5_priv *priv = dev->data->dev_private;
7361 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
7362 	struct mlx5_aso_ct_action *ct;
7363 	const struct rte_flow_action_conntrack *new_prf;
7364 	uint16_t owner = (uint16_t)MLX5_ACTION_CTX_CT_GET_OWNER(idx);
7365 	uint32_t ct_idx;
7366 	int ret = 0;
7367 
7368 	if (PORT_ID(priv) != owner)
7369 		return rte_flow_error_set(error, EACCES,
7370 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7371 					  NULL,
7372 					  "Can't update CT object owned by another port");
7373 	ct_idx = MLX5_ACTION_CTX_CT_GET_IDX(idx);
7374 	ct = mlx5_ipool_get(pool->cts, ct_idx);
7375 	if (!ct) {
7376 		return rte_flow_error_set(error, EINVAL,
7377 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7378 				NULL,
7379 				"Invalid CT update index");
7380 	}
7381 	new_prf = &action_conf->new_ct;
7382 	if (action_conf->direction)
7383 		ct->is_original = !!new_prf->is_original_dir;
7384 	if (action_conf->state) {
7385 		/* Only validate the profile when it needs to be updated. */
7386 		ret = mlx5_validate_action_ct(dev, new_prf, error);
7387 		if (ret)
7388 			return ret;
7389 		ret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,
7390 						user_data, push);
7391 		if (ret)
7392 			return rte_flow_error_set(error, EIO,
7393 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7394 					NULL,
7395 					"Failed to send CT context update WQE");
7396 		if (queue != MLX5_HW_INV_QUEUE)
7397 			return 0;
7398 		/* Block until ready or a failure in synchronous mode. */
7399 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
7400 		if (ret)
7401 			rte_flow_error_set(error, rte_errno,
7402 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7403 					   NULL,
7404 					   "Timeout to get the CT update");
7405 	}
7406 	return ret;
7407 }
7408 
7409 static struct rte_flow_action_handle *
7410 flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
7411 			 const struct rte_flow_action_conntrack *pro,
7412 			 void *user_data, bool push,
7413 			 struct rte_flow_error *error)
7414 {
7415 	struct mlx5_priv *priv = dev->data->dev_private;
7416 	struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
7417 	struct mlx5_aso_ct_action *ct;
7418 	uint32_t ct_idx = 0;
7419 	int ret;
7420 	bool async = !!(queue != MLX5_HW_INV_QUEUE);
7421 
7422 	if (!pool) {
7423 		rte_flow_error_set(error, EINVAL,
7424 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7425 				   "CT is not enabled");
7426 		return NULL;
7427 	}
7428 	ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
7429 	if (!ct) {
7430 		rte_flow_error_set(error, rte_errno,
7431 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7432 				   "Failed to allocate CT object");
7433 		return NULL;
7434 	}
7435 	ct->offset = ct_idx - 1;
7436 	ct->is_original = !!pro->is_original_dir;
7437 	ct->peer = pro->peer_port;
7438 	ct->pool = pool;
7439 	if (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {
7440 		mlx5_ipool_free(pool->cts, ct_idx);
7441 		rte_flow_error_set(error, EBUSY,
7442 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7443 				   "Failed to update CT");
7444 		return NULL;
7445 	}
7446 	if (!async) {
7447 		ret = mlx5_aso_ct_available(priv->sh, queue, ct);
7448 		if (ret) {
7449 			mlx5_ipool_free(pool->cts, ct_idx);
7450 			rte_flow_error_set(error, rte_errno,
7451 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7452 					   NULL,
7453 					   "Timeout to get the CT update");
7454 			return NULL;
7455 		}
7456 	}
7457 	return (struct rte_flow_action_handle *)(uintptr_t)
7458 		MLX5_ACTION_CTX_CT_GEN_IDX(PORT_ID(priv), ct_idx);
7459 }
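
/*
 * The CT handle returned above is not a pointer but an encoded value:
 * MLX5_ACTION_CTX_CT_GEN_IDX() packs the owner port ID together with the
 * pool index, and the destroy/query/update paths above recover both with
 * MLX5_ACTION_CTX_CT_GET_OWNER() and MLX5_ACTION_CTX_CT_GET_IDX().
 */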
7460 
7461 /**
7462  * Validate shared action.
7463  *
7464  * @param[in] dev
7465  *   Pointer to the rte_eth_dev structure.
7466  * @param[in] queue
7467  *   Which queue to be used.
7468  * @param[in] attr
7469  *   Operation attribute.
7470  * @param[in] conf
7471  *   Indirect action configuration.
7472  * @param[in] action
7473  *   rte_flow action detail.
7474  * @param[in] user_data
7475  *   Pointer to the user_data.
7476  * @param[out] error
7477  *   Pointer to error structure.
7478  *
7479  * @return
7480  *   0 on success, otherwise negative errno value.
7481  */
7482 static int
7483 flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
7484 			       const struct rte_flow_op_attr *attr,
7485 			       const struct rte_flow_indir_action_conf *conf,
7486 			       const struct rte_flow_action *action,
7487 			       void *user_data,
7488 			       struct rte_flow_error *error)
7489 {
7490 	struct mlx5_priv *priv = dev->data->dev_private;
7491 
7492 	RTE_SET_USED(attr);
7493 	RTE_SET_USED(queue);
7494 	RTE_SET_USED(user_data);
7495 	switch (action->type) {
7496 	case RTE_FLOW_ACTION_TYPE_AGE:
7497 		if (!priv->hws_age_req)
7498 			return rte_flow_error_set(error, EINVAL,
7499 						  RTE_FLOW_ERROR_TYPE_ACTION,
7500 						  NULL,
7501 						  "aging pool not initialized");
7502 		break;
7503 	case RTE_FLOW_ACTION_TYPE_COUNT:
7504 		if (!priv->hws_cpool)
7505 			return rte_flow_error_set(error, EINVAL,
7506 						  RTE_FLOW_ERROR_TYPE_ACTION,
7507 						  NULL,
7508 						  "counters pool not initialized");
7509 		break;
7510 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7511 		if (priv->hws_ctpool == NULL)
7512 			return rte_flow_error_set(error, EINVAL,
7513 						  RTE_FLOW_ERROR_TYPE_ACTION,
7514 						  NULL,
7515 						  "CT pool not initialized");
7516 		return mlx5_validate_action_ct(dev, action->conf, error);
7517 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
7518 		return flow_hw_validate_action_meter_mark(dev, action, error);
7519 	case RTE_FLOW_ACTION_TYPE_RSS:
7520 		return flow_dv_action_validate(dev, conf, action, error);
7521 	default:
7522 		return rte_flow_error_set(error, ENOTSUP,
7523 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7524 					  "action type not supported");
7525 	}
7526 	return 0;
7527 }
7528 
7529 /**
7530  * Create shared action.
7531  *
7532  * @param[in] dev
7533  *   Pointer to the rte_eth_dev structure.
7534  * @param[in] queue
7535  *   Which queue to be used.
7536  * @param[in] attr
7537  *   Operation attribute.
7538  * @param[in] conf
7539  *   Indirect action configuration.
7540  * @param[in] action
7541  *   rte_flow action detail.
7542  * @param[in] user_data
7543  *   Pointer to the user_data.
7544  * @param[out] error
7545  *   Pointer to error structure.
7546  *
7547  * @return
7548  *   Action handle on success, NULL otherwise and rte_errno is set.
7549  */
7550 static struct rte_flow_action_handle *
7551 flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
7552 			     const struct rte_flow_op_attr *attr,
7553 			     const struct rte_flow_indir_action_conf *conf,
7554 			     const struct rte_flow_action *action,
7555 			     void *user_data,
7556 			     struct rte_flow_error *error)
7557 {
7558 	struct rte_flow_action_handle *handle = NULL;
7559 	struct mlx5_hw_q_job *job = NULL;
7560 	struct mlx5_priv *priv = dev->data->dev_private;
7561 	const struct rte_flow_action_age *age;
7562 	struct mlx5_aso_mtr *aso_mtr;
7563 	cnt_id_t cnt_id;
7564 	uint32_t mtr_id;
7565 	uint32_t age_idx;
7566 	bool push = true;
7567 	bool aso = false;
7568 
7569 	if (attr) {
7570 		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
7571 		if (unlikely(!priv->hw_q[queue].job_idx)) {
7572 			rte_flow_error_set(error, ENOMEM,
7573 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7574 				"Flow queue full.");
7575 			return NULL;
7576 		}
7577 		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
7578 		job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
7579 		job->user_data = user_data;
7580 		push = !attr->postpone;
7581 	}
7582 	switch (action->type) {
7583 	case RTE_FLOW_ACTION_TYPE_AGE:
7584 		if (priv->hws_strict_queue) {
7585 			struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
7586 
7587 			if (queue >= info->hw_q_age->nb_rings) {
7588 				rte_flow_error_set(error, EINVAL,
7589 						   RTE_FLOW_ERROR_TYPE_ACTION,
7590 						   NULL,
7591 						   "Invalid queue ID for indirect AGE.");
7592 				rte_errno = EINVAL;
7593 				return NULL;
7594 			}
7595 		}
7596 		age = action->conf;
7597 		age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
7598 						     0, error);
7599 		if (age_idx == 0) {
7600 			rte_flow_error_set(error, ENODEV,
7601 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7602 					   "AGE is not configured!");
7603 		} else {
7604 			age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
7605 				   MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
7606 			handle =
7607 			    (struct rte_flow_action_handle *)(uintptr_t)age_idx;
7608 		}
7609 		break;
7610 	case RTE_FLOW_ACTION_TYPE_COUNT:
7611 		if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
7612 			rte_flow_error_set(error, ENODEV,
7613 					RTE_FLOW_ERROR_TYPE_ACTION,
7614 					NULL,
7615 					"counters are not configured!");
7616 		else
7617 			handle = (struct rte_flow_action_handle *)
7618 				 (uintptr_t)cnt_id;
7619 		break;
7620 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7621 		aso = true;
7622 		handle = flow_hw_conntrack_create(dev, queue, action->conf, job,
7623 						  push, error);
7624 		break;
7625 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
7626 		aso = true;
7627 		aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push);
7628 		if (!aso_mtr)
7629 			break;
7630 		mtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
7631 			MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id);
7632 		handle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id;
7633 		break;
7634 	case RTE_FLOW_ACTION_TYPE_RSS:
7635 		handle = flow_dv_action_create(dev, conf, action, error);
7636 		break;
7637 	default:
7638 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7639 				   NULL, "action type not supported");
7640 		break;
7641 	}
7642 	if (job) {
7643 		if (!handle) {
7644 			priv->hw_q[queue].job_idx++;
7645 			return NULL;
7646 		}
7647 		job->action = handle;
7648 		if (push)
7649 			__flow_hw_push_action(dev, queue);
7650 		if (aso)
7651 			return handle;
7652 		rte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :
7653 				 priv->hw_q[queue].indir_iq, job);
7654 	}
7655 	return handle;
7656 }
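
/*
 * Usage sketch (illustrative only): enqueuing an indirect AGE action
 * creation on a flow queue through the public API, which lands in
 * flow_hw_action_handle_create() above. Completion must be collected
 * later with rte_flow_pull(); names and values here are examples.
 */
static __rte_unused struct rte_flow_action_handle *
example_async_age_create(uint16_t port_id, uint32_t queue_id)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action_age age = { .timeout = 10 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_AGE,
		.conf = &age,
	};
	struct rte_flow_error error;

	return rte_flow_async_action_handle_create(port_id, queue_id,
						   &op_attr, &conf, &action,
						   NULL, &error);
}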
7657 
7658 /**
7659  * Update shared action.
7660  *
7661  * @param[in] dev
7662  *   Pointer to the rte_eth_dev structure.
7663  * @param[in] queue
7664  *   Which queue to be used.
7665  * @param[in] attr
7666  *   Operation attribute.
7667  * @param[in] handle
7668  *   Action handle to be updated.
7669  * @param[in] update
7670  *   Update value.
7671  * @param[in] user_data
7672  *   Pointer to the user_data.
7673  * @param[out] error
7674  *   Pointer to error structure.
7675  *
7676  * @return
7677  *   0 on success, negative value otherwise and rte_errno is set.
7678  */
7679 static int
7680 flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
7681 			     const struct rte_flow_op_attr *attr,
7682 			     struct rte_flow_action_handle *handle,
7683 			     const void *update,
7684 			     void *user_data,
7685 			     struct rte_flow_error *error)
7686 {
7687 	struct mlx5_priv *priv = dev->data->dev_private;
7688 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
7689 	const struct rte_flow_modify_conntrack *ct_conf =
7690 		(const struct rte_flow_modify_conntrack *)update;
7691 	const struct rte_flow_update_meter_mark *upd_meter_mark =
7692 		(const struct rte_flow_update_meter_mark *)update;
7693 	const struct rte_flow_action_meter_mark *meter_mark;
7694 	struct mlx5_hw_q_job *job = NULL;
7695 	struct mlx5_aso_mtr *aso_mtr;
7696 	struct mlx5_flow_meter_info *fm;
7697 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
7698 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
7699 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
7700 	int ret = 0;
7701 	bool push = true;
7702 	bool aso = false;
7703 
7704 	if (attr) {
7705 		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
7706 		if (unlikely(!priv->hw_q[queue].job_idx))
7707 			return rte_flow_error_set(error, ENOMEM,
7708 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7709 				"Action update failed due to queue full.");
7710 		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
7711 		job->type = MLX5_HW_Q_JOB_TYPE_UPDATE;
7712 		job->user_data = user_data;
7713 		push = !attr->postpone;
7714 	}
7715 	switch (type) {
7716 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
7717 		ret = mlx5_hws_age_action_update(priv, idx, update, error);
7718 		break;
7719 	case MLX5_INDIRECT_ACTION_TYPE_CT:
7720 		if (ct_conf->state)
7721 			aso = true;
7722 		ret = flow_hw_conntrack_update(dev, queue, update, act_idx,
7723 					       job, push, error);
7724 		break;
7725 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
7726 		aso = true;
7727 		meter_mark = &upd_meter_mark->meter_mark;
7728 		/* Find ASO object. */
7729 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
7730 		if (!aso_mtr) {
7731 			ret = -EINVAL;
7732 			rte_flow_error_set(error, EINVAL,
7733 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7734 				NULL, "Invalid meter_mark update index");
7735 			break;
7736 		}
7737 		fm = &aso_mtr->fm;
7738 		if (upd_meter_mark->profile_valid)
7739 			fm->profile = (struct mlx5_flow_meter_profile *)
7740 							(meter_mark->profile);
7741 		if (upd_meter_mark->color_mode_valid)
7742 			fm->color_aware = meter_mark->color_mode;
7743 		if (upd_meter_mark->init_color_valid)
7744 			aso_mtr->init_color = (meter_mark->color_mode) ?
7745 				meter_mark->init_color : RTE_COLOR_GREEN;
7746 		if (upd_meter_mark->state_valid)
7747 			fm->is_enable = meter_mark->state;
7748 		/* Update ASO flow meter by wqe. */
7749 		if (mlx5_aso_meter_update_by_wqe(priv->sh, queue,
7750 						 aso_mtr, &priv->mtr_bulk, job, push)) {
7751 			ret = -EINVAL;
7752 			rte_flow_error_set(error, EINVAL,
7753 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7754 				NULL, "Unable to update ASO meter WQE");
7755 			break;
7756 		}
7757 		/* Wait for ASO object completion. */
7758 		if (queue == MLX5_HW_INV_QUEUE &&
7759 		    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
7760 			ret = -EINVAL;
7761 			rte_flow_error_set(error, EINVAL,
7762 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7763 				NULL, "Unable to wait for ASO meter CQE");
7764 		}
7765 		break;
7766 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
7767 		ret = flow_dv_action_update(dev, handle, update, error);
7768 		break;
7769 	default:
7770 		ret = -ENOTSUP;
7771 		rte_flow_error_set(error, ENOTSUP,
7772 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7773 					  "action type not supported");
7774 		break;
7775 	}
7776 	if (job) {
7777 		if (ret) {
7778 			priv->hw_q[queue].job_idx++;
7779 			return ret;
7780 		}
7781 		job->action = handle;
7782 		if (push)
7783 			__flow_hw_push_action(dev, queue);
7784 		if (aso)
7785 			return 0;
7786 		rte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :
7787 				 priv->hw_q[queue].indir_iq, job);
7788 	}
7789 	return ret;
7790 }
7791 
7792 /**
7793  * Destroy shared action.
7794  *
7795  * @param[in] dev
7796  *   Pointer to the rte_eth_dev structure.
7797  * @param[in] queue
7798  *   Which queue to be used.
7799  * @param[in] attr
7800  *   Operation attribute.
7801  * @param[in] handle
7802  *   Action handle to be destroyed.
7803  * @param[in] user_data
7804  *   Pointer to the user_data.
7805  * @param[out] error
7806  *   Pointer to error structure.
7807  *
7808  * @return
7809  *   0 on success, negative value otherwise and rte_errno is set.
7810  */
7811 static int
7812 flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
7813 			      const struct rte_flow_op_attr *attr,
7814 			      struct rte_flow_action_handle *handle,
7815 			      void *user_data,
7816 			      struct rte_flow_error *error)
7817 {
7818 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
7819 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
7820 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
7821 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
7822 	struct mlx5_priv *priv = dev->data->dev_private;
7823 	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
7824 	struct mlx5_hw_q_job *job = NULL;
7825 	struct mlx5_aso_mtr *aso_mtr;
7826 	struct mlx5_flow_meter_info *fm;
7827 	bool push = true;
7828 	bool aso = false;
7829 	int ret = 0;
7830 
7831 	if (attr) {
7832 		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
7833 		if (unlikely(!priv->hw_q[queue].job_idx))
7834 			return rte_flow_error_set(error, ENOMEM,
7835 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7836 				"Action destroy failed due to queue full.");
7837 		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
7838 		job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
7839 		job->user_data = user_data;
7840 		push = !attr->postpone;
7841 	}
7842 	switch (type) {
7843 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
7844 		ret = mlx5_hws_age_action_destroy(priv, age_idx, error);
7845 		break;
7846 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
7847 		age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
7848 		if (age_idx != 0)
7849 			/*
7850 			 * If this counter belongs to an indirect AGE action,
7851 			 * update the AGE accounting now.
7852 			 */
7853 			mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
7854 		mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
7855 		break;
7856 	case MLX5_INDIRECT_ACTION_TYPE_CT:
7857 		ret = flow_hw_conntrack_destroy(dev, act_idx, error);
7858 		break;
7859 	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
7860 		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
7861 		if (!aso_mtr) {
7862 			ret = -EINVAL;
7863 			rte_flow_error_set(error, EINVAL,
7864 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7865 				NULL, "Invalid meter_mark destroy index");
7866 			break;
7867 		}
7868 		fm = &aso_mtr->fm;
7869 		fm->is_enable = 0;
7870 		/* Update ASO flow meter by wqe. */
7871 		if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr,
7872 						 &priv->mtr_bulk, job, push)) {
7873 			ret = -EINVAL;
7874 			rte_flow_error_set(error, EINVAL,
7875 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7876 				NULL, "Unable to update ASO meter WQE");
7877 			break;
7878 		}
7879 		/* Wait for ASO object completion. */
7880 		if (queue == MLX5_HW_INV_QUEUE &&
7881 		    mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) {
7882 			ret = -EINVAL;
7883 			rte_flow_error_set(error, EINVAL,
7884 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7885 				NULL, "Unable to wait for ASO meter CQE");
7886 			break;
7887 		}
7888 		if (!job)
7889 			mlx5_ipool_free(pool->idx_pool, idx);
7890 		else
7891 			aso = true;
7892 		break;
7893 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
7894 		ret = flow_dv_action_destroy(dev, handle, error);
7895 		break;
7896 	default:
7897 		ret = -ENOTSUP;
7898 		rte_flow_error_set(error, ENOTSUP,
7899 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
7900 					  "action type not supported");
7901 		break;
7902 	}
7903 	if (job) {
7904 		if (ret) {
7905 			priv->hw_q[queue].job_idx++;
7906 			return ret;
7907 		}
7908 		job->action = handle;
7909 		if (push)
7910 			__flow_hw_push_action(dev, queue);
7911 		if (aso)
7912 			return ret;
7913 		rte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :
7914 				 priv->hw_q[queue].indir_iq, job);
7915 	}
7916 	return ret;
7917 }
7918 
7919 static int
7920 flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
7921 		      void *data, struct rte_flow_error *error)
7922 {
7923 	struct mlx5_priv *priv = dev->data->dev_private;
7924 	struct mlx5_hws_cnt *cnt;
7925 	struct rte_flow_query_count *qc = data;
7926 	uint32_t iidx;
7927 	uint64_t pkts, bytes;
7928 
7929 	if (!mlx5_hws_cnt_id_valid(counter))
7930 		return rte_flow_error_set(error, EINVAL,
7931 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7932 				"counters are not available");
7933 	iidx = mlx5_hws_cnt_iidx(priv->hws_cpool, counter);
7934 	cnt = &priv->hws_cpool->pool[iidx];
7935 	__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
7936 	qc->hits_set = 1;
7937 	qc->bytes_set = 1;
7938 	qc->hits = pkts - cnt->reset.hits;
7939 	qc->bytes = bytes - cnt->reset.bytes;
7940 	if (qc->reset) {
7941 		cnt->reset.bytes = bytes;
7942 		cnt->reset.hits = pkts;
7943 	}
7944 	return 0;
7945 }
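
/*
 * HWS counters are not reset in hardware on query; instead the function
 * above keeps a software baseline (cnt->reset) and reports deltas, so a
 * query with qc->reset set merely moves the baseline forward.
 */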
7946 
7947 /**
7948  * Query a flow rule AGE action for aging information.
7949  *
7950  * @param[in] dev
7951  *   Pointer to Ethernet device.
7952  * @param[in] age_idx
7953  *   Index of AGE action parameter.
7954  * @param[out] data
7955  *   Data retrieved by the query.
7956  * @param[out] error
7957  *   Perform verbose error reporting if not NULL.
7958  *
7959  * @return
7960  *   0 on success, a negative errno value otherwise and rte_errno is set.
7961  */
7962 static int
7963 flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
7964 		  struct rte_flow_error *error)
7965 {
7966 	struct mlx5_priv *priv = dev->data->dev_private;
7967 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
7968 	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
7969 	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
7970 	struct rte_flow_query_age *resp = data;
7971 
7972 	if (!param || !param->timeout)
7973 		return rte_flow_error_set(error, EINVAL,
7974 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7975 					  NULL, "age data not available");
7976 	switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
7977 	case HWS_AGE_AGED_OUT_REPORTED:
7978 	case HWS_AGE_AGED_OUT_NOT_REPORTED:
7979 		resp->aged = 1;
7980 		break;
7981 	case HWS_AGE_CANDIDATE:
7982 	case HWS_AGE_CANDIDATE_INSIDE_RING:
7983 		resp->aged = 0;
7984 		break;
7985 	case HWS_AGE_FREE:
7986 		/*
7987 		 * When state is FREE the flow itself should be invalid.
7988 		 * Fall-through.
7989 		 */
7990 	default:
7991 		MLX5_ASSERT(0);
7992 		break;
7993 	}
7994 	resp->sec_since_last_hit_valid = !resp->aged;
7995 	if (resp->sec_since_last_hit_valid)
7996 		resp->sec_since_last_hit = __atomic_load_n
7997 				 (&param->sec_since_last_hit, __ATOMIC_RELAXED);
7998 	return 0;
7999 }
8000 
8001 static int
8002 flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
8003 	      const struct rte_flow_action *actions, void *data,
8004 	      struct rte_flow_error *error)
8005 {
8006 	int ret = -EINVAL;
8007 	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
8008 
8009 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
8010 		switch (actions->type) {
8011 		case RTE_FLOW_ACTION_TYPE_VOID:
8012 			break;
8013 		case RTE_FLOW_ACTION_TYPE_COUNT:
8014 			ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
8015 						    error);
8016 			break;
8017 		case RTE_FLOW_ACTION_TYPE_AGE:
8018 			ret = flow_hw_query_age(dev, hw_flow->age_idx, data,
8019 						error);
8020 			break;
8021 		default:
8022 			return rte_flow_error_set(error, ENOTSUP,
8023 						  RTE_FLOW_ERROR_TYPE_ACTION,
8024 						  actions,
8025 						  "action not supported");
8026 		}
8027 	}
8028 	return ret;
8029 }
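
/*
 * Usage sketch (illustrative only): querying the COUNT action of a rule
 * via the generic API, which dispatches to flow_hw_query() above. The
 * flow pointer is assumed to come from an earlier rule creation; note
 * that the action array is END-terminated, matching the iteration above.
 */
static __rte_unused int
example_query_count(uint16_t port_id, struct rte_flow *flow, uint64_t *hits)
{
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_query_count qc = { .reset = 1 };
	struct rte_flow_error error;
	int ret = rte_flow_query(port_id, flow, actions, &qc, &error);

	if (ret == 0 && qc.hits_set)
		*hits = qc.hits;
	return ret;
}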
8030 
8031 /**
8032  * Validate indirect action.
8033  *
8034  * @param[in] dev
8035  *   Pointer to the Ethernet device structure.
8036  * @param[in] conf
8037  *   Shared action configuration.
8038  * @param[in] action
8039  *   Action specification used to create indirect action.
8040  * @param[out] error
8041  *   Perform verbose error reporting if not NULL. Initialized in case of
8042  *   error only.
8043  *
8044  * @return
8045  *   0 on success, otherwise negative errno value.
8046  */
8047 static int
8048 flow_hw_action_validate(struct rte_eth_dev *dev,
8049 			const struct rte_flow_indir_action_conf *conf,
8050 			const struct rte_flow_action *action,
8051 			struct rte_flow_error *err)
8052 {
8053 	return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
8054 					      conf, action, NULL, err);
8055 }
8056 
8057 /**
8058  * Create indirect action.
8059  *
8060  * @param[in] dev
8061  *   Pointer to the Ethernet device structure.
8062  * @param[in] conf
8063  *   Shared action configuration.
8064  * @param[in] action
8065  *   Action specification used to create indirect action.
8066  * @param[out] error
8067  *   Perform verbose error reporting if not NULL. Initialized in case of
8068  *   error only.
8069  *
8070  * @return
8071  *   A valid shared action handle in case of success, NULL otherwise and
8072  *   rte_errno is set.
8073  */
8074 static struct rte_flow_action_handle *
8075 flow_hw_action_create(struct rte_eth_dev *dev,
8076 		       const struct rte_flow_indir_action_conf *conf,
8077 		       const struct rte_flow_action *action,
8078 		       struct rte_flow_error *err)
8079 {
8080 	return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
8081 					    NULL, conf, action, NULL, err);
8082 }
8083 
8084 /**
8085  * Destroy the indirect action.
8086  * Release action related resources on the NIC and the memory.
8087  * Lock free, (mutex should be acquired by caller).
8088  * Dispatcher for action type specific call.
8089  *
8090  * @param[in] dev
8091  *   Pointer to the Ethernet device structure.
8092  * @param[in] handle
8093  *   The indirect action object handle to be removed.
8094  * @param[out] error
8095  *   Perform verbose error reporting if not NULL. Initialized in case of
8096  *   error only.
8097  *
8098  * @return
8099  *   0 on success, otherwise negative errno value.
8100  */
8101 static int
8102 flow_hw_action_destroy(struct rte_eth_dev *dev,
8103 		       struct rte_flow_action_handle *handle,
8104 		       struct rte_flow_error *error)
8105 {
8106 	return flow_hw_action_handle_destroy(dev, MLX5_HW_INV_QUEUE,
8107 			NULL, handle, NULL, error);
8108 }
8109 
8110 /**
8111  * Updates in place shared action configuration.
8112  *
8113  * @param[in] dev
8114  *   Pointer to the Ethernet device structure.
8115  * @param[in] handle
8116  *   The indirect action object handle to be updated.
8117  * @param[in] update
8118  *   Action specification used to modify the action pointed by *handle*.
8119  *   *update* could be of same type with the action pointed by the *handle*
8120  *   handle argument, or some other structures like a wrapper, depending on
8121  *   the indirect action type.
8122  * @param[out] error
8123  *   Perform verbose error reporting if not NULL. Initialized in case of
8124  *   error only.
8125  *
8126  * @return
8127  *   0 on success, otherwise negative errno value.
8128  */
8129 static int
8130 flow_hw_action_update(struct rte_eth_dev *dev,
8131 		      struct rte_flow_action_handle *handle,
8132 		      const void *update,
8133 		      struct rte_flow_error *err)
8134 {
8135 	return flow_hw_action_handle_update(dev, MLX5_HW_INV_QUEUE,
8136 			NULL, handle, update, NULL, err);
8137 }
8138 
8139 static int
8140 flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
8141 			    const struct rte_flow_op_attr *attr,
8142 			    const struct rte_flow_action_handle *handle,
8143 			    void *data, void *user_data,
8144 			    struct rte_flow_error *error)
8145 {
8146 	struct mlx5_priv *priv = dev->data->dev_private;
8147 	struct mlx5_hw_q_job *job = NULL;
8148 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
8149 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
8150 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
8151 	int ret;
8152 	bool push = true;
8153 	bool aso = false;
8154 
8155 	if (attr) {
8156 		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
8157 		if (unlikely(!priv->hw_q[queue].job_idx))
8158 			return rte_flow_error_set(error, ENOMEM,
8159 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8160 				"Action query failed due to queue full.");
8161 		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
8162 		job->type = MLX5_HW_Q_JOB_TYPE_QUERY;
8163 		job->user_data = user_data;
8164 		push = !attr->postpone;
8165 	}
8166 	switch (type) {
8167 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
8168 		ret = flow_hw_query_age(dev, age_idx, data, error);
8169 		break;
8170 	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
8171 		ret = flow_hw_query_counter(dev, act_idx, data, error);
8172 		break;
8173 	case MLX5_INDIRECT_ACTION_TYPE_CT:
8174 		aso = true;
8175 		if (job)
8176 			job->profile = (struct rte_flow_action_conntrack *)data;
8177 		ret = flow_hw_conntrack_query(dev, queue, act_idx, data,
8178 					      job, push, error);
8179 		break;
8180 	default:
8181 		ret = -ENOTSUP;
8182 		rte_flow_error_set(error, ENOTSUP,
8183 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
8184 					  "action type not supported");
8185 		break;
8186 	}
8187 	if (job) {
8188 		if (ret) {
8189 			priv->hw_q[queue].job_idx++;
8190 			return ret;
8191 		}
8192 		job->action = handle;
8193 		if (push)
8194 			__flow_hw_push_action(dev, queue);
8195 		if (aso)
8196 			return ret;
8197 		rte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :
8198 				 priv->hw_q[queue].indir_iq, job);
8199 	}
8200 	return ret;
8201 }
8202 
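/**
 * Query the indirect action synchronously.
 * Thin wrapper over flow_hw_action_handle_query() using the invalid
 * queue marker, so no job is allocated and no completion is enqueued.
 */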
8203 static int
8204 flow_hw_action_query(struct rte_eth_dev *dev,
8205 		     const struct rte_flow_action_handle *handle, void *data,
8206 		     struct rte_flow_error *error)
8207 {
8208 	return flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,
8209 			handle, data, NULL, error);
8210 }
8211 
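/*
 * Usage sketch (illustrative, not part of the driver): the wrappers above
 * back the synchronous rte_flow indirect action API. "port_id" is a
 * placeholder for a configured port and error handling is elided.
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action action = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_query_count count = { .reset = 0 };
 *	struct rte_flow_error error;
 *	struct rte_flow_action_handle *handle;
 *
 *	handle = rte_flow_action_handle_create(port_id, &conf, &action, &error);
 *	if (handle != NULL) {
 *		rte_flow_action_handle_query(port_id, handle, &count, &error);
 *		rte_flow_action_handle_destroy(port_id, handle, &error);
 *	}
 */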
8212 /**
8213  * Get aged-out flows of a given port on the given HWS flow queue.
8214  *
8215  * @param[in] dev
8216  *   Pointer to the Ethernet device structure.
8217  * @param[in] queue_id
8218  *   Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is not set.
8219  * @param[in, out] contexts
8220  *   The address of an array of pointers to the aged-out flows contexts.
8221  * @param[in] nb_contexts
8222  *   The length of context array pointers.
8223  * @param[out] error
8224  *   Perform verbose error reporting if not NULL. Initialized in case of
8225  *   error only.
8226  *
8227  * @return
8228  *   If nb_contexts is 0, the number of all aged contexts is returned.
8229  *   If nb_contexts is not 0, the number of aged flows reported in the
8230  *   context array is returned, otherwise negative errno value.
8231  */
8232 static int
8233 flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
8234 			 void **contexts, uint32_t nb_contexts,
8235 			 struct rte_flow_error *error)
8236 {
8237 	struct mlx5_priv *priv = dev->data->dev_private;
8238 	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
8239 	struct rte_ring *r;
8240 	int nb_flows = 0;
8241 
8242 	if (nb_contexts && !contexts)
8243 		return rte_flow_error_set(error, EINVAL,
8244 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8245 					  NULL, "empty context");
8246 	if (priv->hws_strict_queue) {
8247 		if (queue_id >= age_info->hw_q_age->nb_rings)
8248 			return rte_flow_error_set(error, EINVAL,
8249 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8250 						NULL, "invalid queue id");
8251 		r = age_info->hw_q_age->aged_lists[queue_id];
8252 	} else {
8253 		r = age_info->hw_age.aged_list;
8254 		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
8255 	}
8256 	if (nb_contexts == 0)
8257 		return rte_ring_count(r);
8258 	while ((uint32_t)nb_flows < nb_contexts) {
8259 		uint32_t age_idx;
8260 
8261 		if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
8262 			break;
8263 		/* Get the AGE context if the aged-out index is still valid. */
8264 		contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
8265 		if (!contexts[nb_flows])
8266 			continue;
8267 		nb_flows++;
8268 	}
8269 	return nb_flows;
8270 }
8271 
8272 /**
8273  * Get aged-out flows.
8274  *
8275  * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
8276  *
8277  * @param[in] dev
8278  *   Pointer to the Ethernet device structure.
8279  * @param[in] contexts
8280  *   The address of an array of pointers to the aged-out flows contexts.
8281  * @param[in] nb_contexts
8282  *   The length of context array pointers.
8283  * @param[out] error
8284  *   Perform verbose error reporting if not NULL. Initialized in case of
8285  *   error only.
8286  *
8287  * @return
8288  *   The number of retrieved contexts on success, otherwise negative
8289  *   errno value. If nb_contexts is 0, the number of all aged contexts
8290  *   is returned; if nb_contexts is not 0, the number of aged flows
8291  *   reported in the context array is returned.
8292  */
8293 static int
8294 flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
8295 		       uint32_t nb_contexts, struct rte_flow_error *error)
8296 {
8297 	struct mlx5_priv *priv = dev->data->dev_private;
8298 
8299 	if (priv->hws_strict_queue)
8300 		DRV_LOG(WARNING,
8301 			"port %u get aged flows called in strict queue mode.",
8302 			dev->data->port_id);
8303 	return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
8304 }
8305 
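/*
 * Usage sketch (illustrative): aged-out flows are retrieved in two steps
 * through the public API backed by the functions above. With
 * RTE_FLOW_PORT_FLAG_STRICT_QUEUE set, rte_flow_get_q_aged_flows() polls a
 * single queue; otherwise queue_id is ignored. "port_id", "queue_id" and
 * handle_aged_contexts() are placeholders.
 *
 *	struct rte_flow_error error;
 *	int n = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error);
 *
 *	if (n > 0) {
 *		void **ctx = calloc(n, sizeof(*ctx));
 *
 *		if (ctx != NULL) {
 *			n = rte_flow_get_q_aged_flows(port_id, queue_id,
 *						      ctx, n, &error);
 *			handle_aged_contexts(ctx, n);
 *			free(ctx);
 *		}
 *	}
 *
 * handle_aged_contexts() stands for application-defined processing of the
 * returned AGE action contexts.
 */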
8306 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
8307 	.info_get = flow_hw_info_get,
8308 	.configure = flow_hw_configure,
8309 	.pattern_validate = flow_hw_pattern_validate,
8310 	.pattern_template_create = flow_hw_pattern_template_create,
8311 	.pattern_template_destroy = flow_hw_pattern_template_destroy,
8312 	.actions_validate = flow_hw_actions_validate,
8313 	.actions_template_create = flow_hw_actions_template_create,
8314 	.actions_template_destroy = flow_hw_actions_template_destroy,
8315 	.template_table_create = flow_hw_template_table_create,
8316 	.template_table_destroy = flow_hw_table_destroy,
8317 	.async_flow_create = flow_hw_async_flow_create,
8318 	.async_flow_destroy = flow_hw_async_flow_destroy,
8319 	.pull = flow_hw_pull,
8320 	.push = flow_hw_push,
8321 	.async_action_create = flow_hw_action_handle_create,
8322 	.async_action_destroy = flow_hw_action_handle_destroy,
8323 	.async_action_update = flow_hw_action_handle_update,
8324 	.async_action_query = flow_hw_action_handle_query,
8325 	.action_validate = flow_hw_action_validate,
8326 	.action_create = flow_hw_action_create,
8327 	.action_destroy = flow_hw_action_destroy,
8328 	.action_update = flow_hw_action_update,
8329 	.action_query = flow_hw_action_query,
8330 	.query = flow_hw_query,
8331 	.get_aged_flows = flow_hw_get_aged_flows,
8332 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
8333 };
8334 
8335 /**
8336  * Creates a control flow using flow template API on @p proxy_dev device,
8337  * on behalf of @p owner_dev device.
8338  *
8339  * This function uses locks internally to synchronize access to the
8340  * flow queue.
8341  *
8342  * Created flow is stored in private list associated with @p proxy_dev device.
8343  *
8344  * @param owner_dev
8345  *   Pointer to Ethernet device on behalf of which flow is created.
8346  * @param proxy_dev
8347  *   Pointer to Ethernet device on which flow is created.
8348  * @param table
8349  *   Pointer to flow table.
8350  * @param items
8351  *   Pointer to flow rule items.
8352  * @param item_template_idx
8353  *   Index of an item template associated with @p table.
8354  * @param actions
8355  *   Pointer to flow rule actions.
8356  * @param action_template_idx
8357  *   Index of an action template associated with @p table.
8358  *
8359  * @return
8360  *   0 on success, negative errno value otherwise and rte_errno set.
8361  */
8362 static __rte_unused int
8363 flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
8364 			 struct rte_eth_dev *proxy_dev,
8365 			 struct rte_flow_template_table *table,
8366 			 struct rte_flow_item items[],
8367 			 uint8_t item_template_idx,
8368 			 struct rte_flow_action actions[],
8369 			 uint8_t action_template_idx)
8370 {
8371 	struct mlx5_priv *priv = proxy_dev->data->dev_private;
8372 	uint32_t queue = CTRL_QUEUE_ID(priv);
8373 	struct rte_flow_op_attr op_attr = {
8374 		.postpone = 0,
8375 	};
8376 	struct rte_flow *flow = NULL;
8377 	struct mlx5_hw_ctrl_flow *entry = NULL;
8378 	int ret;
8379 
8380 	rte_spinlock_lock(&priv->hw_ctrl_lock);
8381 	entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
8382 			    0, SOCKET_ID_ANY);
8383 	if (!entry) {
8384 		DRV_LOG(ERR, "port %u not enough memory to create control flows",
8385 			proxy_dev->data->port_id);
8386 		rte_errno = ENOMEM;
8387 		ret = -rte_errno;
8388 		goto error;
8389 	}
8390 	flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
8391 					 items, item_template_idx,
8392 					 actions, action_template_idx,
8393 					 NULL, NULL);
8394 	if (!flow) {
8395 		DRV_LOG(ERR, "port %u failed to enqueue create control"
8396 			" flow operation", proxy_dev->data->port_id);
8397 		ret = -rte_errno;
8398 		goto error;
8399 	}
8400 	ret = flow_hw_push(proxy_dev, queue, NULL);
8401 	if (ret) {
8402 		DRV_LOG(ERR, "port %u failed to drain control flow queue",
8403 			proxy_dev->data->port_id);
8404 		goto error;
8405 	}
8406 	ret = __flow_hw_pull_comp(proxy_dev, queue, 1, NULL);
8407 	if (ret) {
8408 		DRV_LOG(ERR, "port %u failed to insert control flow",
8409 			proxy_dev->data->port_id);
8410 		rte_errno = EINVAL;
8411 		ret = -rte_errno;
8412 		goto error;
8413 	}
8414 	entry->owner_dev = owner_dev;
8415 	entry->flow = flow;
8416 	LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
8417 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
8418 	return 0;
8419 error:
8420 	if (entry)
8421 		mlx5_free(entry);
8422 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
8423 	return ret;
8424 }
8425 
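/*
 * The helper above makes the asynchronous flow API synchronous for control
 * flows: it enqueues the operation, pushes the queue and polls until the
 * single completion arrives. An application-side analog using the public
 * API could look as follows (illustrative sketch; "port_id", "queue_id",
 * "table", "items" and "actions" are placeholders, error handling is
 * elided):
 *
 *	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow_op_result res;
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_async_create(port_id, queue_id, &op_attr, table,
 *				     items, 0, actions, 0, NULL, NULL);
 *	rte_flow_push(port_id, queue_id, NULL);
 *	while (rte_flow_pull(port_id, queue_id, &res, 1, NULL) == 0)
 *		rte_pause();
 */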
8426 /**
8427  * Destroys a control flow @p flow using flow template API on @p dev device.
8428  *
8429  * This function uses locks internally to synchronize access to the
8430  * flow queue.
8431  *
8432  * If the @p flow is stored on any private list/pool, then the caller must
8433  * free up the relevant resources.
8434  *
8435  * @param dev
8436  *   Pointer to Ethernet device.
8437  * @param flow
8438  *   Pointer to flow rule.
8439  *
8440  * @return
8441  *   0 on success, non-zero value otherwise.
8442  */
8443 static int
8444 flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
8445 {
8446 	struct mlx5_priv *priv = dev->data->dev_private;
8447 	uint32_t queue = CTRL_QUEUE_ID(priv);
8448 	struct rte_flow_op_attr op_attr = {
8449 		.postpone = 0,
8450 	};
8451 	int ret;
8452 
8453 	rte_spinlock_lock(&priv->hw_ctrl_lock);
8454 	ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
8455 	if (ret) {
8456 		DRV_LOG(ERR, "port %u failed to enqueue destroy control"
8457 			" flow operation", dev->data->port_id);
8458 		goto exit;
8459 	}
8460 	ret = flow_hw_push(dev, queue, NULL);
8461 	if (ret) {
8462 		DRV_LOG(ERR, "port %u failed to drain control flow queue",
8463 			dev->data->port_id);
8464 		goto exit;
8465 	}
8466 	ret = __flow_hw_pull_comp(dev, queue, 1, NULL);
8467 	if (ret) {
8468 		DRV_LOG(ERR, "port %u failed to destroy control flow",
8469 			dev->data->port_id);
8470 		rte_errno = EINVAL;
8471 		ret = -rte_errno;
8472 		goto exit;
8473 	}
8474 exit:
8475 	rte_spinlock_unlock(&priv->hw_ctrl_lock);
8476 	return ret;
8477 }
8478 
8479 /**
8480  * Destroys control flows created on behalf of @p owner device on @p dev device.
8481  *
8482  * @param dev
8483  *   Pointer to Ethernet device on which control flows were created.
8484  * @param owner
8485  *   Pointer to Ethernet device owning control flows.
8486  *
8487  * @return
8488  *   0 on success, otherwise negative error code is returned and
8489  *   rte_errno is set.
8490  */
8491 static int
8492 flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
8493 {
8494 	struct mlx5_priv *priv = dev->data->dev_private;
8495 	struct mlx5_hw_ctrl_flow *cf;
8496 	struct mlx5_hw_ctrl_flow *cf_next;
8497 	int ret;
8498 
8499 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
8500 	while (cf != NULL) {
8501 		cf_next = LIST_NEXT(cf, next);
8502 		if (cf->owner_dev == owner) {
8503 			ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
8504 			if (ret) {
8505 				rte_errno = -ret;
8506 				return ret;
8507 			}
8508 			LIST_REMOVE(cf, next);
8509 			mlx5_free(cf);
8510 		}
8511 		cf = cf_next;
8512 	}
8513 	return 0;
8514 }
8515 
8516 /**
8517  * Destroys control flows created for @p owner_dev device.
8518  *
8519  * @param owner_dev
8520  *   Pointer to Ethernet device owning control flows.
8521  *
8522  * @return
8523  *   0 on success, otherwise negative error code is returned and
8524  *   rte_errno is set.
8525  */
8526 int
8527 mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
8528 {
8529 	struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
8530 	struct rte_eth_dev *proxy_dev;
8531 	uint16_t owner_port_id = owner_dev->data->port_id;
8532 	uint16_t proxy_port_id = owner_dev->data->port_id;
8533 	int ret;
8534 
8535 	/* Flush all flows created by this port for itself. */
8536 	ret = flow_hw_flush_ctrl_flows_owned_by(owner_dev, owner_dev);
8537 	if (ret)
8538 		return ret;
8539 	/* Flush all flows created for this port on proxy port. */
8540 	if (owner_priv->sh->config.dv_esw_en) {
8541 		ret = rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL);
8542 		if (ret == -ENODEV) {
8543 			DRV_LOG(DEBUG, "Unable to find transfer proxy port for port %u. It was "
8544 				       "probably closed. Control flows were cleared.",
8545 				       owner_port_id);
8546 			rte_errno = 0;
8547 			return 0;
8548 		} else if (ret) {
8549 			DRV_LOG(ERR, "Unable to find proxy port for port %u (ret = %d)",
8550 				owner_port_id, ret);
8551 			return ret;
8552 		}
8553 		proxy_dev = &rte_eth_devices[proxy_port_id];
8554 	} else {
8555 		proxy_dev = owner_dev;
8556 	}
8557 	return flow_hw_flush_ctrl_flows_owned_by(proxy_dev, owner_dev);
8558 }
8559 
8560 /**
8561  * Destroys all control flows created on @p dev device.
8562  *
8563  * @param dev
8564  *   Pointer to Ethernet device.
8565  *
8566  * @return
8567  *   0 on success, otherwise negative error code is returned and
8568  *   rte_errno is set.
8569  */
8570 static int
8571 flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
8572 {
8573 	struct mlx5_priv *priv = dev->data->dev_private;
8574 	struct mlx5_hw_ctrl_flow *cf;
8575 	struct mlx5_hw_ctrl_flow *cf_next;
8576 	int ret;
8577 
8578 	cf = LIST_FIRST(&priv->hw_ctrl_flows);
8579 	while (cf != NULL) {
8580 		cf_next = LIST_NEXT(cf, next);
8581 		ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
8582 		if (ret) {
8583 			rte_errno = -ret;
8584 			return ret;
8585 		}
8586 		LIST_REMOVE(cf, next);
8587 		mlx5_free(cf);
8588 		cf = cf_next;
8589 	}
8590 	return 0;
8591 }
8592 
8593 int
8594 mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
8595 {
8596 	uint16_t port_id = dev->data->port_id;
8597 	struct rte_flow_item_ethdev esw_mgr_spec = {
8598 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
8599 	};
8600 	struct rte_flow_item_ethdev esw_mgr_mask = {
8601 		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
8602 	};
8603 	struct rte_flow_item_tag reg_c0_spec = {
8604 		.index = (uint8_t)REG_C_0,
8605 		.data = flow_hw_esw_mgr_regc_marker(dev),
8606 	};
8607 	struct rte_flow_item_tag reg_c0_mask = {
8608 		.index = 0xff,
8609 		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
8610 	};
8611 	struct mlx5_rte_flow_item_sq sq_spec = {
8612 		.queue = sqn,
8613 	};
8614 	struct rte_flow_action_ethdev port = {
8615 		.port_id = port_id,
8616 	};
8617 	struct rte_flow_item items[3] = { { 0 } };
8618 	struct rte_flow_action actions[3] = { { 0 } };
8619 	struct rte_eth_dev *proxy_dev;
8620 	struct mlx5_priv *proxy_priv;
8621 	uint16_t proxy_port_id = dev->data->port_id;
8622 	int ret;
8623 
8624 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
8625 	if (ret) {
8626 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
8627 			     "port must be present to create default SQ miss flows.",
8628 			     port_id);
8629 		return ret;
8630 	}
8631 	proxy_dev = &rte_eth_devices[proxy_port_id];
8632 	proxy_priv = proxy_dev->data->dev_private;
8633 	if (!proxy_priv->dr_ctx) {
8634 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
8635 			       "for HWS to create default SQ miss flows. Default flows will "
8636 			       "not be created.",
8637 			       proxy_port_id, port_id);
8638 		return 0;
8639 	}
8640 	if (!proxy_priv->hw_esw_sq_miss_root_tbl ||
8641 	    !proxy_priv->hw_esw_sq_miss_tbl) {
8642 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
8643 			     "default flow tables were not created.",
8644 			     proxy_port_id, port_id);
8645 		rte_errno = EINVAL;
8646 		return -rte_errno;
8647 	}
8648 	/*
8649 	 * Create a root SQ miss flow rule - match E-Switch Manager and SQ,
8650 	 * and jump to group 1.
8651 	 */
8652 	items[0] = (struct rte_flow_item){
8653 		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
8654 		.spec = &esw_mgr_spec,
8655 		.mask = &esw_mgr_mask,
8656 	};
8657 	items[1] = (struct rte_flow_item){
8658 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
8659 		.spec = &sq_spec,
8660 	};
8661 	items[2] = (struct rte_flow_item){
8662 		.type = RTE_FLOW_ITEM_TYPE_END,
8663 	};
8664 	actions[0] = (struct rte_flow_action){
8665 		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8666 	};
8667 	actions[1] = (struct rte_flow_action){
8668 		.type = RTE_FLOW_ACTION_TYPE_JUMP,
8669 	};
8670 	actions[2] = (struct rte_flow_action) {
8671 		.type = RTE_FLOW_ACTION_TYPE_END,
8672 	};
8673 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl,
8674 				       items, 0, actions, 0);
8675 	if (ret) {
8676 		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
8677 			port_id, sqn, ret);
8678 		return ret;
8679 	}
8680 	/*
8681 	 * Create a non-root SQ miss flow rule - match REG_C_0 marker and SQ,
8682 	 * and forward to port.
8683 	 */
8684 	items[0] = (struct rte_flow_item){
8685 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
8686 		.spec = &reg_c0_spec,
8687 		.mask = &reg_c0_mask,
8688 	};
8689 	items[1] = (struct rte_flow_item){
8690 		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
8691 		.spec = &sq_spec,
8692 	};
8693 	items[2] = (struct rte_flow_item){
8694 		.type = RTE_FLOW_ITEM_TYPE_END,
8695 	};
8696 	actions[0] = (struct rte_flow_action){
8697 		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
8698 		.conf = &port,
8699 	};
8700 	actions[1] = (struct rte_flow_action){
8701 		.type = RTE_FLOW_ACTION_TYPE_END,
8702 	};
8703 	ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl,
8704 				       items, 0, actions, 0);
8705 	if (ret) {
8706 		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
8707 			port_id, sqn, ret);
8708 		return ret;
8709 	}
8710 	return 0;
8711 }
8712 
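/*
 * Summary of the SQ miss flow rules created above:
 *
 *   1. Root table rule - matches traffic sent by the E-Switch Manager on
 *      the given SQ, marks it in REG_C_0 (MODIFY_FIELD) and jumps to
 *      group 1.
 *   2. Non-root table rule - matches the REG_C_0 marker together with the
 *      SQ and forwards the packet to the represented port.
 *
 * The split keeps the root table, which is handled through firmware, down
 * to a simple tag-and-jump rule, while the actual forwarding decision is
 * taken in the hardware-steered non-root table.
 */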
8713 int
8714 mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
8715 {
8716 	uint16_t port_id = dev->data->port_id;
8717 	struct rte_flow_item_ethdev port_spec = {
8718 		.port_id = port_id,
8719 	};
8720 	struct rte_flow_item items[] = {
8721 		{
8722 			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
8723 			.spec = &port_spec,
8724 		},
8725 		{
8726 			.type = RTE_FLOW_ITEM_TYPE_END,
8727 		},
8728 	};
8729 	struct rte_flow_action_jump jump = {
8730 		.group = 1,
8731 	};
8732 	struct rte_flow_action actions[] = {
8733 		{
8734 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8735 			.conf = &jump,
8736 		},
8737 		{
8738 			.type = RTE_FLOW_ACTION_TYPE_END,
8739 		}
8740 	};
8741 	struct rte_eth_dev *proxy_dev;
8742 	struct mlx5_priv *proxy_priv;
8743 	uint16_t proxy_port_id = dev->data->port_id;
8744 	int ret;
8745 
8746 	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
8747 	if (ret) {
8748 		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
8749 			     "port must be present to create default FDB jump rule.",
8750 			     port_id);
8751 		return ret;
8752 	}
8753 	proxy_dev = &rte_eth_devices[proxy_port_id];
8754 	proxy_priv = proxy_dev->data->dev_private;
8755 	if (!proxy_priv->dr_ctx) {
8756 		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
8757 			       "for HWS to create default FDB jump rule. Default rule will "
8758 			       "not be created.",
8759 			       proxy_port_id, port_id);
8760 		return 0;
8761 	}
8762 	if (!proxy_priv->hw_esw_zero_tbl) {
8763 		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
8764 			     "default flow tables were not created.",
8765 			     proxy_port_id, port_id);
8766 		rte_errno = EINVAL;
8767 		return -rte_errno;
8768 	}
8769 	return flow_hw_create_ctrl_flow(dev, proxy_dev,
8770 					proxy_priv->hw_esw_zero_tbl,
8771 					items, 0, actions, 0);
8772 }
8773 
8774 int
8775 mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
8776 {
8777 	struct mlx5_priv *priv = dev->data->dev_private;
8778 	struct rte_flow_item_eth promisc = {
8779 		.hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8780 		.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8781 		.hdr.ether_type = 0,
8782 	};
8783 	struct rte_flow_item eth_all[] = {
8784 		[0] = {
8785 			.type = RTE_FLOW_ITEM_TYPE_ETH,
8786 			.spec = &promisc,
8787 			.mask = &promisc,
8788 		},
8789 		[1] = {
8790 			.type = RTE_FLOW_ITEM_TYPE_END,
8791 		},
8792 	};
8793 	struct rte_flow_action_modify_field mreg_action = {
8794 		.operation = RTE_FLOW_MODIFY_SET,
8795 		.dst = {
8796 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8797 			.level = REG_C_1,
8798 		},
8799 		.src = {
8800 			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8801 			.level = REG_A,
8802 		},
8803 		.width = 32,
8804 	};
8805 	struct rte_flow_action copy_reg_action[] = {
8806 		[0] = {
8807 			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8808 			.conf = &mreg_action,
8809 		},
8810 		[1] = {
8811 			.type = RTE_FLOW_ACTION_TYPE_JUMP,
8812 		},
8813 		[2] = {
8814 			.type = RTE_FLOW_ACTION_TYPE_END,
8815 		},
8816 	};
8817 
8818 	MLX5_ASSERT(priv->master);
8819 	if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl)
8820 		return 0;
8821 	return flow_hw_create_ctrl_flow(dev, dev,
8822 					priv->hw_tx_meta_cpy_tbl,
8823 					eth_all, 0, copy_reg_action, 0);
8824 }
8825 
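/*
 * Note: the rule above matches all Ethernet traffic (all-zero spec and
 * mask, i.e. match any) and copies the full 32-bit metadata from REG_A,
 * where Tx metadata is provided, into REG_C_1, which extended metadata
 * mode uses, before jumping to the next group.
 */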
8826 int
8827 mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn)
8828 {
8829 	struct mlx5_priv *priv = dev->data->dev_private;
8830 	struct mlx5_rte_flow_item_sq sq_spec = {
8831 		.queue = sqn,
8832 	};
8833 	struct rte_flow_item items[] = {
8834 		{
8835 			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
8836 			.spec = &sq_spec,
8837 		},
8838 		{
8839 			.type = RTE_FLOW_ITEM_TYPE_END,
8840 		},
8841 	};
8842 	/*
8843 	 * Allocate actions array suitable for all cases - extended metadata enabled or not.
8844 	 * With extended metadata there will be an additional MODIFY_FIELD action before JUMP.
8845 	 */
8846 	struct rte_flow_action actions[] = {
8847 		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD },
8848 		{ .type = RTE_FLOW_ACTION_TYPE_JUMP },
8849 		{ .type = RTE_FLOW_ACTION_TYPE_END },
8850 		{ .type = RTE_FLOW_ACTION_TYPE_END },
8851 	};
8852 
8853 	/* It is assumed that caller checked for representor matching. */
8854 	MLX5_ASSERT(priv->sh->config.repr_matching);
8855 	if (!priv->dr_ctx) {
8856 		DRV_LOG(DEBUG, "Port %u must be configured for HWS, before creating "
8857 			       "default egress flow rules. Omitting creation.",
8858 			       dev->data->port_id);
8859 		return 0;
8860 	}
8861 	if (!priv->hw_tx_repr_tagging_tbl) {
8862 		DRV_LOG(ERR, "Port %u is configured for HWS, but table for default "
8863 			     "egress flow rules does not exist.",
8864 			     dev->data->port_id);
8865 		rte_errno = EINVAL;
8866 		return -rte_errno;
8867 	}
8868 	/*
8869 	 * If extended metadata mode is enabled, then an additional MODIFY_FIELD action must be
8870 	 * placed before terminating JUMP action.
8871 	 */
8872 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
8873 		actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
8874 		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
8875 	}
8876 	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
8877 					items, 0, actions, 0);
8878 }
8879 
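/*
 * Resulting action layout of the rule created above:
 *
 *   default:                 [ MODIFY_FIELD, JUMP, END ]
 *   with META32_HWS enabled: [ MODIFY_FIELD, MODIFY_FIELD, JUMP, END ]
 *
 * The array is sized for the longer variant; the extra trailing END entry
 * keeps the shorter variant valid as well.
 */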
8880 static uint32_t
8881 __calc_pattern_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
8882 {
8883 	switch (eth_pattern_type) {
8884 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
8885 		return MLX5_CTRL_PROMISCUOUS;
8886 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
8887 		return MLX5_CTRL_ALL_MULTICAST;
8888 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
8889 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
8890 		return MLX5_CTRL_BROADCAST;
8891 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
8892 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
8893 		return MLX5_CTRL_IPV4_MULTICAST;
8894 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
8895 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
8896 		return MLX5_CTRL_IPV6_MULTICAST;
8897 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
8898 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
8899 		return MLX5_CTRL_DMAC;
8900 	default:
8901 		/* Should not reach here. */
8902 		MLX5_ASSERT(false);
8903 		return 0;
8904 	}
8905 }
8906 
8907 static uint32_t
8908 __calc_vlan_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
8909 {
8910 	switch (eth_pattern_type) {
8911 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
8912 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
8913 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
8914 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
8915 		return MLX5_CTRL_VLAN_FILTER;
8916 	default:
8917 		return 0;
8918 	}
8919 }
8920 
8921 static bool
8922 eth_pattern_type_is_requested(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
8923 			      uint32_t flags)
8924 {
8925 	uint32_t pattern_flags = __calc_pattern_flags(eth_pattern_type);
8926 	uint32_t vlan_flags = __calc_vlan_flags(eth_pattern_type);
8927 	bool pattern_requested = !!(pattern_flags & flags);
8928 	bool consider_vlan = vlan_flags || (MLX5_CTRL_VLAN_FILTER & flags);
8929 	bool vlan_requested = !!(vlan_flags & flags);
8930 
8931 	if (consider_vlan)
8932 		return pattern_requested && vlan_requested;
8933 	else
8934 		return pattern_requested;
8935 }
8936 
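/*
 * Examples of the selection logic above:
 *
 *   - flags == MLX5_CTRL_BROADCAST: the BCAST pattern is requested; VLAN
 *     is not considered since neither the pattern nor the flags carry a
 *     VLAN bit.
 *   - flags == (MLX5_CTRL_BROADCAST | MLX5_CTRL_VLAN_FILTER): only the
 *     BCAST_VLAN pattern is requested; plain BCAST is rejected because
 *     VLAN is considered but that pattern has no VLAN bit.
 */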
8937 static bool
8938 rss_type_is_requested(struct mlx5_priv *priv,
8939 		      const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
8940 {
8941 	struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[rss_type];
8942 	unsigned int i;
8943 
8944 	for (i = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
8945 		if (at->actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
8946 			const struct rte_flow_action_rss *rss = at->actions[i].conf;
8947 			uint64_t rss_types = rss->types;
8948 
8949 			if ((rss_types & priv->rss_conf.rss_hf) != rss_types)
8950 				return false;
8951 		}
8952 	}
8953 	return true;
8954 }
8955 
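/*
 * Example of the subset check above: if the RSS actions template for a
 * given rss_type hashes on (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_TCP) while the
 * configured rss_hf covers only RTE_ETH_RSS_IPV4, the function returns
 * false and the control flow rules for that RSS expansion are skipped.
 */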
8956 static const struct rte_flow_item_eth *
8957 __get_eth_spec(const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern)
8958 {
8959 	switch (pattern) {
8960 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
8961 		return &ctrl_rx_eth_promisc_spec;
8962 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
8963 		return &ctrl_rx_eth_mcast_spec;
8964 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
8965 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
8966 		return &ctrl_rx_eth_bcast_spec;
8967 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
8968 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
8969 		return &ctrl_rx_eth_ipv4_mcast_spec;
8970 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
8971 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
8972 		return &ctrl_rx_eth_ipv6_mcast_spec;
8973 	default:
8974 		/* This case should not be reached. */
8975 		MLX5_ASSERT(false);
8976 		return NULL;
8977 	}
8978 }
8979 
8980 static int
8981 __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
8982 			    struct rte_flow_template_table *tbl,
8983 			    const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
8984 			    const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
8985 {
8986 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
8987 	struct rte_flow_item items[5];
8988 	struct rte_flow_action actions[] = {
8989 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
8990 		{ .type = RTE_FLOW_ACTION_TYPE_END },
8991 	};
8992 
8993 	if (!eth_spec)
8994 		return -EINVAL;
8995 	memset(items, 0, sizeof(items));
8996 	items[0] = (struct rte_flow_item){
8997 		.type = RTE_FLOW_ITEM_TYPE_ETH,
8998 		.spec = eth_spec,
8999 	};
9000 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
9001 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
9002 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
9003 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
9004 	/* Without VLAN filtering, only a single flow rule needs to be created. */
9005 	return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0);
9006 }
9007 
9008 static int
9009 __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
9010 				 struct rte_flow_template_table *tbl,
9011 				 const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
9012 				 const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9013 {
9014 	struct mlx5_priv *priv = dev->data->dev_private;
9015 	const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
9016 	struct rte_flow_item items[5];
9017 	struct rte_flow_action actions[] = {
9018 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
9019 		{ .type = RTE_FLOW_ACTION_TYPE_END },
9020 	};
9021 	unsigned int i;
9022 
9023 	if (!eth_spec)
9024 		return -EINVAL;
9025 	memset(items, 0, sizeof(items));
9026 	items[0] = (struct rte_flow_item){
9027 		.type = RTE_FLOW_ITEM_TYPE_ETH,
9028 		.spec = eth_spec,
9029 	};
9030 	/* The VLAN item spec will be filled below for each registered VLAN ID. */
9031 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
9032 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
9033 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
9034 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
9035 	/* With VLAN filtering, create one flow rule for each registered VLAN ID. */
9036 	for (i = 0; i < priv->vlan_filter_n; ++i) {
9037 		uint16_t vlan = priv->vlan_filter[i];
9038 		struct rte_flow_item_vlan vlan_spec = {
9039 			.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
9040 		};
9041 
9042 		items[1].spec = &vlan_spec;
9043 		if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0))
9044 			return -rte_errno;
9045 	}
9046 	return 0;
9047 }
9048 
9049 static int
9050 __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
9051 			     struct rte_flow_template_table *tbl,
9052 			     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
9053 			     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9054 {
9055 	struct rte_flow_item_eth eth_spec;
9056 	struct rte_flow_item items[5];
9057 	struct rte_flow_action actions[] = {
9058 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
9059 		{ .type = RTE_FLOW_ACTION_TYPE_END },
9060 	};
9061 	const struct rte_ether_addr cmp = {
9062 		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
9063 	};
9064 	unsigned int i;
9065 
9066 	RTE_SET_USED(pattern_type);
9067 
9068 	memset(&eth_spec, 0, sizeof(eth_spec));
9069 	memset(items, 0, sizeof(items));
9070 	items[0] = (struct rte_flow_item){
9071 		.type = RTE_FLOW_ITEM_TYPE_ETH,
9072 		.spec = &eth_spec,
9073 	};
9074 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
9075 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
9076 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
9077 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
9078 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
9079 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
9080 
9081 		if (!memcmp(mac, &cmp, sizeof(*mac)))
9082 			continue;
9083 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
9084 		if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0))
9085 			return -rte_errno;
9086 	}
9087 	return 0;
9088 }
9089 
9090 static int
9091 __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
9092 				  struct rte_flow_template_table *tbl,
9093 				  const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
9094 				  const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9095 {
9096 	struct mlx5_priv *priv = dev->data->dev_private;
9097 	struct rte_flow_item_eth eth_spec;
9098 	struct rte_flow_item items[5];
9099 	struct rte_flow_action actions[] = {
9100 		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
9101 		{ .type = RTE_FLOW_ACTION_TYPE_END },
9102 	};
9103 	const struct rte_ether_addr cmp = {
9104 		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
9105 	};
9106 	unsigned int i;
9107 	unsigned int j;
9108 
9109 	RTE_SET_USED(pattern_type);
9110 
9111 	memset(&eth_spec, 0, sizeof(eth_spec));
9112 	memset(items, 0, sizeof(items));
9113 	items[0] = (struct rte_flow_item){
9114 		.type = RTE_FLOW_ITEM_TYPE_ETH,
9115 		.spec = &eth_spec,
9116 	};
9117 	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
9118 	items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
9119 	items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
9120 	items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
9121 	for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
9122 		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
9123 
9124 		if (!memcmp(mac, &cmp, sizeof(*mac)))
9125 			continue;
9126 		memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
9127 		for (j = 0; j < priv->vlan_filter_n; ++j) {
9128 			uint16_t vlan = priv->vlan_filter[j];
9129 			struct rte_flow_item_vlan vlan_spec = {
9130 				.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
9131 			};
9132 
9133 			items[1].spec = &vlan_spec;
9134 			if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0))
9135 				return -rte_errno;
9136 		}
9137 	}
9138 	return 0;
9139 }
9140 
9141 static int
9142 __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
9143 		     struct rte_flow_template_table *tbl,
9144 		     const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
9145 		     const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9146 {
9147 	switch (pattern_type) {
9148 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
9149 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
9150 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
9151 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
9152 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
9153 		return __flow_hw_ctrl_flows_single(dev, tbl, pattern_type, rss_type);
9154 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
9155 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
9156 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
9157 		return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
9158 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
9159 		return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
9160 	case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
9161 		return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
9162 	default:
9163 		/* Should not reach here. */
9164 		MLX5_ASSERT(false);
9165 		rte_errno = EINVAL;
9166 		return -EINVAL;
9167 	}
9168 }
9169 
9170 
9171 int
9172 mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
9173 {
9174 	struct mlx5_priv *priv = dev->data->dev_private;
9175 	struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
9176 	unsigned int i;
9177 	unsigned int j;
9178 	int ret = 0;
9179 
9182 	if (!priv->dr_ctx) {
9183 		DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
9184 			       "HWS needs to be configured beforehand.",
9185 			       dev->data->port_id);
9186 		return 0;
9187 	}
9188 	if (!priv->hw_ctrl_rx) {
9189 		DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
9190 			dev->data->port_id);
9191 		rte_errno = EINVAL;
9192 		return -rte_errno;
9193 	}
9194 	hw_ctrl_rx = priv->hw_ctrl_rx;
9195 	for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
9196 		const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
9197 
9198 		if (!eth_pattern_type_is_requested(eth_pattern_type, flags))
9199 			continue;
9200 		for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
9201 			const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
9202 			struct rte_flow_actions_template *at;
9203 			struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
9204 			const struct mlx5_flow_template_table_cfg cfg = {
9205 				.attr = tmpls->attr,
9206 				.external = 0,
9207 			};
9208 
9209 			if (!hw_ctrl_rx->rss[rss_type]) {
9210 				at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
9211 				if (!at)
9212 					return -rte_errno;
9213 				hw_ctrl_rx->rss[rss_type] = at;
9214 			} else {
9215 				at = hw_ctrl_rx->rss[rss_type];
9216 			}
9217 			if (!rss_type_is_requested(priv, rss_type))
9218 				continue;
9219 			if (!tmpls->tbl) {
9220 				tmpls->tbl = flow_hw_table_create(dev, &cfg,
9221 								  &tmpls->pt, 1, &at, 1, NULL);
9222 				if (!tmpls->tbl) {
9223 					DRV_LOG(ERR, "port %u Failed to create template table "
9224 						     "for control flow rules. Unable to create "
9225 						     "control flow rules.",
9226 						     dev->data->port_id);
9227 					return -rte_errno;
9228 				}
9229 			}
9230 
9231 			ret = __flow_hw_ctrl_flows(dev, tmpls->tbl, eth_pattern_type, rss_type);
9232 			if (ret) {
9233 				DRV_LOG(ERR, "port %u Failed to create control flow rule.",
9234 					dev->data->port_id);
9235 				return ret;
9236 			}
9237 		}
9238 	}
9239 	return 0;
9240 }
9241 
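/*
 * Illustrative call (the flag combination below is an assumption; the
 * driver invokes this from its traffic enable path):
 *
 *	ret = mlx5_flow_hw_ctrl_flows(dev, MLX5_CTRL_DMAC |
 *					   MLX5_CTRL_BROADCAST |
 *					   MLX5_CTRL_IPV4_MULTICAST |
 *					   MLX5_CTRL_IPV6_MULTICAST);
 *
 * Each flag requests one class of Rx control flow rules; adding
 * MLX5_CTRL_VLAN_FILTER additionally selects the VLAN-aware variants for
 * the registered VLAN IDs.
 */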
9242 void
9243 mlx5_flow_meter_uninit(struct rte_eth_dev *dev)
9244 {
9245 	struct mlx5_priv *priv = dev->data->dev_private;
9246 
9247 	if (priv->mtr_policy_arr) {
9248 		mlx5_free(priv->mtr_policy_arr);
9249 		priv->mtr_policy_arr = NULL;
9250 	}
9251 	if (priv->mtr_profile_arr) {
9252 		mlx5_free(priv->mtr_profile_arr);
9253 		priv->mtr_profile_arr = NULL;
9254 	}
9255 	if (priv->hws_mpool) {
9256 		mlx5_aso_mtr_queue_uninit(priv->sh, priv->hws_mpool, NULL);
9257 		mlx5_ipool_destroy(priv->hws_mpool->idx_pool);
9258 		mlx5_free(priv->hws_mpool);
9259 		priv->hws_mpool = NULL;
9260 	}
9261 	if (priv->mtr_bulk.aso) {
9262 		mlx5_free(priv->mtr_bulk.aso);
9263 		priv->mtr_bulk.aso = NULL;
9264 		priv->mtr_bulk.size = 0;
9265 		mlx5_aso_queue_uninit(priv->sh, ASO_OPC_MOD_POLICER);
9266 	}
9267 	if (priv->mtr_bulk.action) {
9268 		mlx5dr_action_destroy(priv->mtr_bulk.action);
9269 		priv->mtr_bulk.action = NULL;
9270 	}
9271 	if (priv->mtr_bulk.devx_obj) {
9272 		claim_zero(mlx5_devx_cmd_destroy(priv->mtr_bulk.devx_obj));
9273 		priv->mtr_bulk.devx_obj = NULL;
9274 	}
9275 }
9276 
9277 int
9278 mlx5_flow_meter_init(struct rte_eth_dev *dev,
9279 		     uint32_t nb_meters,
9280 		     uint32_t nb_meter_profiles,
9281 		     uint32_t nb_meter_policies,
9282 		     uint32_t nb_queues)
9283 {
9284 	struct mlx5_priv *priv = dev->data->dev_private;
9285 	struct mlx5_devx_obj *dcs = NULL;
9286 	uint32_t log_obj_size;
9287 	int ret = 0;
9288 	int reg_id;
9289 	struct mlx5_aso_mtr *aso;
9290 	uint32_t i;
9291 	struct rte_flow_error error;
9292 	uint32_t flags;
9293 	uint32_t nb_mtrs = rte_align32pow2(nb_meters);
9294 	struct mlx5_indexed_pool_config cfg = {
9295 		.size = sizeof(struct mlx5_aso_mtr),
9296 		.trunk_size = 1 << 12,
9297 		.per_core_cache = 1 << 13,
9298 		.need_lock = 1,
9299 		.release_mem_en = !!priv->sh->config.reclaim_mode,
9300 		.malloc = mlx5_malloc,
9301 		.max_idx = nb_meters,
9302 		.free = mlx5_free,
9303 		.type = "mlx5_hw_mtr_mark_action",
9304 	};
9305 
9306 	if (!nb_meters || !nb_meter_profiles || !nb_meter_policies) {
9307 		ret = EINVAL;
9308 		rte_flow_error_set(&error, EINVAL,
9309 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9310 				  NULL, "Meter configuration is invalid.");
9311 		goto err;
9312 	}
9313 	if (!priv->mtr_en || !priv->sh->meter_aso_en) {
9314 		ret = ENOTSUP;
9315 		rte_flow_error_set(&error, ENOTSUP,
9316 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9317 				  NULL, "Meter ASO is not supported.");
9318 		goto err;
9319 	}
9320 	priv->mtr_config.nb_meters = nb_meters;
9321 	log_obj_size = rte_log2_u32(nb_meters >> 1);
9322 	dcs = mlx5_devx_cmd_create_flow_meter_aso_obj
9323 		(priv->sh->cdev->ctx, priv->sh->cdev->pdn,
9324 			log_obj_size);
9325 	if (!dcs) {
9326 		ret = ENOMEM;
9327 		rte_flow_error_set(&error, ENOMEM,
9328 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9329 				  NULL, "Meter ASO object allocation failed.");
9330 		goto err;
9331 	}
9332 	priv->mtr_bulk.devx_obj = dcs;
9333 	reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL);
9334 	if (reg_id < 0) {
9335 		ret = ENOTSUP;
9336 		rte_flow_error_set(&error, ENOTSUP,
9337 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9338 				  NULL, "Meter register is not available.");
9339 		goto err;
9340 	}
9341 	flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
9342 	if (priv->sh->config.dv_esw_en && priv->master)
9343 		flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
9344 	priv->mtr_bulk.action = mlx5dr_action_create_aso_meter
9345 			(priv->dr_ctx, (struct mlx5dr_devx_obj *)dcs,
9346 				reg_id - REG_C_0, flags);
9347 	if (!priv->mtr_bulk.action) {
9348 		ret = ENOMEM;
9349 		rte_flow_error_set(&error, ENOMEM,
9350 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9351 				  NULL, "Meter action creation failed.");
9352 		goto err;
9353 	}
9354 	priv->mtr_bulk.aso = mlx5_malloc(MLX5_MEM_ZERO,
9355 					 sizeof(struct mlx5_aso_mtr) *
9356 					 nb_meters,
9357 					 RTE_CACHE_LINE_SIZE,
9358 					 SOCKET_ID_ANY);
9359 	if (!priv->mtr_bulk.aso) {
9360 		ret = ENOMEM;
9361 		rte_flow_error_set(&error, ENOMEM,
9362 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9363 				  NULL, "Meter bulk ASO allocation failed.");
9364 		goto err;
9365 	}
9366 	priv->mtr_bulk.size = nb_meters;
9367 	aso = priv->mtr_bulk.aso;
9368 	for (i = 0; i < priv->mtr_bulk.size; i++) {
9369 		aso->type = ASO_METER_DIRECT;
9370 		aso->state = ASO_METER_WAIT;
9371 		aso->offset = i;
9372 		aso++;
9373 	}
9374 	priv->hws_mpool = mlx5_malloc(MLX5_MEM_ZERO,
9375 				sizeof(struct mlx5_aso_mtr_pool),
9376 				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
9377 	if (!priv->hws_mpool) {
9378 		ret = ENOMEM;
9379 		rte_flow_error_set(&error, ENOMEM,
9380 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9381 				  NULL, "Meter ipool allocation failed.");
9382 		goto err;
9383 	}
9384 	priv->hws_mpool->devx_obj = priv->mtr_bulk.devx_obj;
9385 	priv->hws_mpool->action = priv->mtr_bulk.action;
9386 	priv->hws_mpool->nb_sq = nb_queues;
9387 	if (mlx5_aso_mtr_queue_init(priv->sh, priv->hws_mpool,
9388 				    &priv->sh->mtrmng->pools_mng, nb_queues)) {
9389 		ret = ENOMEM;
9390 		rte_flow_error_set(&error, ENOMEM,
9391 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9392 				  NULL, "Meter ASO queue allocation failed.");
9393 		goto err;
9394 	}
9395 	/*
9396 	 * No need for a local cache if the meter number is small, since the
9397 	 * flow insertion rate will be very limited in that case.
9398 	 * Set the number below the default trunk size of 4K.
9399 	 */
9400 	if (nb_mtrs <= cfg.trunk_size) {
9401 		cfg.per_core_cache = 0;
9402 		cfg.trunk_size = nb_mtrs;
9403 	} else if (nb_mtrs <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
9404 		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
9405 	}
9406 	priv->hws_mpool->idx_pool = mlx5_ipool_create(&cfg);
9407 	priv->mtr_config.nb_meter_profiles = nb_meter_profiles;
9408 	priv->mtr_profile_arr =
9409 		mlx5_malloc(MLX5_MEM_ZERO,
9410 			    sizeof(struct mlx5_flow_meter_profile) *
9411 			    nb_meter_profiles,
9412 			    RTE_CACHE_LINE_SIZE,
9413 			    SOCKET_ID_ANY);
9414 	if (!priv->mtr_profile_arr) {
9415 		ret = ENOMEM;
9416 		rte_flow_error_set(&error, ENOMEM,
9417 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9418 				  NULL, "Meter profile allocation failed.");
9419 		goto err;
9420 	}
9421 	priv->mtr_config.nb_meter_policies = nb_meter_policies;
9422 	priv->mtr_policy_arr =
9423 		mlx5_malloc(MLX5_MEM_ZERO,
9424 			    sizeof(struct mlx5_flow_meter_policy) *
9425 			    nb_meter_policies,
9426 			    RTE_CACHE_LINE_SIZE,
9427 			    SOCKET_ID_ANY);
9428 	if (!priv->mtr_policy_arr) {
9429 		ret = ENOMEM;
9430 		rte_flow_error_set(&error, ENOMEM,
9431 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9432 				  NULL, "Meter policy allocation failed.");
9433 		goto err;
9434 	}
9435 	return 0;
9436 err:
9437 	mlx5_flow_meter_uninit(dev);
9438 	return ret;
9439 }
9440 
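/*
 * After mlx5_flow_meter_init() has provisioned the bulk ASO meter objects,
 * meters are driven through the generic rte_mtr API. A minimal sketch
 * (illustrative; "port_id" and "profile_id" are placeholders, error
 * handling is elided):
 *
 *	struct rte_mtr_meter_profile profile = {
 *		.alg = RTE_MTR_SRTCM_RFC2697,
 *		.srtcm_rfc2697 = { .cir = 1000000, .cbs = 2048, .ebs = 2048 },
 *	};
 *	struct rte_mtr_error mtr_error;
 *
 *	rte_mtr_meter_profile_add(port_id, profile_id, &profile, &mtr_error);
 */
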
9441 #endif
9442