xref: /dpdk/drivers/net/mlx5/mlx5_nta_split.c (revision 821a6a5cc4951337a7eac64b6cce6a25c01be442)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2024 NVIDIA Corporation & Affiliates
 */

#include <rte_common.h>
#include <rte_flow.h>

#include "mlx5_malloc.h"
#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"

#ifdef HAVE_MLX5_HWS_SUPPORT

/*
 * Generate new actions lists for prefix and suffix flows.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[out] prefix_act
 *   Pointer to actions for the prefix flow rule.
 * @param[out] suffix_act
 *   Pointer to actions for the suffix flow rule.
 * @param[in] actions
 *   Pointer to the original actions list.
 * @param[in] qrss
 *   Pointer to the QUEUE / RSS action.
 * @param[in] actions_n
 *   Number of actions in the original list.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Positive prefix flow ID on success, zero on failure.
 */
static uint32_t
mlx5_flow_nta_split_qrss_actions_prep(struct rte_eth_dev *dev,
				      struct rte_flow_action *prefix_act,
				      struct rte_flow_action *suffix_act,
				      const struct rte_flow_action *actions,
				      const struct rte_flow_action *qrss,
				      int actions_n,
				      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rte_flow_action_set_tag *set_tag;
	struct rte_flow_action_jump *jump;
	const int qrss_idx = qrss - actions;
	uint32_t flow_id = 0;
	int ret = 0;

	/* Allocate the new subflow ID; it will be matched later. */
	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
	if (!flow_id) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				   "can't allocate id for split Q/RSS subflow");
		return 0;
	}
	/*
	 * The given actions will be split:
	 * - Replace the QUEUE/RSS action with SET_TAG to set the flow ID.
	 * - Add a JUMP to the mreg CP_TBL.
	 * As a result, there will be one more action.
	 */
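	/*
	 * Illustrative example, assuming actions_n counts the END action:
	 *   original: { COUNT, QUEUE, END }
	 *   prefix:   { COUNT, TAG(flow_id), JUMP(CP_TBL), END }
	 *   suffix:   { QUEUE, END }
	 */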
	memcpy(prefix_act, actions, sizeof(struct rte_flow_action) * actions_n);
	/* Account for the one extra action (the appended JUMP). */
	actions_n++;
	set_tag = (void *)(prefix_act + actions_n);
	/* Reuse the ASO register; this should always succeed. Consider using REG_C_6. */
	ret = flow_hw_get_reg_id_by_domain(dev, RTE_FLOW_ITEM_TYPE_METER_COLOR,
					   MLX5DR_TABLE_TYPE_NIC_RX, 0);
	MLX5_ASSERT(ret != (int)REG_NON);
	set_tag->id = (enum modify_reg)ret;
	/* Internal SET_TAG action to set the flow ID. */
	set_tag->data = flow_id;
	/* Construct new actions array and replace QUEUE/RSS action. */
	prefix_act[qrss_idx] = (struct rte_flow_action) {
		.type = (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_TAG,
		.conf = set_tag,
	};
	/* JUMP action to jump to mreg copy table (CP_TBL). */
	jump = (void *)(set_tag + 1);
	*jump = (struct rte_flow_action_jump) {
		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
	};
	prefix_act[actions_n - 2] = (struct rte_flow_action) {
		.type = RTE_FLOW_ACTION_TYPE_JUMP,
		.conf = jump,
	};
	prefix_act[actions_n - 1] = (struct rte_flow_action) {
		.type = RTE_FLOW_ACTION_TYPE_END,
	};
	/* Copy the suffix Q/RSS action; it can also be an indirect RSS. */
	suffix_act[0] = (struct rte_flow_action) {
		.type = qrss->type,
		.conf = qrss->conf,
	};
	suffix_act[1] = (struct rte_flow_action) {
		.type = RTE_FLOW_ACTION_TYPE_END,
	};
	return flow_id;
}

/*
 * Generate a new attribute and items for the suffix flow.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[out] split_attr
 *   Pointer to the attribute for the suffix flow rule.
 * @param[out] split_items
 *   Pointer to the items for the suffix flow rule.
 * @param[in] qrss_id
 *   Prefix flow ID to match.
 */
static void
mlx5_flow_nta_split_qrss_items_prep(struct rte_eth_dev *dev,
				    struct rte_flow_attr *split_attr,
				    struct rte_flow_item *split_items,
				    uint32_t qrss_id)
{
	struct mlx5_rte_flow_item_tag *q_tag_spec;

	/* MLX5_FLOW_MREG_CP_TABLE_GROUP -> MLX5_FLOW_MREG_ACT_TABLE_GROUP(Q/RSS base) */
	split_attr->ingress = 1;
	split_attr->group = MLX5_FLOW_MREG_ACT_TABLE_GROUP;
	/* Only the internal tag will be used, together with the item flags for RSS. */
	q_tag_spec = (void *)((char *)split_items + 2 * sizeof(struct rte_flow_item));
	split_items[0].type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG;
	split_items[0].spec = q_tag_spec;
	split_items[1].type = RTE_FLOW_ITEM_TYPE_END;
	q_tag_spec->data = qrss_id;
	q_tag_spec->id = (enum modify_reg)
			 flow_hw_get_reg_id_by_domain(dev, RTE_FLOW_ITEM_TYPE_METER_COLOR,
						      MLX5DR_TABLE_TYPE_NIC_RX, 0);
	MLX5_ASSERT(q_tag_spec->id != REG_NON);
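	/*
	 * The resulting suffix flow conceptually matches
	 * TAG(reg == qrss_id) in MLX5_FLOW_MREG_ACT_TABLE_GROUP before
	 * the original Q/RSS action is applied.
	 */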
}

/*
 * Check the split information and split the actions, items, and attributes
 * into prefix and suffix parts to connect the flows after passing the copy
 * table.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] actions
 *   Pointer to the original actions list.
 * @param[in] qrss
 *   Pointer to the QUEUE / RSS action.
 * @param[in] action_flags
 *   Holds the actions detected.
 * @param[in] actions_n
 *   Number of original actions.
 * @param[in] external
 *   Whether this flow rule was created by a request external to the PMD.
 * @param[out] res
 *   Pointer to the resource to store the split result.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 1 on a successful split.
 *   - 0 when no split is needed.
 *   - Negative errno value on error.
 */
int
mlx5_flow_nta_split_metadata(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action actions[],
			     const struct rte_flow_action *qrss,
			     uint64_t action_flags,
			     int actions_n,
			     bool external,
			     struct mlx5_flow_hw_split_resource *res,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *config = &priv->sh->config;
	const struct rte_flow_action_queue *queue;
	const struct rte_flow_action_rss *rss;
	struct rte_flow_action *prfx_actions;
	struct rte_flow_action *sfx_actions;
	struct rte_flow_attr *sfx_attr;
	struct rte_flow_item *sfx_items;
	size_t prfx_act_size, sfx_act_size;
	size_t attr_size, item_size;
	size_t total_size;
	uint32_t qrss_id;

	/*
	 * The metadata copy flow should be created:
	 *   1. only on the NIC Rx domain with Q / RSS
	 *   2. only when extended metadata mode is enabled
	 *   3. only on HWS; "config->dv_flow_en == 2" should always hold
	 *      there, so this check can be skipped
	 * Note:
	 *   1. Even if metadata is not enabled in the data-path, it can still
	 *      be used for matching on the Rx side.
	 *   2. The HWS Tx default copy rule or SQ rules already have the metadata
	 *      copy on the root table. The user's rule will always be inserted
	 *      and executed after the root table steering.
	 */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY || attr->transfer ||
	    attr->egress || !external || !qrss)
		return 0;
	if (action_flags & MLX5_FLOW_ACTION_QUEUE) {
		queue = (const struct rte_flow_action_queue *)qrss->conf;
		if (mlx5_rxq_is_hairpin(dev, queue->index))
			return 0;
	} else if (action_flags & MLX5_FLOW_ACTION_RSS) {
		rss = (const struct rte_flow_action_rss *)qrss->conf;
		if (mlx5_rxq_is_hairpin(dev, rss->queue[0]))
			return 0;
	}
	/* The prefix and suffix flows' actions. */
	prfx_act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
			sizeof(struct mlx5_rte_flow_action_set_tag) +
			sizeof(struct rte_flow_action_jump);
	sfx_act_size = sizeof(struct rte_flow_action) * 2;
	/* The suffix attribute. */
	attr_size = sizeof(struct rte_flow_attr);
	/* The suffix items - mlx5_tag + end. */
	item_size = sizeof(struct rte_flow_item) * 2 +
		    sizeof(struct mlx5_rte_flow_item_tag);
	total_size = prfx_act_size + sfx_act_size + attr_size + item_size;
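	/*
	 * The single allocation holding all split resources is laid out as
	 * (illustrative):
	 *   prfx_actions -> prefix actions[actions_n + 1] | set_tag | jump
	 *   sfx_actions  -> suffix actions[2]
	 *   sfx_attr     -> suffix rte_flow_attr
	 *   sfx_items    -> suffix items[2] | tag item spec
	 */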
	prfx_actions = mlx5_malloc(MLX5_MEM_ZERO, total_size, 0, SOCKET_ID_ANY);
	if (!prfx_actions)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "no memory to split "
					  "metadata flow");
	sfx_actions = (void *)((char *)prfx_actions + prfx_act_size);
	qrss_id = mlx5_flow_nta_split_qrss_actions_prep(dev, prfx_actions,
							sfx_actions, actions,
							qrss, actions_n, error);
	if (!qrss_id) {
		mlx5_free(prfx_actions);
		return -rte_errno;
	}
	sfx_attr = (void *)((char *)sfx_actions + sfx_act_size);
	sfx_items = (void *)((char *)sfx_attr + attr_size);
	mlx5_flow_nta_split_qrss_items_prep(dev, sfx_attr, sfx_items, qrss_id);
	res->prefix.actions = prfx_actions;
	res->suffix.actions = sfx_actions;
	res->suffix.items = sfx_items;
	res->suffix.attr = sfx_attr;
	res->buf_start = prfx_actions;
	res->flow_idx = qrss_id;
	return 1;
}
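
/*
 * Usage sketch (illustrative only): the caller is expected to create the
 * prefix rule from the original attribute and items plus res->prefix.actions,
 * create the suffix rule from res->suffix.attr / res->suffix.items /
 * res->suffix.actions, and release both the buffer and the flow ID with
 * mlx5_flow_nta_split_resource_free() once the rules are destroyed.
 */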

/*
 * Release the buffer and flow ID.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] res
 *   Pointer to the resource to release.
 */
void
mlx5_flow_nta_split_resource_free(struct rte_eth_dev *dev,
				  struct mlx5_flow_hw_split_resource *res)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], res->flow_idx);
	mlx5_free(res->buf_start);
}

/*
 * Callback functions for the metadata copy and mark / flag set flow.
 * The create and remove callbacks cannot reuse the DV ones since the flow
 * opaque and structure are different, and the action used to copy the
 * metadata is also different.
 */
struct mlx5_list_entry *
flow_nta_mreg_create_cb(void *tool_ctx, void *cb_ctx)
{
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	struct rte_flow_error *error = ctx->error;
	uint32_t idx = 0;
	uint32_t mark_id = *(uint32_t *)(ctx->data);
	struct rte_flow_attr attr = {
		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
		.ingress = 1,
	};
	struct mlx5_rte_flow_item_tag tag_spec = {
		.id = REG_C_0,
		.data = mark_id,
	};
	struct mlx5_rte_flow_item_tag tag_mask = {
		.data = priv->sh->dv_mark_mask,
	};
	struct rte_flow_action_mark ftag = {
		.id = mark_id,
	};
	struct rte_flow_action_modify_field rx_meta = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.tag_index = REG_B,
		},
		.src = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.tag_index = REG_C_1,
		},
		.width = 32,
	};
	struct rte_flow_action_jump jump = {
		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
	};
	struct rte_flow_item items[2];
	struct rte_flow_action actions[4];

	/* Provide the full width of the FLAG-specific value. */
	if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
		tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
	/* Build a new flow. */
	if (mark_id != MLX5_DEFAULT_COPY_ID) {
		items[0] = (struct rte_flow_item) {
			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
			.spec = &tag_spec,
			.mask = &tag_mask,
		};
		actions[0] = (struct rte_flow_action) {
			.type = RTE_FLOW_ACTION_TYPE_MARK,
			.conf = &ftag,
		};
	} else {
		/* Default rule, wildcard match with lowest priority. */
		attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
		items[0] = (struct rte_flow_item) {
			.type = RTE_FLOW_ITEM_TYPE_ETH,
		};
		actions[0] = (struct rte_flow_action) {
			.type = RTE_FLOW_ACTION_TYPE_VOID,
		};
	}
	/* (match REG 'tag') or all. */
	items[1].type = RTE_FLOW_ITEM_TYPE_END;
	/* (Mark) or void + copy to Rx meta + jump to the MREG_ACT_TABLE_GROUP. */
	actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
	actions[1].conf = &rx_meta;
	actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
	actions[2].conf = &jump;
	actions[3].type = RTE_FLOW_ACTION_TYPE_END;
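	/*
	 * Illustrative summary of the rule built above:
	 *   mark copy:    match TAG(REG_C_0 == mark_id & dv_mark_mask)
	 *                 -> MARK(mark_id), copy REG_C_1 to REG_B, JUMP
	 *   default copy: match any ETH at the lowest priority
	 *                 -> VOID, copy REG_C_1 to REG_B, JUMP
	 */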
	/* Build a new entry. */
	mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
	if (!mcp_res) {
		rte_errno = ENOMEM;
		return NULL;
	}
	mcp_res->idx = idx;
	mcp_res->mark_id = mark_id;
	/*
	 * The copy flows are not included in any list. They are referenced
	 * from other flows and cannot be applied, removed, or deleted in
	 * arbitrary order by list traversal.
	 */
	mcp_res->hw_flow = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_MCP, &attr,
						 items, actions, false, error);
	if (!mcp_res->hw_flow) {
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
		return NULL;
	}
	return &mcp_res->hlist_ent;
}

void
flow_nta_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_flow_mreg_copy_resource *mcp_res =
			       container_of(entry, typeof(*mcp_res), hlist_ent);
	struct rte_eth_dev *dev = tool_ctx;
	struct mlx5_priv *priv = dev->data->dev_private;

	MLX5_ASSERT(mcp_res->hw_flow);
	mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->hw_flow);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}

/*
 * Add a flow to copy the flow metadata registers in RX_CP_TBL.
 * @see flow_mreg_add_copy_action
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] mark_id
 *   ID of the MARK action, zero means the default flow for META.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   Associated resource on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_flow_mreg_copy_resource *
mlx5_flow_nta_add_copy_action(struct rte_eth_dev *dev,
			      uint32_t mark_id,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_list_entry *entry;
	uint32_t specialize = 0;
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &mark_id,
		.data2 = &specialize,
	};

	/* Check if already registered. */
	MLX5_ASSERT(priv->sh->mreg_cp_tbl);
	entry = mlx5_hlist_register(priv->sh->mreg_cp_tbl, mark_id, &ctx);
	if (!entry)
		return NULL;
	return container_of(entry, struct mlx5_flow_mreg_copy_resource, hlist_ent);
}

/*
 * Release a flow in RX_CP_TBL.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] idx
 *   Index in the pool storing the copy flow.
 */
void
mlx5_flow_nta_del_copy_action(struct rte_eth_dev *dev, uint32_t idx)
{
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!idx)
		return;
	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
	if (!mcp_res || !priv->sh->mreg_cp_tbl)
		return;
	MLX5_ASSERT(mcp_res->hw_flow);
	mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, &mcp_res->hlist_ent);
}

/*
 * Remove the default copy action from RX_CP_TBL.
 * @see flow_mreg_del_default_copy_action
 *
 * This function is called from mlx5_dev_start(). Thread safety is not
 * guaranteed.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_nta_del_default_copy_action(struct rte_eth_dev *dev)
{
	struct mlx5_list_entry *entry;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_cb_ctx ctx;
	uint32_t mark_id;

	/* Check if default flow is registered. */
	if (!priv->sh->mreg_cp_tbl)
		return;
	mark_id = MLX5_DEFAULT_COPY_ID;
	ctx.data = &mark_id;
	entry = mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx);
	if (!entry)
		return;
	mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, entry);
}

/*
 * Add the default copy action in RX_CP_TBL.
 *
 * This function is called from mlx5_dev_start(). Thread safety is not
 * guaranteed.
 * @see flow_mreg_add_default_copy_action
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 for success, negative value otherwise and rte_errno is set.
 */
int
mlx5_flow_nta_add_default_copy_action(struct rte_eth_dev *dev,
				      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *config = &priv->sh->config;
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	struct mlx5_flow_cb_ctx ctx;
	uint32_t mark_id;

	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
	    !priv->sh->dv_regc0_mask)
		return 0;
	/*
	 * Adding the default mreg copy flow may be requested multiple times,
	 * while it is removed only once in stop. Avoid registering it twice.
	 */
	mark_id = MLX5_DEFAULT_COPY_ID;
	ctx.data = &mark_id;
	if (mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx))
		return 0;
	mcp_res = mlx5_flow_nta_add_copy_action(dev, mark_id, error);
	if (!mcp_res)
		return -rte_errno;
	return 0;
}

/*
 * Add a flow to copy the flow metadata registers in RX_CP_TBL.
 * @see flow_mreg_update_copy_table
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] idx
 *   Pointer to store the index of the flow in the pool.
 * @param[in] mark
 *   Pointer to the mark or flag action.
 * @param[in] action_flags
 *   Holds the actions detected.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
int
mlx5_flow_nta_update_copy_table(struct rte_eth_dev *dev,
				uint32_t *idx,
				const struct rte_flow_action *mark,
				uint64_t action_flags,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_sh_config *config = &priv->sh->config;
	struct mlx5_flow_mreg_copy_resource *mcp_res;
	const struct rte_flow_action_mark *mark_conf;
	uint32_t mark_id;

	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
	    !priv->sh->dv_regc0_mask)
		return 0;
	/* Find MARK action. */
	if (action_flags & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)) {
		if (mark) {
			mark_conf = (const struct rte_flow_action_mark *)mark->conf;
			mark_id = mark_conf->id;
		} else {
			mark_id = MLX5_FLOW_MARK_DEFAULT;
		}
		mcp_res = mlx5_flow_nta_add_copy_action(dev, mark_id, error);
		if (!mcp_res)
			return -rte_errno;
		*idx = mcp_res->idx;
	}
	return 0;
}


#endif