/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2024 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>

#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"
#include "rte_common.h"

#ifdef HAVE_MLX5_HWS_SUPPORT

struct mlx5_nta_rss_ctx {
	struct rte_eth_dev *dev;
	struct rte_flow_attr *attr;
	struct rte_flow_item *pattern;
	struct rte_flow_action *actions;
	const struct rte_flow_action_rss *rss_conf;
	struct rte_flow_error *error;
	struct mlx5_nta_rss_flow_head *head;
	uint64_t pattern_flags;
	enum mlx5_flow_type flow_type;
	bool external;
};

#define MLX5_RSS_PTYPE_ITEM_INDEX 0
#ifdef MLX5_RSS_PTYPE_DEBUG
#define MLX5_RSS_PTYPE_ACTION_INDEX 1
#else
#define MLX5_RSS_PTYPE_ACTION_INDEX 0
#endif

#define MLX5_RSS_PTYPE_ITEMS_NUM (MLX5_RSS_PTYPE_ITEM_INDEX + 2)
#define MLX5_RSS_PTYPE_ACTIONS_NUM (MLX5_RSS_PTYPE_ACTION_INDEX + 2)

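/*
 * Create a single RSS flow rule in the PTYPE expansion group.
 * The rule matches the given packet type and hashes on the given RSS
 * types; both are patched into the pattern and actions templates held
 * in the expansion context.
 * On success the new flow is linked into the expansion flow list.
 */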
static int
mlx5_nta_ptype_rss_flow_create(struct mlx5_nta_rss_ctx *ctx,
			       uint32_t ptype, uint64_t rss_type)
{
	int ret;
	struct rte_flow_hw *flow;
	struct rte_flow_item_ptype *ptype_spec = (void *)(uintptr_t)
				    ctx->pattern[MLX5_RSS_PTYPE_ITEM_INDEX].spec;
	struct rte_flow_action_rss *rss_conf = (void *)(uintptr_t)
				    ctx->actions[MLX5_RSS_PTYPE_ACTION_INDEX].conf;
	bool dbg_log = rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG);
	uint32_t mark_id = 0;
#ifdef MLX5_RSS_PTYPE_DEBUG
	struct rte_flow_action_mark *mark = (void *)(uintptr_t)
				     ctx->actions[MLX5_RSS_PTYPE_ACTION_INDEX - 1].conf;

	/*
	 * Inner L3 and L4 ptype values are too large for a 24-bit mark.
	 */
	mark->id =
		((ptype & (RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK)) == ptype) ?
		ptype >> 20 : ptype;
	mark_id = mark->id;
	dbg_log = true;
#endif
	ptype_spec->packet_type = ptype;
	rss_conf->types = rss_type;
	ret = flow_hw_create_flow(ctx->dev, MLX5_FLOW_TYPE_GEN, ctx->attr,
				  ctx->pattern, ctx->actions,
				  MLX5_FLOW_ITEM_PTYPE, MLX5_FLOW_ACTION_RSS,
				  ctx->external, &flow, ctx->error);
	if (flow) {
		SLIST_INSERT_HEAD(ctx->head, flow, nt2hws->next);
		if (dbg_log) {
			DRV_LOG(NOTICE,
				"PTYPE RSS: group %u ptype spec %#x rss types %#lx mark %#x",
			       ctx->attr->group, ptype_spec->packet_type,
			       (unsigned long)rss_conf->types, mark_id);
		}
	}
	return ret;
}


/*
 * Call conditions:
 * * Flow pattern did not include outer L3 and L4 items.
 * * RSS configuration had L3 hash types.
 */
static struct rte_flow_hw *
mlx5_hw_rss_expand_l3(struct mlx5_nta_rss_ctx *rss_ctx)
{
	int ret;
	int ptype_ip4, ptype_ip6;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_ctx->rss_conf->types);

	if (rss_ctx->rss_conf->level < 2) {
		ptype_ip4 = RTE_PTYPE_L3_IPV4;
		ptype_ip6 = RTE_PTYPE_L3_IPV6;
	} else {
		ptype_ip4 = RTE_PTYPE_INNER_L3_IPV4;
		ptype_ip6 = RTE_PTYPE_INNER_L3_IPV6;
	}
	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
		ret = mlx5_nta_ptype_rss_flow_create
			(rss_ctx, ptype_ip4, rss_types & ~MLX5_IPV6_LAYER_TYPES);
		if (ret)
			goto error;
	}
	if (rss_types & MLX5_IPV6_LAYER_TYPES) {
		ret = mlx5_nta_ptype_rss_flow_create
			(rss_ctx, ptype_ip6, rss_types & ~MLX5_IPV4_LAYER_TYPES);
		if (ret)
			goto error;
	}
	return SLIST_FIRST(rss_ctx->head);

error:
	flow_hw_list_destroy(rss_ctx->dev, rss_ctx->flow_type,
			     (uintptr_t)SLIST_FIRST(rss_ctx->head));
	return NULL;
}

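/*
 * Expand flows for a fixed L3 ptype into per-L4 ptype flows.
 * Up to 3 flows are created - for ESP, UDP and TCP - each hashing
 * on its own L4 protocol, with the other L4 hash types masked out.
 * On failure, all flows accumulated in the expansion list are destroyed.
 */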
static void
mlx5_nta_rss_expand_l3_l4(struct mlx5_nta_rss_ctx *rss_ctx,
			  uint64_t rss_types, uint64_t rss_l3_types)
{
	int ret;
	int ptype_l3, ptype_l4_udp, ptype_l4_tcp, ptype_l4_esp = 0;
	uint64_t rss = rss_types &
		 ~(rss_l3_types == MLX5_IPV4_LAYER_TYPES ?
		  MLX5_IPV6_LAYER_TYPES : MLX5_IPV4_LAYER_TYPES);

	if (rss_ctx->rss_conf->level < 2) {
		ptype_l3 = rss_l3_types == MLX5_IPV4_LAYER_TYPES ?
			   RTE_PTYPE_L3_IPV4 : RTE_PTYPE_L3_IPV6;
		ptype_l4_esp = RTE_PTYPE_TUNNEL_ESP;
		ptype_l4_udp = RTE_PTYPE_L4_UDP;
		ptype_l4_tcp = RTE_PTYPE_L4_TCP;
	} else {
		ptype_l3 = rss_l3_types == MLX5_IPV4_LAYER_TYPES ?
			   RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_INNER_L3_IPV6;
		ptype_l4_udp = RTE_PTYPE_INNER_L4_UDP;
		ptype_l4_tcp = RTE_PTYPE_INNER_L4_TCP;
	}
	if (rss_types & RTE_ETH_RSS_ESP) {
		ret = mlx5_nta_ptype_rss_flow_create
			(rss_ctx, ptype_l3 | ptype_l4_esp,
			rss & ~(RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP));
		if (ret)
			goto error;
	}
	if (rss_types & RTE_ETH_RSS_UDP) {
		ret = mlx5_nta_ptype_rss_flow_create(rss_ctx,
			ptype_l3 | ptype_l4_udp,
			rss & ~(RTE_ETH_RSS_ESP | RTE_ETH_RSS_TCP));
		if (ret)
			goto error;
	}
	if (rss_types & RTE_ETH_RSS_TCP) {
		ret = mlx5_nta_ptype_rss_flow_create(rss_ctx,
			ptype_l3 | ptype_l4_tcp,
			rss & ~(RTE_ETH_RSS_ESP | RTE_ETH_RSS_UDP));
		if (ret)
			goto error;
	}
	return;
error:
	flow_hw_list_destroy(rss_ctx->dev, rss_ctx->flow_type,
			     (uintptr_t)SLIST_FIRST(rss_ctx->head));
}

/*
 * Call conditions:
 * * Flow pattern did not include L4 item.
 * * RSS configuration had L4 hash types.
 */
static struct rte_flow_hw *
mlx5_hw_rss_expand_l4(struct mlx5_nta_rss_ctx *rss_ctx)
{
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_ctx->rss_conf->types);
	uint64_t l3_item = rss_ctx->pattern_flags &
			   (rss_ctx->rss_conf->level < 2 ?
			    MLX5_FLOW_LAYER_OUTER_L3 : MLX5_FLOW_LAYER_INNER_L3);

	if (l3_item) {
		/*
		 * An L3 header was present in the original pattern.
		 * Expand the L4 level only.
		 */
		if (l3_item & MLX5_FLOW_LAYER_L3_IPV4)
			mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types, MLX5_IPV4_LAYER_TYPES);
		else
			mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types, MLX5_IPV6_LAYER_TYPES);
	} else {
		if (rss_types & (MLX5_IPV4_LAYER_TYPES | MLX5_IPV6_LAYER_TYPES)) {
			/*
			 * No L3 item in the application flow pattern and
			 * the RSS hash types are both L3 and L4:
			 * expand L3 according to the RSS configuration,
			 * then expand L4 for each L3 protocol.
			 */
			mlx5_hw_rss_expand_l3(rss_ctx);
			if (rss_types & MLX5_IPV4_LAYER_TYPES)
				mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
							  MLX5_IPV4_LAYER_TYPES);
			if (rss_types & MLX5_IPV6_LAYER_TYPES)
				mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
							  MLX5_IPV6_LAYER_TYPES);
		} else {
			/*
			 * No L3 item in the application flow pattern and
			 * the RSS hash types are L4 only:
			 * expand L4 for both IPv4 and IPv6.
			 */
			mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
						  MLX5_IPV4_LAYER_TYPES);
			mlx5_nta_rss_expand_l3_l4(rss_ctx, rss_types,
						  MLX5_IPV6_LAYER_TYPES);
		}
	}
	return SLIST_EMPTY(rss_ctx->head) ? NULL : SLIST_FIRST(rss_ctx->head);
}

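/*
 * Create the indexed pool that hands out dedicated flow group numbers
 * for PTYPE RSS expansion, up to MLX5_FLOW_TABLE_PTYPE_RSS_NUM groups.
 */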
static struct mlx5_indexed_pool *
mlx5_nta_ptype_ipool_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_indexed_pool_config ipool_cfg = {
		.size = 1,
		.trunk_size = 32,
		.grow_trunk = 5,
		.grow_shift = 1,
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.max_idx = MLX5_FLOW_TABLE_PTYPE_RSS_NUM,
		.free = mlx5_free,
		.type = "mlx5_nta_ptype_rss"
	};
	return mlx5_ipool_create(&ipool_cfg);
}

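/* Return a PTYPE RSS expansion group to the groups pool. */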
static void
mlx5_hw_release_rss_ptype_group(struct rte_eth_dev *dev, uint32_t group)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->ptype_rss_groups)
		return;
	mlx5_ipool_free(priv->ptype_rss_groups, group);
}

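/*
 * Allocate a dedicated flow group for PTYPE RSS expansion,
 * creating the groups pool on first use.
 * Returns 0 on failure.
 */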
static uint32_t
mlx5_hw_get_rss_ptype_group(struct rte_eth_dev *dev)
{
	void *obj;
	uint32_t idx = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->ptype_rss_groups) {
		priv->ptype_rss_groups = mlx5_nta_ptype_ipool_create(dev);
		if (!priv->ptype_rss_groups) {
			DRV_LOG(DEBUG, "PTYPE RSS: failed to allocate groups pool");
			return 0;
		}
	}
	obj = mlx5_ipool_malloc(priv->ptype_rss_groups, &idx);
	if (!obj) {
		DRV_LOG(DEBUG, "PTYPE RSS: failed to fetch ptype group from the pool");
		return 0;
	}
	return idx + MLX5_FLOW_TABLE_PTYPE_RSS_BASE;
}

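/*
 * Create the low-priority catch-all flow in the PTYPE expansion group.
 * Packets that match none of the generated ptype rules keep the original
 * RSS configuration.
 */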
static struct rte_flow_hw *
mlx5_hw_rss_ptype_create_miss_flow(struct rte_eth_dev *dev,
				   const struct rte_flow_action_rss *rss_conf,
				   uint32_t ptype_group, bool external,
				   struct rte_flow_error *error)
{
	struct rte_flow_hw *flow = NULL;
	const struct rte_flow_attr miss_attr = {
		.ingress = 1,
		.group = ptype_group,
		.priority = 3
	};
	const struct rte_flow_item miss_pattern[2] = {
		[0] = { .type = RTE_FLOW_ITEM_TYPE_ETH },
		[1] = { .type = RTE_FLOW_ITEM_TYPE_END }
	};
	struct rte_flow_action miss_actions[] = {
#ifdef MLX5_RSS_PTYPE_DEBUG
		[MLX5_RSS_PTYPE_ACTION_INDEX - 1] = {
			.type = RTE_FLOW_ACTION_TYPE_MARK,
			.conf = &(const struct rte_flow_action_mark){.id = 0xfac}
		},
#endif
		[MLX5_RSS_PTYPE_ACTION_INDEX] = {
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = rss_conf
		},
		[MLX5_RSS_PTYPE_ACTION_INDEX + 1] = { .type = RTE_FLOW_ACTION_TYPE_END }
	};

	flow_hw_create_flow(dev, MLX5_FLOW_TYPE_GEN, &miss_attr,
			    miss_pattern, miss_actions, 0, MLX5_FLOW_ACTION_RSS,
			    external, &flow, error);
	return flow;
}

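/*
 * Re-create the original flow with its RSS action - direct or indirect -
 * replaced by a JUMP to the PTYPE expansion group.
 * All other actions are copied verbatim.
 */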
static struct rte_flow_hw *
mlx5_hw_rss_ptype_create_base_flow(struct rte_eth_dev *dev,
				   const struct rte_flow_attr *attr,
				   const struct rte_flow_item pattern[],
				   const struct rte_flow_action orig_actions[],
				   uint32_t ptype_group, uint64_t item_flags,
				   uint64_t action_flags, bool external,
				   enum mlx5_flow_type flow_type,
				   struct rte_flow_error *error)
{
	int i = 0;
	struct rte_flow_hw *flow = NULL;
	struct rte_flow_action actions[MLX5_HW_MAX_ACTS];
	enum mlx5_indirect_type indirect_type;

	do {
		switch (orig_actions[i].type) {
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			indirect_type = (typeof(indirect_type))
					MLX5_INDIRECT_ACTION_TYPE_GET
					(orig_actions[i].conf);
			if (indirect_type != MLX5_INDIRECT_ACTION_TYPE_RSS) {
				actions[i] = orig_actions[i];
				break;
			}
			/* Fall through */
		case RTE_FLOW_ACTION_TYPE_RSS:
			actions[i].type = RTE_FLOW_ACTION_TYPE_JUMP;
			actions[i].conf = &(const struct rte_flow_action_jump) {
				.group = ptype_group
			};
			break;
		default:
			actions[i] = orig_actions[i];
		}
	} while (actions[i++].type != RTE_FLOW_ACTION_TYPE_END);
	action_flags &= ~MLX5_FLOW_ACTION_RSS;
	action_flags |= MLX5_FLOW_ACTION_JUMP;
	flow_hw_create_flow(dev, flow_type, attr, pattern, actions,
			    item_flags, action_flags, external, &flow, error);
	return flow;
}

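/*
 * Locate the RSS configuration referenced by a flow actions list.
 * Resolves both direct RSS actions and indirect (shared) RSS handles.
 * Returns NULL with a zeroed error when no RSS action is present,
 * or with EINVAL when an indirect RSS handle cannot be resolved.
 */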
const struct rte_flow_action_rss *
flow_nta_locate_rss(struct rte_eth_dev *dev,
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	const struct rte_flow_action *a;
	const struct rte_flow_action_rss *rss_conf = NULL;

	for (a = actions; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		if (a->type == RTE_FLOW_ACTION_TYPE_RSS) {
			rss_conf = a->conf;
			break;
		}
		if (a->type == RTE_FLOW_ACTION_TYPE_INDIRECT &&
		    MLX5_INDIRECT_ACTION_TYPE_GET(a->conf) ==
		    MLX5_INDIRECT_ACTION_TYPE_RSS) {
			struct mlx5_priv *priv = dev->data->dev_private;
			struct mlx5_shared_action_rss *shared_rss;
			uint32_t handle = (uint32_t)(uintptr_t)a->conf;

			shared_rss = mlx5_ipool_get
				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
				 MLX5_INDIRECT_ACTION_IDX_GET(handle));
			if (!shared_rss) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
						   a->conf, "invalid shared RSS handle");
				return NULL;
			}
			rss_conf = &shared_rss->origin;
			break;
		}
	}
	if (a->type == RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL);
		return NULL;
	}
	return rss_conf;
}

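/* Fill the PTYPE RSS expansion context from the current flow parameters. */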
static __rte_always_inline void
mlx5_nta_rss_init_ptype_ctx(struct mlx5_nta_rss_ctx *rss_ctx,
			    struct rte_eth_dev *dev,
			    struct rte_flow_attr *ptype_attr,
			    struct rte_flow_item *ptype_pattern,
			    struct rte_flow_action *ptype_actions,
			    const struct rte_flow_action_rss *rss_conf,
			    struct mlx5_nta_rss_flow_head *head,
			    struct rte_flow_error *error,
			    uint64_t item_flags,
			    enum mlx5_flow_type flow_type, bool external)
{
	rss_ctx->dev = dev;
	rss_ctx->attr = ptype_attr;
	rss_ctx->pattern = ptype_pattern;
	rss_ctx->actions = ptype_actions;
	rss_ctx->rss_conf = rss_conf;
	rss_ctx->error = error;
	rss_ctx->head = head;
	rss_ctx->pattern_flags = item_flags;
	rss_ctx->flow_type = flow_type;
	rss_ctx->external = external;
}

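/*
 * Create a flow as-is, without PTYPE expansion.
 * If copy_actions is set, the actions list is copied first and any RSS
 * action, direct or indirect, is redirected to the refined rss_conf.
 */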
static struct rte_flow_hw *
flow_nta_create_single(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item items[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_action_rss *rss_conf,
		       int64_t item_flags, uint64_t action_flags,
		       bool external, bool copy_actions,
		       enum mlx5_flow_type flow_type,
		       struct rte_flow_error *error)
{
	struct rte_flow_hw *flow = NULL;
	struct rte_flow_action copy[MLX5_HW_MAX_ACTS];
	const struct rte_flow_action *_actions;

	if (copy_actions) {
		int i;

		_actions = copy;
		for (i = 0; ; i++) {
			copy[i] = actions[i];
			switch (actions[i].type) {
			case RTE_FLOW_ACTION_TYPE_RSS:
				copy[i].conf = rss_conf;
				break;
			case RTE_FLOW_ACTION_TYPE_INDIRECT:
				if (MLX5_INDIRECT_ACTION_TYPE_GET(actions[i].conf) ==
					MLX5_INDIRECT_ACTION_TYPE_RSS) {
					copy[i].type = RTE_FLOW_ACTION_TYPE_RSS;
					copy[i].conf = rss_conf;
				}
				break;
			case RTE_FLOW_ACTION_TYPE_END:
				goto end;
			default:
				break;
			}
		}
	} else {
		_actions = actions;
	}
end:
	flow_hw_create_flow(dev, flow_type, attr, items,
			    _actions, item_flags, action_flags,
			    external, &flow, error);
	return flow;
}

/*
 * MLX5 HW hashes IPv4 and IPv6 L3 headers and UDP, TCP, ESP L4 headers.
 * RSS expansion is required when the RSS action was configured to hash
 * a network protocol that is not present in the flow pattern.
 */
#define MLX5_PTYPE_RSS_OUTER_MASK (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6 | \
				  RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP | \
				  RTE_PTYPE_TUNNEL_ESP)
#define MLX5_PTYPE_RSS_INNER_MASK (RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L3_IPV6 | \
				  RTE_PTYPE_INNER_L4_TCP | RTE_PTYPE_INNER_L4_UDP)

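/*
 * Non-template RSS flow creation entry point.
 * If the pattern and RSS types do not require expansion, a single flow
 * is created. Otherwise the flow is split into a dedicated PTYPE group:
 * * a catch-all miss flow that keeps the original RSS configuration,
 * * one flow per expanded packet type,
 * * a base flow that replaces the RSS action with a jump to that group.
 * The base flow heads the expansion list and is returned to the
 * application; the chained expansion flows are destroyed along with it.
 */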
struct rte_flow_hw *
flow_nta_handle_rss(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    const struct rte_flow_action_rss *rss_conf,
		    uint64_t item_flags, uint64_t action_flags,
		    bool external, enum mlx5_flow_type flow_type,
		    struct rte_flow_error *error)
{
	struct rte_flow_hw *rss_base = NULL, *rss_next = NULL, *rss_miss = NULL;
	struct rte_flow_action_rss ptype_rss_conf = *rss_conf;
	struct mlx5_nta_rss_ctx rss_ctx;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_conf->types);
	bool expand = true;
	bool copy_actions = false;
	bool inner_rss = rss_conf->level > 1;
	bool outer_rss = !inner_rss;
	bool l3_item = (outer_rss && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
		       (inner_rss && (item_flags & MLX5_FLOW_LAYER_INNER_L3));
	bool l4_item = (outer_rss && (item_flags & MLX5_FLOW_LAYER_OUTER_L4)) ||
		       (inner_rss && (item_flags & MLX5_FLOW_LAYER_INNER_L4));
	bool l3_hash = rss_types & (MLX5_IPV4_LAYER_TYPES | MLX5_IPV6_LAYER_TYPES);
	bool l4_hash = rss_types & (RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_ESP);
	struct mlx5_nta_rss_flow_head expansion_head = SLIST_HEAD_INITIALIZER(0);
	struct rte_flow_attr ptype_attr = {
		.ingress = 1
	};
	struct rte_flow_item_ptype ptype_spec = { .packet_type = 0 };
	const struct rte_flow_item_ptype ptype_mask = {
		.packet_type = outer_rss ?
			MLX5_PTYPE_RSS_OUTER_MASK : MLX5_PTYPE_RSS_INNER_MASK
	};
	struct rte_flow_item ptype_pattern[MLX5_RSS_PTYPE_ITEMS_NUM] = {
		[MLX5_RSS_PTYPE_ITEM_INDEX] = {
			.type = RTE_FLOW_ITEM_TYPE_PTYPE,
			.spec = &ptype_spec,
			.mask = &ptype_mask
		},
		[MLX5_RSS_PTYPE_ITEM_INDEX + 1] = { .type = RTE_FLOW_ITEM_TYPE_END }
	};
	struct rte_flow_action ptype_actions[MLX5_RSS_PTYPE_ACTIONS_NUM] = {
#ifdef MLX5_RSS_PTYPE_DEBUG
		[MLX5_RSS_PTYPE_ACTION_INDEX - 1] = {
			.type = RTE_FLOW_ACTION_TYPE_MARK,
			.conf = &(const struct rte_flow_action_mark) {.id = 101}
		},
#endif
		[MLX5_RSS_PTYPE_ACTION_INDEX] = {
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &ptype_rss_conf
		},
		[MLX5_RSS_PTYPE_ACTION_INDEX + 1] = { .type = RTE_FLOW_ACTION_TYPE_END }
	};

	ptype_rss_conf.types = rss_types;
	if (l4_item) {
		/*
		 * Original flow pattern extended up to L4 level.
		 * L4 is the maximal expansion level.
		 * Original pattern does not need expansion.
		 */
		expand = false;
	} else if (!l4_hash) {
		if (!l3_hash) {
			/*
			 * RSS action was not configured to hash L3 or L4.
			 * No expansion needed.
			 */
			expand = false;
		} else if (l3_item) {
			/*
			 * Original flow pattern extended up to L3 level.
			 * RSS action was not set for L4 hash.
			 */
			bool ip4_item =
				(outer_rss && (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||
				(inner_rss && (item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4));
			bool ip6_item =
				(outer_rss && (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||
				(inner_rss && (item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6));
			bool ip4_hash = rss_types & MLX5_IPV4_LAYER_TYPES;
			bool ip6_hash = rss_types & MLX5_IPV6_LAYER_TYPES;

			expand = false;
			if (ip4_item && ip4_hash) {
				ptype_rss_conf.types &= ~MLX5_IPV6_LAYER_TYPES;
				copy_actions = true;
			} else if (ip6_item && ip6_hash) {
				/*
				 * MLX5 HW will not activate the TIR IPv6 hash
				 * if that TIR also has an IPv4 hash.
				 */
				ptype_rss_conf.types &= ~MLX5_IPV4_LAYER_TYPES;
				copy_actions = true;
			}
		}
	}
	if (!expand)
		return flow_nta_create_single(dev, attr, items, actions,
					      &ptype_rss_conf, item_flags,
					      action_flags, external,
					      copy_actions, flow_type, error);
	/* Create RSS expansions in a dedicated PTYPE flow group. */
	ptype_attr.group = mlx5_hw_get_rss_ptype_group(dev);
	if (!ptype_attr.group) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL, "cannot get RSS PTYPE group");
		return NULL;
	}
	mlx5_nta_rss_init_ptype_ctx(&rss_ctx, dev, &ptype_attr, ptype_pattern,
				    ptype_actions, &ptype_rss_conf, &expansion_head,
				    error, item_flags, flow_type, external);
	rss_miss = mlx5_hw_rss_ptype_create_miss_flow(dev, &ptype_rss_conf, ptype_attr.group,
						      external, error);
	if (!rss_miss)
		goto error;
	if (l4_hash) {
		rss_next = mlx5_hw_rss_expand_l4(&rss_ctx);
		if (!rss_next)
			goto error;
	} else if (l3_hash) {
		rss_next = mlx5_hw_rss_expand_l3(&rss_ctx);
		if (!rss_next)
			goto error;
	}
	rss_base = mlx5_hw_rss_ptype_create_base_flow(dev, attr, items, actions,
						      ptype_attr.group, item_flags,
						      action_flags, external,
						      flow_type, error);
	if (!rss_base)
		goto error;
	SLIST_INSERT_HEAD(&expansion_head, rss_miss, nt2hws->next);
	SLIST_INSERT_HEAD(&expansion_head, rss_base, nt2hws->next);
	/*
	 * The PMD must return a reference to the base flow to the application.
	 * This way RSS expansion works with counter, meter and other
	 * flow actions.
	 */
	MLX5_ASSERT(rss_base == SLIST_FIRST(&expansion_head));
	rss_next = SLIST_NEXT(rss_base, nt2hws->next);
	while (rss_next) {
		rss_next->nt2hws->chaned_flow = 1;
		rss_next = SLIST_NEXT(rss_next, nt2hws->next);
	}
	return SLIST_FIRST(&expansion_head);

error:
	if (rss_miss)
		flow_hw_list_destroy(dev, flow_type, (uintptr_t)rss_miss);
	if (rss_next)
		flow_hw_list_destroy(dev, flow_type, (uintptr_t)rss_next);
	mlx5_hw_release_rss_ptype_group(dev, ptype_attr.group);
	return NULL;
}

#endif