xref: /dpdk/drivers/net/mlx5/mlx5_flow_flex.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2021 NVIDIA Corporation & Affiliates
3  */
4 #include <rte_malloc.h>
5 #include <mlx5_devx_cmds.h>
6 #include <mlx5_malloc.h>
7 #include "mlx5.h"
8 #include "mlx5_flow.h"
9 
10 static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
11 	      "Flex item maximal number exceeds uint32_t bit width");
12 
13 /**
14  *  Routine called once on port initialization to initialize flex item
15  *  related infrastructure.
16  *
17  * @param dev
18  *   Ethernet device to perform flex item initialization
19  *
20  * @return
21  *   0 on success, a negative errno value otherwise and rte_errno is set.
22  */
23 int
24 mlx5_flex_item_port_init(struct rte_eth_dev *dev)
25 {
26 	struct mlx5_priv *priv = dev->data->dev_private;
27 
28 	rte_spinlock_init(&priv->flex_item_sl);
29 	MLX5_ASSERT(!priv->flex_item_map);
30 	return 0;
31 }
32 
33 /**
34  *  Routine called once on port close to perform flex item
35  *  related infrastructure cleanup.
36  *
37  * @param dev
38  *   Ethernet device to perform cleanup
39  */
40 void
41 mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
42 {
43 	struct mlx5_priv *priv = dev->data->dev_private;
44 	uint32_t i;
45 
46 	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
47 		if (priv->flex_item_map & (1 << i)) {
48 			struct mlx5_flex_item *flex = &priv->flex_item[i];
49 
50 			claim_zero(mlx5_list_unregister
51 					(priv->sh->flex_parsers_dv,
52 					 &flex->devx_fp->entry));
53 			flex->devx_fp = NULL;
54 			flex->refcnt = 0;
55 			priv->flex_item_map &= ~(1 << i);
56 		}
57 	}
58 }
59 
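/*
 * Map a flex item pointer back to its index in the per-port flex_item[]
 * array. Returns -1 if the pointer does not reference a valid allocated
 * entry (out of range, misaligned, or not set in flex_item_map).
 */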
60 static int
61 mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
62 {
63 	uintptr_t start = (uintptr_t)&priv->flex_item[0];
64 	uintptr_t entry = (uintptr_t)item;
65 	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
66 
67 	if (entry < start ||
68 	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
69 	    (entry - start) % sizeof(struct mlx5_flex_item) ||
70 	    !(priv->flex_item_map & (1u << idx)))
71 		return -1;
72 	return (int)idx;
73 }
74 
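/*
 * Allocate a free flex item slot on the port under the flex_item_sl
 * spinlock. Returns NULL if all MLX5_PORT_FLEX_ITEM_NUM slots are busy.
 */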
75 static struct mlx5_flex_item *
76 mlx5_flex_alloc(struct mlx5_priv *priv)
77 {
78 	struct mlx5_flex_item *item = NULL;
79 
80 	rte_spinlock_lock(&priv->flex_item_sl);
81 	if (~priv->flex_item_map) {
82 		uint32_t idx = rte_bsf32(~priv->flex_item_map);
83 
84 		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
85 			item = &priv->flex_item[idx];
86 			MLX5_ASSERT(!item->refcnt);
87 			MLX5_ASSERT(!item->devx_fp);
88 			item->devx_fp = NULL;
89 			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
90 			priv->flex_item_map |= 1u << idx;
91 		}
92 	}
93 	rte_spinlock_unlock(&priv->flex_item_sl);
94 	return item;
95 }
96 
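/*
 * Return a flex item slot to the free pool and clear its bit in
 * flex_item_map. The item is expected to have no remaining references.
 */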
97 static void
98 mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
99 {
100 	int idx = mlx5_flex_index(priv, item);
101 
102 	MLX5_ASSERT(idx >= 0 &&
103 		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
104 		    (priv->flex_item_map & (1u << idx)));
105 	if (idx >= 0) {
106 		rte_spinlock_lock(&priv->flex_item_sl);
107 		MLX5_ASSERT(!item->refcnt);
108 		MLX5_ASSERT(!item->devx_fp);
109 		item->devx_fp = NULL;
110 		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
111 		priv->flex_item_map &= ~(1u << idx);
112 		rte_spinlock_unlock(&priv->flex_item_sl);
113 	}
114 }
115 
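/*
 * Extract a bitfield of 'width' bits starting at bit position 'pos' from
 * the flex item pattern buffer, apply 'shift' and return the result
 * byte-swapped to match the sample register layout. Bits beyond the
 * pattern length are read as zeroes.
 */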
116 static uint32_t
117 mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
118 		       uint32_t pos, uint32_t width, uint32_t shift)
119 {
120 	const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
121 	uint32_t val, vbits;
122 
123 	/* Process the bitfield start byte. */
124 	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
125 	MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
126 	if (item->length <= pos / CHAR_BIT)
127 		return 0;
128 	val = *ptr++ >> (pos % CHAR_BIT);
129 	vbits = CHAR_BIT - pos % CHAR_BIT;
130 	pos = (pos + vbits) / CHAR_BIT;
131 	vbits = RTE_MIN(vbits, width);
132 	val &= RTE_BIT32(vbits) - 1;
133 	while (vbits < width && pos < item->length) {
134 		uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
135 		uint32_t tmp = *ptr++;
136 
137 		pos++;
138 		tmp &= RTE_BIT32(part) - 1;
139 		val |= tmp << vbits;
140 		vbits += part;
141 	}
142 	return rte_bswap32(val << shift);
143 }
144 
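/*
 * Helper macro to program sample register <x> in misc_parameters_4:
 * merges the default mask 'def' with the supplied value/mask pair and
 * sets the matching sample id only when the resulting mask is non-zero.
 */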
145 #define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
146 	do { \
147 		uint32_t tmp, out = (def); \
148 		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
149 			       prog_sample_field_value_##x); \
150 		tmp = (tmp & ~out) | (val); \
151 		MLX5_SET(fte_match_set_misc4, misc4_v, \
152 			 prog_sample_field_value_##x, tmp); \
153 		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
154 			       prog_sample_field_value_##x); \
155 		tmp = (tmp & ~out) | (msk); \
156 		MLX5_SET(fte_match_set_misc4, misc4_m, \
157 			 prog_sample_field_value_##x, tmp); \
158 		tmp = tmp ? (sid) : 0; \
159 		MLX5_SET(fte_match_set_misc4, misc4_v, \
160 			 prog_sample_field_id_##x, tmp);\
161 		MLX5_SET(fte_match_set_misc4, misc4_m, \
162 			 prog_sample_field_id_##x, tmp); \
163 	} while (0)
164 
165 __rte_always_inline static void
166 mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
167 			   uint32_t def, uint32_t mask, uint32_t value,
168 			   uint32_t sample_id, uint32_t id)
169 {
170 	switch (id) {
171 	case 0:
172 		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
173 		break;
174 	case 1:
175 		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
176 		break;
177 	case 2:
178 		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
179 		break;
180 	case 3:
181 		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
182 		break;
183 	case 4:
184 		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
185 		break;
186 	case 5:
187 		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
188 		break;
189 	case 6:
190 		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
191 		break;
192 	case 7:
193 		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
194 		break;
195 	default:
196 		MLX5_ASSERT(false);
197 		break;
198 	}
199 #undef SET_FP_MATCH_SAMPLE_ID
200 }
201 /**
202  * Translate item pattern into matcher fields according to translation
203  * array.
204  *
205  * @param dev
206  *   Ethernet device to translate flex item on.
207  * @param[in, out] matcher
208  *   Flow matcher to configure
209  * @param[in, out] key
210  *   Flow matcher value.
211  * @param[in] item
212  *   Flow pattern to translate.
213  * @param[in] is_inner
214  *   Inner Flex Item (follows after tunnel header).
218  */
219 void
220 mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
221 			      void *matcher, void *key,
222 			      const struct rte_flow_item *item,
223 			      bool is_inner)
224 {
225 	const struct rte_flow_item_flex *spec, *mask;
226 	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
227 				     misc_parameters_4);
228 	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
229 	struct mlx5_priv *priv = dev->data->dev_private;
230 	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
231 	struct mlx5_flex_item *tp;
232 	uint32_t i, pos = 0;
233 	uint32_t sample_id;
234 
235 	RTE_SET_USED(dev);
236 	MLX5_ASSERT(item->spec && item->mask);
237 	spec = item->spec;
238 	mask = item->mask;
239 	tp = (struct mlx5_flex_item *)spec->handle;
240 	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
241 	for (i = 0; i < tp->mapnum; i++) {
242 		struct mlx5_flex_pattern_field *map = tp->map + i;
243 		uint32_t id = map->reg_id;
244 		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
245 		uint32_t val, msk;
246 
247 		/* Skip placeholders for DUMMY fields. */
248 		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
249 			pos += map->width;
250 			continue;
251 		}
252 		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
253 		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
254 		MLX5_ASSERT(map->width);
255 		MLX5_ASSERT(id < tp->devx_fp->num_samples);
256 		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
257 			uint32_t num_samples = tp->devx_fp->num_samples / 2;
258 
259 			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
260 			MLX5_ASSERT(id < num_samples);
261 			id += num_samples;
262 		}
263 		if (attr->ext_sample_id)
264 			sample_id = tp->devx_fp->sample_ids[id].sample_id;
265 		else
266 			sample_id = tp->devx_fp->sample_ids[id].id;
267 		mlx5_flex_set_match_sample(misc4_m, misc4_v,
268 					   def, msk & def, val & msk & def,
269 					   sample_id, id);
270 		pos += map->width;
271 	}
272 }
273 
274 /**
275  * Convert flex item handle (from the RTE flow) to flex item index on port.
276  * Optionally increments the flex item object reference count.
277  *
278  * @param dev
279  *   Ethernet device to acquire flex item on.
280  * @param[in] handle
281  *   Flow item handle from item spec.
282  * @param[in] acquire
283  *   If set - increment reference counter.
284  *
285  * @return
286  *   >=0 - index on success, a negative errno value otherwise
287  *         and rte_errno is set.
288  */
289 int
290 mlx5_flex_acquire_index(struct rte_eth_dev *dev,
291 			struct rte_flow_item_flex_handle *handle,
292 			bool acquire)
293 {
294 	struct mlx5_priv *priv = dev->data->dev_private;
295 	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
296 	int ret = mlx5_flex_index(priv, flex);
297 
298 	if (ret < 0) {
299 		errno = EINVAL;
300 		rte_errno = EINVAL;
301 		return ret;
302 	}
303 	if (acquire)
304 		__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
305 	return ret;
306 }
307 
308 /**
309  * Release the flex item on port by index - decrements its reference counter.
310  *
311  * @param dev
312  *   Ethernet device to release flex item on.
313  * @param[in] index
314  *   Flow item index.
315  *
316  * @return
317  *   0 - on success, a negative errno value otherwise and rte_errno is set.
318  */
319 int
320 mlx5_flex_release_index(struct rte_eth_dev *dev,
321 			int index)
322 {
323 	struct mlx5_priv *priv = dev->data->dev_private;
324 	struct mlx5_flex_item *flex;
325 
326 	if (index >= MLX5_PORT_FLEX_ITEM_NUM ||
327 	    !(priv->flex_item_map & (1u << index))) {
328 		errno = EINVAL;
329 		rte_errno = EINVAL;
330 		return -EINVAL;
331 	}
332 	flex = priv->flex_item + index;
333 	if (flex->refcnt <= 1) {
334 		MLX5_ASSERT(false);
335 		errno = EINVAL;
336 		rte_errno = EINVAL;
337 		return -EINVAL;
338 	}
339 	__atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
340 	return 0;
341 }
342 
343 /*
344  * Calculate largest mask value for a given shift.
345  *
346  *   shift      mask
347  * ------- ---------------
348  *    0     b111100  0x3C
349  *    1     b111110  0x3E
350  *    2     b111111  0x3F
351  *    3     b011111  0x1F
352  *    4     b001111  0x0F
353  *    5     b000111  0x07
354  */
355 static uint8_t
356 mlx5_flex_hdr_len_mask(uint8_t shift,
357 		       const struct mlx5_hca_flex_attr *attr)
358 {
359 	uint32_t base_mask;
360 	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;
361 
362 	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
363 	return diff == 0 ? base_mask :
364 	       diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
365 }
366 
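/*
 * Validate the header length field configuration against the HCA flex
 * parser capabilities and translate it into the DevX graph node header
 * length attributes (mode, base value, field mask, shift and offset).
 */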
367 static int
368 mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
369 			   const struct rte_flow_item_flex_conf *conf,
370 			   struct mlx5_flex_parser_devx *devx,
371 			   struct rte_flow_error *error)
372 {
373 	const struct rte_flow_item_flex_field *field = &conf->next_header;
374 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
375 	uint32_t len_width, mask;
376 
377 	if (field->field_base % CHAR_BIT)
378 		return rte_flow_error_set
379 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
380 			 "not byte aligned header length field");
381 	switch (field->field_mode) {
382 	case FIELD_MODE_DUMMY:
383 		return rte_flow_error_set
384 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
385 			 "invalid header length field mode (DUMMY)");
386 	case FIELD_MODE_FIXED:
387 		if (!(attr->header_length_mode &
388 		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
389 			return rte_flow_error_set
390 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
391 				 "unsupported header length field mode (FIXED)");
392 		if (field->field_size ||
393 		    field->offset_mask || field->offset_shift)
394 			return rte_flow_error_set
395 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
396 				 "invalid fields for fixed mode");
397 		if (field->field_base < 0)
398 			return rte_flow_error_set
399 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
400 				 "negative header length field base (FIXED)");
401 		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
402 		break;
403 	case FIELD_MODE_OFFSET:
404 		if (!(attr->header_length_mode &
405 		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
406 			return rte_flow_error_set
407 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
408 				 "unsupported header length field mode (OFFSET)");
409 		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
410 		if (field->offset_mask == 0 ||
411 		    !rte_is_power_of_2(field->offset_mask + 1))
412 			return rte_flow_error_set
413 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
414 				 "invalid length field offset mask (OFFSET)");
415 		len_width = rte_fls_u32(field->offset_mask);
416 		if (len_width > attr->header_length_mask_width)
417 			return rte_flow_error_set
418 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
419 				 "length field offset mask too wide (OFFSET)");
420 		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
421 		if (mask < field->offset_mask)
422 			return rte_flow_error_set
423 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
424 				 "length field shift too big (OFFSET)");
425 		node->header_length_field_mask = RTE_MIN(mask,
426 							 field->offset_mask);
427 		break;
428 	case FIELD_MODE_BITMASK:
429 		if (!(attr->header_length_mode &
430 		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
431 			return rte_flow_error_set
432 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
433 				 "unsupported header length field mode (BITMASK)");
434 		if (attr->header_length_mask_width < field->field_size)
435 			return rte_flow_error_set
436 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
437 				 "header length field width exceeds limit");
438 		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
439 		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
440 		if (mask < field->offset_mask)
441 			return rte_flow_error_set
442 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
443 				 "length field shift too big (BITMASK)");
444 		node->header_length_field_mask = RTE_MIN(mask,
445 							 field->offset_mask);
446 		break;
447 	default:
448 		return rte_flow_error_set
449 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
450 			 "unknown header length field mode");
451 	}
452 	if (field->field_base / CHAR_BIT >= 0 &&
453 	    field->field_base / CHAR_BIT > attr->max_base_header_length)
454 		return rte_flow_error_set
455 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
456 			 "header length field base exceeds limit");
457 	node->header_length_base_value = field->field_base / CHAR_BIT;
458 	if (field->field_mode == FIELD_MODE_OFFSET ||
459 	    field->field_mode == FIELD_MODE_BITMASK) {
460 		if (field->offset_shift > 15 || field->offset_shift < 0)
461 			return rte_flow_error_set
462 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
463 				 "header length field shift exceeds limit");
464 		node->header_length_field_shift = field->offset_shift;
465 		node->header_length_field_offset = field->offset_base;
466 	}
467 	return 0;
468 }
469 
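/*
 * Validate the next protocol field configuration and translate it into
 * the DevX graph node next header field offset and size. Only the FIXED
 * field mode is accepted (DUMMY is allowed when there are no output links).
 */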
470 static int
471 mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
472 			 const struct rte_flow_item_flex_conf *conf,
473 			 struct mlx5_flex_parser_devx *devx,
474 			 struct rte_flow_error *error)
475 {
476 	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
477 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
478 
479 	switch (field->field_mode) {
480 	case FIELD_MODE_DUMMY:
481 		if (conf->nb_outputs)
482 			return rte_flow_error_set
483 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
484 				 "next protocol field is required (DUMMY)");
485 		return 0;
486 	case FIELD_MODE_FIXED:
487 		break;
488 	case FIELD_MODE_OFFSET:
489 		return rte_flow_error_set
490 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
491 			 "unsupported next protocol field mode (OFFSET)");
493 	case FIELD_MODE_BITMASK:
494 		return rte_flow_error_set
495 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
496 			 "unsupported next protocol field mode (BITMASK)");
497 	default:
498 		return rte_flow_error_set
499 			(error, EINVAL,
500 			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
501 			 "unknown next protocol field mode");
502 	}
503 	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
504 	if (!conf->nb_outputs)
505 		return rte_flow_error_set
506 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
507 			 "out link(s) are required if next field present");
508 	if (attr->max_next_header_offset < field->field_base)
509 		return rte_flow_error_set
510 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
511 			 "next protocol field base exceeds limit");
512 	if (field->offset_shift)
513 		return rte_flow_error_set
514 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
515 			 "unsupported next protocol field shift");
516 	node->next_header_field_offset = field->field_base;
517 	node->next_header_field_size = field->field_size;
518 	return 0;
519 }
520 
521 /* Helper structure to handle field bit intervals. */
522 struct mlx5_flex_field_cover {
523 	uint16_t num;
524 	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
525 	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
526 	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
527 };
528 
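/* Insert the [start, end) interval at position 'num' keeping the array sorted. */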
529 static void
530 mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
531 		       uint16_t num, int32_t start, int32_t end)
532 {
533 	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
534 	MLX5_ASSERT(num <= cover->num);
535 	if (num < cover->num) {
536 		memmove(&cover->start[num + 1], &cover->start[num],
537 			(cover->num - num) * sizeof(int32_t));
538 		memmove(&cover->end[num + 1], &cover->end[num],
539 			(cover->num - num) * sizeof(int32_t));
540 	}
541 	cover->start[num] = start;
542 	cover->end[num] = end;
543 	cover->num++;
544 }
545 
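/* Merge the interval at 'num' with the following overlapping intervals. */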
546 static void
547 mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
548 {
549 	uint32_t i, del = 0;
550 	int32_t end;
551 
552 	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
553 	MLX5_ASSERT(num < (cover->num - 1));
554 	end = cover->end[num];
555 	for (i = num + 1; i < cover->num; i++) {
556 		if (end < cover->start[i])
557 			break;
558 		del++;
559 		if (end <= cover->end[i]) {
560 			cover->end[num] = cover->end[i];
561 			break;
562 		}
563 	}
564 	if (del) {
565 		MLX5_ASSERT(del < (cover->num - 1u - num));
566 		cover->num -= del;
567 		MLX5_ASSERT(cover->num > num);
568 		if ((cover->num - num) > 1) {
569 			memmove(&cover->start[num + 1],
570 				&cover->start[num + 1 + del],
571 				(cover->num - num - 1) * sizeof(int32_t));
572 			memmove(&cover->end[num + 1],
573 				&cover->end[num + 1 + del],
574 				(cover->num - num - 1) * sizeof(int32_t));
575 		}
576 	}
577 }
578 
579 /*
580  * Validate the sample field and update interval array
581  * if parameters match with the 'match' field.
582  * Returns:
583  *    < 0  - error
584  *    == 0 - no match, interval array not updated
585  *    > 0  - match, interval array updated
586  */
587 static int
588 mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
589 		       struct rte_flow_item_flex_field *field,
590 		       struct rte_flow_item_flex_field *match,
591 		       struct mlx5_hca_flex_attr *attr,
592 		       struct rte_flow_error *error)
593 {
594 	int32_t start, end;
595 	uint32_t i;
596 
597 	switch (field->field_mode) {
598 	case FIELD_MODE_DUMMY:
599 		return 0;
600 	case FIELD_MODE_FIXED:
601 		if (!(attr->sample_offset_mode &
602 		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
603 			return rte_flow_error_set
604 				(error, EINVAL,
605 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
606 				 "unsupported sample field mode (FIXED)");
607 		if (field->offset_shift)
608 			return rte_flow_error_set
609 				(error, EINVAL,
610 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
611 				 "invalid sample field shift (FIXED)");
612 		if (field->field_base < 0)
613 			return rte_flow_error_set
614 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
615 				 "invalid sample field base (FIXED)");
616 		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
617 			return rte_flow_error_set
618 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
619 				 "sample field base exceeds limit (FIXED)");
620 		break;
621 	case FIELD_MODE_OFFSET:
622 		if (!(attr->sample_offset_mode &
623 		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
624 			return rte_flow_error_set
625 				(error, EINVAL,
626 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
627 				 "unsupported sample field mode (OFFSET)");
628 		if (field->field_base / CHAR_BIT >= 0 &&
629 		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
630 			return rte_flow_error_set
631 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
632 				"sample field base exceeds limit");
633 		break;
634 	case FIELD_MODE_BITMASK:
635 		if (!(attr->sample_offset_mode &
636 		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
637 			return rte_flow_error_set
638 				(error, EINVAL,
639 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
640 				 "unsupported sample field mode (BITMASK)");
641 		if (field->field_base / CHAR_BIT >= 0 &&
642 		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
643 			return rte_flow_error_set
644 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
645 				"sample field base exceeds limit");
646 		break;
647 	default:
648 		return rte_flow_error_set
649 			(error, EINVAL,
650 			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
651 			 "unknown data sample field mode");
652 	}
653 	if (!match) {
654 		if (!field->field_size)
655 			return rte_flow_error_set
656 				(error, EINVAL,
657 				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
658 				"zero sample field width");
659 		if (field->field_id)
660 			DRV_LOG(DEBUG, "sample field id hint ignored");
661 	} else {
662 		if (field->field_mode != match->field_mode ||
663 		    field->offset_base | match->offset_base ||
664 		    field->offset_mask | match->offset_mask ||
665 		    field->offset_shift | match->offset_shift)
666 			return 0;
667 	}
668 	start = field->field_base;
669 	end = start + field->field_size;
670 	/* Add the new or similar field to interval array. */
671 	if (!cover->num) {
672 		cover->start[cover->num] = start;
673 		cover->end[cover->num] = end;
674 		cover->num = 1;
675 		return 1;
676 	}
677 	for (i = 0; i < cover->num; i++) {
678 		if (start > cover->end[i]) {
679 			if (i >= (cover->num - 1u)) {
680 				mlx5_flex_insert_field(cover, cover->num,
681 						       start, end);
682 				break;
683 			}
684 			continue;
685 		}
686 		if (end < cover->start[i]) {
687 			mlx5_flex_insert_field(cover, i, start, end);
688 			break;
689 		}
690 		if (start < cover->start[i])
691 			cover->start[i] = start;
692 		if (end > cover->end[i]) {
693 			cover->end[i] = end;
694 			if (i < (cover->num - 1u))
695 				mlx5_flex_merge_field(cover, i);
696 		}
697 		break;
698 	}
699 	return 1;
700 }
701 
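/*
 * Fill a DevX sample attribute template from the field offset mode and
 * the flex item tunnel mode. The sample base offset is assigned later
 * during sample register allocation.
 */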
702 static void
703 mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
704 			struct rte_flow_item_flex_field *field,
705 			enum rte_flow_item_flex_tunnel_mode tunnel_mode)
706 {
707 	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
708 	na->flow_match_sample_en = 1;
709 	switch (field->field_mode) {
710 	case FIELD_MODE_FIXED:
711 		na->flow_match_sample_offset_mode =
712 			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
713 		break;
714 	case FIELD_MODE_OFFSET:
715 		na->flow_match_sample_offset_mode =
716 			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
717 		na->flow_match_sample_field_offset = field->offset_base;
718 		na->flow_match_sample_field_offset_mask = field->offset_mask;
719 		na->flow_match_sample_field_offset_shift = field->offset_shift;
720 		break;
721 	case FIELD_MODE_BITMASK:
722 		na->flow_match_sample_offset_mode =
723 			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
724 		na->flow_match_sample_field_offset = field->offset_base;
725 		na->flow_match_sample_field_offset_mask = field->offset_mask;
726 		na->flow_match_sample_field_offset_shift = field->offset_shift;
727 		break;
728 	default:
729 		MLX5_ASSERT(false);
730 		break;
731 	}
732 	switch (tunnel_mode) {
733 	case FLEX_TUNNEL_MODE_SINGLE:
734 		/* Fallthrough */
735 	case FLEX_TUNNEL_MODE_TUNNEL:
736 		na->flow_match_sample_tunnel_mode =
737 			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
738 		break;
739 	case FLEX_TUNNEL_MODE_MULTI:
740 		/* Fallthrough */
741 	case FLEX_TUNNEL_MODE_OUTER:
742 		na->flow_match_sample_tunnel_mode =
743 			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
744 		break;
745 	case FLEX_TUNNEL_MODE_INNER:
746 		na->flow_match_sample_tunnel_mode =
747 			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
748 		break;
749 	default:
750 		MLX5_ASSERT(false);
751 		break;
752 	}
753 }
754 
755 /* Map specified field to set/subset of allocated sample registers. */
756 static int
757 mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
758 		     struct mlx5_flex_parser_devx *parser,
759 		     struct mlx5_flex_item *item,
760 		     struct rte_flow_error *error)
761 {
762 	struct mlx5_devx_match_sample_attr node;
763 	int32_t start = field->field_base;
764 	int32_t end = start + field->field_size;
765 	struct mlx5_flex_pattern_field *trans;
766 	uint32_t i, done_bits = 0;
767 
768 	if (field->field_mode == FIELD_MODE_DUMMY) {
769 		done_bits = field->field_size;
770 		while (done_bits) {
771 			uint32_t part = RTE_MIN(done_bits,
772 						sizeof(uint32_t) * CHAR_BIT);
773 			if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
774 				return rte_flow_error_set
775 					(error,
776 					 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
777 					 "too many flex item pattern translations");
778 			trans = &item->map[item->mapnum];
779 			trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
780 			trans->shift = 0;
781 			trans->width = part;
782 			item->mapnum++;
783 			done_bits -= part;
784 		}
785 		return 0;
786 	}
787 	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
788 	for (i = 0; i < parser->num_samples; i++) {
789 		struct mlx5_devx_match_sample_attr *sample =
790 			&parser->devx_conf.sample[i];
791 		int32_t reg_start, reg_end;
792 		int32_t cov_start, cov_end;
793 
794 		MLX5_ASSERT(sample->flow_match_sample_en);
795 		if (!sample->flow_match_sample_en)
796 			break;
797 		node.flow_match_sample_field_base_offset =
798 			sample->flow_match_sample_field_base_offset;
799 		if (memcmp(&node, sample, sizeof(node)))
800 			continue;
801 		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
802 		reg_start *= CHAR_BIT;
803 		reg_end = reg_start + 32;
804 		if (end <= reg_start || start >= reg_end)
805 			continue;
806 		cov_start = RTE_MAX(reg_start, start);
807 		cov_end = RTE_MIN(reg_end, end);
808 		MLX5_ASSERT(cov_end > cov_start);
809 		done_bits += cov_end - cov_start;
810 		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
811 			return rte_flow_error_set
812 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
813 				 "too many flex item pattern translations");
814 		trans = &item->map[item->mapnum];
815 		item->mapnum++;
816 		trans->reg_id = i;
817 		trans->shift = cov_start - reg_start;
818 		trans->width = cov_end - cov_start;
819 	}
820 	if (done_bits != field->field_size) {
821 		MLX5_ASSERT(false);
822 		return rte_flow_error_set
823 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
824 			 "failed to map field to sample register");
825 	}
826 	return 0;
827 }
828 
829 /* Allocate sample registers for the specified field type and interval array. */
830 static int
831 mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
832 		       struct mlx5_flex_parser_devx *parser,
833 		       struct mlx5_flex_item *item,
834 		       struct rte_flow_item_flex_field *field,
835 		       struct mlx5_hca_flex_attr *attr,
836 		       struct rte_flow_error *error)
837 {
838 	struct mlx5_devx_match_sample_attr node;
839 	uint32_t idx = 0;
840 
841 	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
842 	while (idx < cover->num) {
843 		int32_t start, end;
844 
845 		/*
846 		 * Sample base offsets are in bytes, should be aligned
847 		 * to 32-bit as required by firmware for samples.
848 		 */
849 		start = RTE_ALIGN_FLOOR(cover->start[idx],
850 					sizeof(uint32_t) * CHAR_BIT);
851 		node.flow_match_sample_field_base_offset =
852 						(start / CHAR_BIT) & 0xFF;
853 		/* Allocate sample register. */
854 		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
855 		    parser->num_samples >= attr->max_num_sample ||
856 		    parser->num_samples >= attr->max_num_prog_sample)
857 			return rte_flow_error_set
858 				(error, EINVAL,
859 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
860 				 "no sample registers to handle all flex item fields");
861 		parser->devx_conf.sample[parser->num_samples] = node;
862 		parser->num_samples++;
863 		/* Remove or update covered intervals. */
864 		end = start + 32;
865 		while (idx < cover->num) {
866 			if (end >= cover->end[idx]) {
867 				idx++;
868 				continue;
869 			}
870 			if (end > cover->start[idx])
871 				cover->start[idx] = end;
872 			break;
873 		}
874 	}
875 	return 0;
876 }
877 
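/*
 * Translate all sample fields of the flex item configuration: gather
 * similar fields into sorted bit interval arrays, allocate the minimal
 * set of 32-bit sample registers covering them and build the pattern
 * translation map used on flow creation.
 */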
878 static int
879 mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
880 			   const struct rte_flow_item_flex_conf *conf,
881 			   struct mlx5_flex_parser_devx *parser,
882 			   struct mlx5_flex_item *item,
883 			   struct rte_flow_error *error)
884 {
885 	struct mlx5_flex_field_cover cover;
886 	uint32_t i, j;
887 	int ret;
888 
889 	switch (conf->tunnel) {
890 	case FLEX_TUNNEL_MODE_SINGLE:
891 		/* Fallthrough */
892 	case FLEX_TUNNEL_MODE_OUTER:
893 		/* Fallthrough */
894 	case FLEX_TUNNEL_MODE_INNER:
895 		/* Fallthrough */
896 	case FLEX_TUNNEL_MODE_MULTI:
897 		/* Fallthrough */
898 	case FLEX_TUNNEL_MODE_TUNNEL:
899 		break;
900 	default:
901 		return rte_flow_error_set
902 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
903 			 "unrecognized tunnel mode");
904 	}
905 	item->tunnel_mode = conf->tunnel;
906 	if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
907 		return rte_flow_error_set
908 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
909 			 "sample field number exceeds limit");
910 	/*
911 	 * The application can specify fields smaller or larger than the 32 bits
912 	 * covered by a single sample register, and it can specify field
913 	 * offsets in any order.
914 	 *
915 	 * Gather all similar fields together, build array of bit intervals
916 	 * in ascending order and try to cover with the smallest set of sample
917 	 * registers.
918 	 */
919 	memset(&cover, 0, sizeof(cover));
920 	for (i = 0; i < conf->nb_samples; i++) {
921 		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
922 
923 		/* Check whether field was covered in the previous iteration. */
924 		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
925 			continue;
926 		if (fl->field_mode == FIELD_MODE_DUMMY)
927 			continue;
928 		/* Build an interval array for the field and similar ones */
929 		cover.num = 0;
930 		/* Add the first field to array unconditionally. */
931 		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
932 		if (ret < 0)
933 			return ret;
934 		MLX5_ASSERT(ret > 0);
935 		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
936 		for (j = i + 1; j < conf->nb_samples; j++) {
937 			struct rte_flow_item_flex_field *ft;
938 
939 			/* Add field to array if its type matches. */
940 			ft = conf->sample_data + j;
941 			ret = mlx5_flex_cover_sample(&cover, ft, fl,
942 						     attr, error);
943 			if (ret < 0)
944 				return ret;
945 			if (!ret)
946 				continue;
947 			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
948 		}
949 		/* Allocate sample registers to cover array of intervals. */
950 		ret = mlx5_flex_alloc_sample(&cover, parser, item,
951 					     fl, attr, error);
952 		if (ret)
953 			return ret;
954 	}
955 	/* Build the item pattern translating data on flow creation. */
956 	item->mapnum = 0;
957 	memset(&item->map, 0, sizeof(item->map));
958 	for (i = 0; i < conf->nb_samples; i++) {
959 		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
960 
961 		ret = mlx5_flex_map_sample(fl, parser, item, error);
962 		if (ret) {
963 			MLX5_ASSERT(false);
964 			return ret;
965 		}
966 	}
967 	if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
968 		/*
969 		 * In FLEX_TUNNEL_MODE_MULTI tunnel mode PMD creates 2 sets
970 		 * of samples. The first set is for outer and the second set
971 		 * for inner flex flow item. Outer and inner samples differ
972 		 * only in tunnel_mode.
973 		 */
974 		if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
975 			return rte_flow_error_set
976 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
977 				 "no sample registers for inner");
978 		rte_memcpy(parser->devx_conf.sample + parser->num_samples,
979 			   parser->devx_conf.sample,
980 			   parser->num_samples *
981 					sizeof(parser->devx_conf.sample[0]));
982 		for (i = 0; i < parser->num_samples; i++) {
983 			struct mlx5_devx_match_sample_attr *sm = i +
984 				parser->devx_conf.sample + parser->num_samples;
985 
986 			sm->flow_match_sample_tunnel_mode =
987 						MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
988 		}
989 		parser->num_samples *= 2;
990 	}
991 	return 0;
992 }
993 
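/*
 * Map an RTE flow item type to the parse graph arc node type.
 * The 'in' flag selects the combined IP node for input arcs.
 */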
994 static int
995 mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
996 {
997 	switch (type) {
998 	case RTE_FLOW_ITEM_TYPE_ETH:
999 		return  MLX5_GRAPH_ARC_NODE_MAC;
1000 	case RTE_FLOW_ITEM_TYPE_IPV4:
1001 		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
1002 	case RTE_FLOW_ITEM_TYPE_IPV6:
1003 		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
1004 	case RTE_FLOW_ITEM_TYPE_UDP:
1005 		return MLX5_GRAPH_ARC_NODE_UDP;
1006 	case RTE_FLOW_ITEM_TYPE_TCP:
1007 		return MLX5_GRAPH_ARC_NODE_TCP;
1008 	case RTE_FLOW_ITEM_TYPE_MPLS:
1009 		return MLX5_GRAPH_ARC_NODE_MPLS;
1010 	case RTE_FLOW_ITEM_TYPE_GRE:
1011 		return MLX5_GRAPH_ARC_NODE_GRE;
1012 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1013 		return MLX5_GRAPH_ARC_NODE_GENEVE;
1014 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1015 		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
1016 	default:
1017 		return -EINVAL;
1018 	}
1019 }
1020 
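/*
 * Validate that the ETH input link matches on ether_type only and
 * return the type value in host byte order.
 */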
1021 static int
1022 mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
1023 		     struct rte_flow_error *error)
1024 {
1025 	const struct rte_flow_item_eth *spec = item->spec;
1026 	const struct rte_flow_item_eth *mask = item->mask;
1027 	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
1028 
1029 	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
1030 		return rte_flow_error_set
1031 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1032 			 "invalid eth item mask");
1033 	}
1034 	return rte_be_to_cpu_16(spec->hdr.ether_type);
1035 }
1036 
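/*
 * Validate that the UDP input link matches on destination port only and
 * return the port value in host byte order.
 */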
1037 static int
1038 mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
1039 		     struct rte_flow_error *error)
1040 {
1041 	const struct rte_flow_item_udp *spec = item->spec;
1042 	const struct rte_flow_item_udp *mask = item->mask;
1043 	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
1044 
1045 	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
1046 		return rte_flow_error_set
1047 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1048 			 "invalid udp item mask");
1049 	}
1050 	return rte_be_to_cpu_16(spec->hdr.dst_port);
1051 }
1052 
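/*
 * Translate the input links into DevX graph IN arcs and configure the
 * arc condition values from the supplied ETH/UDP item specs.
 */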
1053 static int
1054 mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
1055 			   const struct rte_flow_item_flex_conf *conf,
1056 			   struct mlx5_flex_parser_devx *devx,
1057 			   struct mlx5_flex_item *item,
1058 			   struct rte_flow_error *error)
1059 {
1060 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
1061 	uint32_t i;
1062 
1063 	RTE_SET_USED(item);
1064 	if (conf->nb_inputs > attr->max_num_arc_in)
1065 		return rte_flow_error_set
1066 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1067 			 "too many input links");
1068 	for (i = 0; i < conf->nb_inputs; i++) {
1069 		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
1070 		struct rte_flow_item_flex_link *link = conf->input_link + i;
1071 		const struct rte_flow_item *rte_item = &link->item;
1072 		int arc_type;
1073 		int ret;
1074 
1075 		if (!rte_item->spec || !rte_item->mask || rte_item->last)
1076 			return rte_flow_error_set
1077 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1078 				 "invalid flex item IN arc format");
1079 		arc_type = mlx5_flex_arc_type(rte_item->type, true);
1080 		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
1081 			return rte_flow_error_set
1082 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1083 				 "unsupported flex item IN arc type");
1084 		arc->arc_parse_graph_node = arc_type;
1085 		arc->start_inner_tunnel = 0;
1086 		/*
1087 		 * Configure arc IN condition value. The value location depends
1088 		 * on protocol. Current FW version supports ETH & UDP for IN
1089 		 * arcs only, and locations for these protocols are defined.
1090 		 * Add more protocols when available.
1091 		 */
1092 		switch (rte_item->type) {
1093 		case RTE_FLOW_ITEM_TYPE_ETH:
1094 			ret = mlx5_flex_arc_in_eth(rte_item, error);
1095 			break;
1096 		case RTE_FLOW_ITEM_TYPE_UDP:
1097 			ret = mlx5_flex_arc_in_udp(rte_item, error);
1098 			break;
1099 		default:
1100 			MLX5_ASSERT(false);
1101 			return rte_flow_error_set
1102 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1103 				 "unsupported flex item IN arc type");
1104 		}
1105 		if (ret < 0)
1106 			return ret;
1107 		arc->compare_condition_value = (uint16_t)ret;
1108 	}
1109 	return 0;
1110 }
1111 
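/*
 * Translate the output links into DevX graph OUT arcs, marking them as
 * starting an inner tunnel when the item is configured in TUNNEL mode.
 */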
1112 static int
1113 mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
1114 			    const struct rte_flow_item_flex_conf *conf,
1115 			    struct mlx5_flex_parser_devx *devx,
1116 			    struct mlx5_flex_item *item,
1117 			    struct rte_flow_error *error)
1118 {
1119 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
1120 	bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
1121 	uint32_t i;
1122 
1123 	RTE_SET_USED(item);
1124 	if (conf->nb_outputs > attr->max_num_arc_out)
1125 		return rte_flow_error_set
1126 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1127 			 "too many output links");
1128 	for (i = 0; i < conf->nb_outputs; i++) {
1129 		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
1130 		struct rte_flow_item_flex_link *link = conf->output_link + i;
1131 		const struct rte_flow_item *rte_item = &link->item;
1132 		int arc_type;
1133 
1134 		if (rte_item->spec || rte_item->mask || rte_item->last)
1135 			return rte_flow_error_set
1136 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1137 				 "flex node: invalid OUT arc format");
1138 		arc_type = mlx5_flex_arc_type(rte_item->type, false);
1139 		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
1140 			return rte_flow_error_set
1141 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1142 				 "unsupported flex item OUT arc type");
1143 		arc->arc_parse_graph_node = arc_type;
1144 		arc->start_inner_tunnel = !!is_tunnel;
1145 		arc->compare_condition_value = link->next;
1146 	}
1147 	return 0;
1148 }
1149 
1150 /* Translate RTE flex item API configuration into flex parser settings. */
1151 static int
1152 mlx5_flex_translate_conf(struct rte_eth_dev *dev,
1153 			 const struct rte_flow_item_flex_conf *conf,
1154 			 struct mlx5_flex_parser_devx *devx,
1155 			 struct mlx5_flex_item *item,
1156 			 struct rte_flow_error *error)
1157 {
1158 	struct mlx5_priv *priv = dev->data->dev_private;
1159 	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
1160 	int ret;
1161 
1162 	ret = mlx5_flex_translate_length(attr, conf, devx, error);
1163 	if (ret)
1164 		return ret;
1165 	ret = mlx5_flex_translate_next(attr, conf, devx, error);
1166 	if (ret)
1167 		return ret;
1168 	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
1169 	if (ret)
1170 		return ret;
1171 	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
1172 	if (ret)
1173 		return ret;
1174 	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
1175 	if (ret)
1176 		return ret;
1177 	return 0;
1178 }
1179 
1180 /**
1181  * Create the flex item with specified configuration over the Ethernet device.
1182  *
1183  * @param dev
1184  *   Ethernet device to create flex item on.
1185  * @param[in] conf
1186  *   Flex item configuration.
1187  * @param[out] error
1188  *   Perform verbose error reporting if not NULL. PMDs initialize this
1189  *   structure in case of error only.
1190  *
1191  * @return
1192  *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
1193  */
1194 struct rte_flow_item_flex_handle *
1195 flow_dv_item_create(struct rte_eth_dev *dev,
1196 		    const struct rte_flow_item_flex_conf *conf,
1197 		    struct rte_flow_error *error)
1198 {
1199 	struct mlx5_priv *priv = dev->data->dev_private;
1200 	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
1201 	struct mlx5_flex_item *flex;
1202 	struct mlx5_list_entry *ent;
1203 
1204 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1205 	flex = mlx5_flex_alloc(priv);
1206 	if (!flex) {
1207 		rte_flow_error_set(error, ENOMEM,
1208 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1209 				   "too many flex items created on the port");
1210 		return NULL;
1211 	}
1212 	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
1213 		goto error;
1214 	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
1215 	if (!ent) {
1216 		rte_flow_error_set(error, ENOMEM,
1217 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1218 				   "flex item creation failure");
1219 		goto error;
1220 	}
1221 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
1222 	/* Mark initialized flex item valid. */
1223 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
1224 	return (struct rte_flow_item_flex_handle *)flex;
1225 
1226 error:
1227 	mlx5_flex_free(priv, flex);
1228 	return NULL;
1229 }
1230 
1231 /**
1232  * Release the flex item on the specified Ethernet device.
1233  *
1234  * @param dev
1235  *   Ethernet device to destroy flex item on.
1236  * @param[in] handle
1237  *   Handle of the item existing on the specified device.
1238  * @param[out] error
1239  *   Perform verbose error reporting if not NULL. PMDs initialize this
1240  *   structure in case of error only.
1241  *
1242  * @return
1243  *   0 on success, a negative errno value otherwise and rte_errno is set.
1244  */
1245 int
1246 flow_dv_item_release(struct rte_eth_dev *dev,
1247 		     const struct rte_flow_item_flex_handle *handle,
1248 		     struct rte_flow_error *error)
1249 {
1250 	struct mlx5_priv *priv = dev->data->dev_private;
1251 	struct mlx5_flex_item *flex =
1252 		(struct mlx5_flex_item *)(uintptr_t)handle;
1253 	uint32_t old_refcnt = 1;
1254 	int rc;
1255 
1256 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1257 	rte_spinlock_lock(&priv->flex_item_sl);
1258 	if (mlx5_flex_index(priv, flex) < 0) {
1259 		rte_spinlock_unlock(&priv->flex_item_sl);
1260 		return rte_flow_error_set(error, EINVAL,
1261 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1262 					  "invalid flex item handle value");
1263 	}
1264 	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
1265 					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
1266 		rte_spinlock_unlock(&priv->flex_item_sl);
1267 		return rte_flow_error_set(error, EBUSY,
1268 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1269 					  "flex item has flow references");
1270 	}
1271 	/* Flex item is marked as invalid, we can leave locked section. */
1272 	rte_spinlock_unlock(&priv->flex_item_sl);
1273 	MLX5_ASSERT(flex->devx_fp);
1274 	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
1275 				  &flex->devx_fp->entry);
1276 	flex->devx_fp = NULL;
1277 	mlx5_flex_free(priv, flex);
1278 	if (rc < 0)
1279 		return rte_flow_error_set(error, EBUSY,
1280 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1281 					  "flex item release failure");
1282 	return 0;
1283 }
1284 
1285 /* DevX flex parser list callbacks. */
1286 struct mlx5_list_entry *
1287 mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
1288 {
1289 	struct mlx5_dev_ctx_shared *sh = list_ctx;
1290 	struct mlx5_flex_parser_devx *fp, *conf = ctx;
1291 	int ret;
1292 
1293 	fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
1294 			 0, SOCKET_ID_ANY);
1295 	if (!fp)
1296 		return NULL;
1297 	/* Copy the requested configurations. */
1298 	fp->num_samples = conf->num_samples;
1299 	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
1300 	/* Create DevX flex parser. */
1301 	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
1302 							&fp->devx_conf);
1303 	if (!fp->devx_obj)
1304 		goto error;
1305 	/* Query the firmware assigned sample ids. */
1306 	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
1307 						fp->sample_ids,
1308 						fp->num_samples,
1309 						&fp->anchor_id);
1310 	if (ret)
1311 		goto error;
1312 	DRV_LOG(DEBUG, "DevX flex parser %p created, samples num: %u",
1313 		(const void *)fp, fp->num_samples);
1314 	return &fp->entry;
1315 error:
1316 	if (fp->devx_obj)
1317 		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
1318 	if (fp)
1319 		mlx5_free(fp);
1320 	return NULL;
1321 }
1322 
1323 int
1324 mlx5_flex_parser_match_cb(void *list_ctx,
1325 			  struct mlx5_list_entry *iter, void *ctx)
1326 {
1327 	struct mlx5_flex_parser_devx *fp =
1328 		container_of(iter, struct mlx5_flex_parser_devx, entry);
1329 	struct mlx5_flex_parser_devx *org =
1330 		container_of(ctx, struct mlx5_flex_parser_devx, entry);
1331 
1332 	RTE_SET_USED(list_ctx);
1333 	return !iter || !ctx || memcmp(&fp->devx_conf,
1334 				       &org->devx_conf,
1335 				       sizeof(fp->devx_conf));
1336 }
1337 
1338 void
1339 mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
1340 {
1341 	struct mlx5_flex_parser_devx *fp =
1342 		container_of(entry, struct mlx5_flex_parser_devx, entry);
1343 
1344 	RTE_SET_USED(list_ctx);
1345 	MLX5_ASSERT(fp->devx_obj);
1346 	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
1347 	DRV_LOG(DEBUG, "DevX flex parser %p destroyed", (const void *)fp);
1348 	mlx5_free(entry);
1349 }
1350 
1351 struct mlx5_list_entry *
1352 mlx5_flex_parser_clone_cb(void *list_ctx,
1353 			  struct mlx5_list_entry *entry, void *ctx)
1354 {
1355 	struct mlx5_flex_parser_devx *fp;
1356 
1357 	RTE_SET_USED(list_ctx);
1358 	RTE_SET_USED(entry);
1359 	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
1360 			 0, SOCKET_ID_ANY);
1361 	if (!fp)
1362 		return NULL;
1363 	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
1364 	return &fp->entry;
1365 }
1366 
1367 void
1368 mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
1369 {
1370 	struct mlx5_flex_parser_devx *fp =
1371 		container_of(entry, struct mlx5_flex_parser_devx, entry);
1372 	RTE_SET_USED(list_ctx);
1373 	mlx5_free(fp);
1374 }
1375