xref: /dpdk/drivers/net/mlx5/mlx5_flow_flex.c (revision 6dfb83f13f7a6d259e4ecd3d53d40b9ed87e2fe1)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */
#include <rte_malloc.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"

static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
	      "Flex item maximal number exceeds uint32_t bit width");

/**
 * Routine called once on port initialization to initialize the flex item
 * related infrastructure.
 *
 * @param dev
 *   Ethernet device to perform flex item initialization
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_item_port_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	rte_spinlock_init(&priv->flex_item_sl);
	MLX5_ASSERT(!priv->flex_item_map);
	return 0;
}

/**
 * Routine called once on port close to perform flex item
 * related infrastructure cleanup.
 *
 * @param dev
 *   Ethernet device to perform cleanup
 */
void
mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
		if (priv->flex_item_map & (1 << i)) {
			struct mlx5_flex_item *flex = &priv->flex_item[i];

			claim_zero(mlx5_list_unregister
					(priv->sh->flex_parsers_dv,
					 &flex->devx_fp->entry));
			flex->devx_fp = NULL;
			flex->refcnt = 0;
			priv->flex_item_map &= ~(1 << i);
		}
	}
}

static int
mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	uintptr_t start = (uintptr_t)&priv->flex_item[0];
	uintptr_t entry = (uintptr_t)item;
	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);

	if (entry < start ||
	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
	    (entry - start) % sizeof(struct mlx5_flex_item) ||
	    !(priv->flex_item_map & (1u << idx)))
		return -1;
	return (int)idx;
}
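
/*
 * Handle-to-index arithmetic sketch (illustrative values only): with
 * priv->flex_item[] starting at address A and an item size of S bytes,
 * a handle pointing at A + 2 * S resolves to index 2, provided bit 2 is
 * set in priv->flex_item_map; a misaligned pointer, an out-of-range
 * index, or an unallocated slot all yield -1.
 */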

static struct mlx5_flex_item *
mlx5_flex_alloc(struct mlx5_priv *priv)
{
	struct mlx5_flex_item *item = NULL;

	rte_spinlock_lock(&priv->flex_item_sl);
	if (~priv->flex_item_map) {
		uint32_t idx = rte_bsf32(~priv->flex_item_map);

		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
			item = &priv->flex_item[idx];
			MLX5_ASSERT(!item->refcnt);
			MLX5_ASSERT(!item->devx_fp);
			item->devx_fp = NULL;
			rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
			priv->flex_item_map |= 1u << idx;
		}
	}
	rte_spinlock_unlock(&priv->flex_item_sl);
	return item;
}

static void
mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	int idx = mlx5_flex_index(priv, item);

	MLX5_ASSERT(idx >= 0 &&
		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
		    (priv->flex_item_map & (1u << idx)));
	if (idx >= 0) {
		rte_spinlock_lock(&priv->flex_item_sl);
		MLX5_ASSERT(!item->refcnt);
		MLX5_ASSERT(!item->devx_fp);
		item->devx_fp = NULL;
		rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
		priv->flex_item_map &= ~(1u << idx);
		rte_spinlock_unlock(&priv->flex_item_sl);
	}
}

static uint32_t
mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
		       uint32_t pos, uint32_t width, uint32_t shift)
{
	const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
	uint32_t val, vbits;

	/* Process the bitfield start byte. */
	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
	MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
	if (item->length <= pos / CHAR_BIT)
		return 0;
	val = *ptr++ >> (pos % CHAR_BIT);
	vbits = CHAR_BIT - pos % CHAR_BIT;
	pos = (pos + vbits) / CHAR_BIT;
	vbits = RTE_MIN(vbits, width);
	val &= RTE_BIT32(vbits) - 1;
	while (vbits < width && pos < item->length) {
		uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
		uint32_t tmp = *ptr++;

		pos++;
		tmp &= RTE_BIT32(part) - 1;
		val |= tmp << vbits;
		vbits += part;
	}
	return rte_bswap32(val <<= shift);
}
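
/*
 * Worked example for mlx5_flex_get_bitfield() (hypothetical pattern, not
 * taken from a real flow): with item->pattern = {0xAB, 0xCD},
 * item->length = 2 and pos = 4, width = 8, shift = 0, the first byte
 * contributes 0xAB >> 4 = 0xA (4 valid bits) and the second contributes
 * (0xCD & 0xF) << 4 = 0xD0, giving val = 0xDA; the result is returned
 * byte-swapped, rte_bswap32(0xDA) = 0xDA000000, i.e. in network byte
 * order as expected by the matcher.
 */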

#define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
	do { \
		uint32_t tmp, out = (def); \
		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
			       prog_sample_field_value_##x); \
		tmp = (tmp & ~out) | (val); \
		MLX5_SET(fte_match_set_misc4, misc4_v, \
			 prog_sample_field_value_##x, tmp); \
		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
			       prog_sample_field_value_##x); \
		tmp = (tmp & ~out) | (msk); \
		MLX5_SET(fte_match_set_misc4, misc4_m, \
			 prog_sample_field_value_##x, tmp); \
		tmp = tmp ? (sid) : 0; \
		MLX5_SET(fte_match_set_misc4, misc4_v, \
			 prog_sample_field_id_##x, tmp);\
		MLX5_SET(fte_match_set_misc4, misc4_m, \
			 prog_sample_field_id_##x, tmp); \
	} while (0)

__rte_always_inline static void
mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
			   uint32_t def, uint32_t mask, uint32_t value,
			   uint32_t sample_id, uint32_t id)
{
	switch (id) {
	case 0:
		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
		break;
	case 1:
		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
		break;
	case 2:
		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
		break;
	case 3:
		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
		break;
	case 4:
		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
		break;
	case 5:
		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
		break;
	case 6:
		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
		break;
	case 7:
		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
#undef SET_FP_MATCH_SAMPLE_ID
}
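
/*
 * Merge behavior sketch (hypothetical numbers): several pattern fields may
 * share one sample dword. With def = 0x0000FF00, the macro above clears
 * only the def-covered bits of the previously accumulated value/mask and
 * ORs in the new (val & msk & def), so an earlier field that wrote
 * 0x000000AA survives intact; the sample id is programmed while the
 * accumulated mask stays non-zero and reset to 0 otherwise.
 */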

/**
 * Get the flex parser sample id and corresponding mask
 * per shift and width information.
 *
 * @param[in] tp
 *   Mlx5 flex item sample mapping handle.
 * @param[in] idx
 *   Mapping index.
 * @param[in, out] pos
 *   Where to search the value and mask.
 * @param[in] is_inner
 *   For inner matching or not.
 * @param[out] def
 *   Mask generated by mapping shift and width.
 *
 * @return
 *   >= 0 - sample register id on success, -1 - entry to be ignored (DUMMY field).
 */
int
mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
			uint32_t idx, uint32_t *pos,
			bool is_inner, uint32_t *def)
{
	const struct mlx5_flex_pattern_field *map = tp->map + idx;
	uint32_t id = map->reg_id;

	*def = (RTE_BIT64(map->width) - 1) << map->shift;
	/* Skip placeholders for DUMMY fields. */
	if (id == MLX5_INVALID_SAMPLE_REG_ID) {
		*pos += map->width;
		return -1;
	}
	MLX5_ASSERT(map->width);
	MLX5_ASSERT(id < tp->devx_fp->num_samples);
	if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
		uint32_t num_samples = tp->devx_fp->num_samples / 2;

		MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
		MLX5_ASSERT(id < num_samples);
		id += num_samples;
	}
	return id;
}
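
/*
 * Inner/outer remap sketch (hypothetical sizes): in FLEX_TUNNEL_MODE_MULTI
 * the sample set is allocated twice, outer half first. With
 * devx_fp->num_samples = 6, a mapping entry with reg_id = 1 resolves to
 * sample 1 for outer matching and to 1 + 6 / 2 = 4 for inner matching.
 */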

/**
 * Get the flex parser mapping value per definer format_select_dw.
 *
 * @param[in] item
 *   Rte flow flex item pointer.
 * @param[in] flex
 *   Mlx5 flex item sample mapping handle.
 * @param[in] byte_off
 *   Mlx5 flex item format_select_dw.
 * @param[in] is_mask
 *   Spec or mask.
 * @param[in] tunnel
 *   Tunnel mode or not.
 * @param[out] value
 *   Value calculated for this flex parser, either spec or mask.
 *
 * @return
 *   0 on success, -1 for error.
 */
int
mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
					void *flex, uint32_t byte_off,
					bool is_mask, bool tunnel, uint32_t *value)
{
	struct mlx5_flex_pattern_field *map;
	struct mlx5_flex_item *tp = flex;
	uint32_t def, i, pos, val;
	int id;

	*value = 0;
	for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) {
		map = tp->map + i;
		id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel, &def);
		if (id == -1)
			continue;
		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
			return -1;
		if (byte_off == tp->devx_fp->sample_info[id].sample_dw_data * sizeof(uint32_t)) {
			val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift);
			if (is_mask)
				val &= RTE_BE32(def);
			*value |= val;
		}
		pos += map->width;
	}
	return 0;
}
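
/*
 * Accumulation sketch (hypothetical layout): for a definer dword with
 * byte_off = 16, only mapping entries whose sample_dw_data equals
 * 16 / sizeof(uint32_t) = 4 contribute; each contribution is the
 * big-endian bitfield from the pattern, additionally masked with
 * RTE_BE32(def) when the mask half is being built, ORed into *value.
 */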

/**
 * Translate item pattern into matcher fields according to translation
 * array.
 *
 * @param dev
 *   Ethernet device to translate flex item on.
 * @param[in, out] matcher
 *   Flow matcher to configure.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] is_inner
 *   Inner Flex Item (follows after tunnel header).
 */
void
mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
			      void *matcher, void *key,
			      const struct rte_flow_item *item,
			      bool is_inner)
{
	const struct rte_flow_item_flex *spec, *mask;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	struct mlx5_flex_item *tp;
	uint32_t i, pos = 0;
	uint32_t sample_id;

	RTE_SET_USED(dev);
	MLX5_ASSERT(item->spec && item->mask);
	spec = item->spec;
	mask = item->mask;
	tp = (struct mlx5_flex_item *)spec->handle;
	for (i = 0; i < tp->mapnum; i++) {
		struct mlx5_flex_pattern_field *map = tp->map + i;
		uint32_t val, msk, def;
		int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);

		if (id == -1)
			continue;
		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
		if (id >= (int)tp->devx_fp->num_samples ||
		    id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
			return;
		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
		sample_id = tp->devx_fp->sample_ids[id];
		mlx5_flex_set_match_sample(misc4_m, misc4_v,
					   def, msk & def, val & msk & def,
					   sample_id, id);
		pos += map->width;
	}
}
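
/*
 * Translation walk-through (schematic): for every mapping entry the same
 * (pos, width, shift) window is extracted twice - from spec->pattern and
 * from mask->pattern - and folded into misc4 as the triple
 * (def, msk & def, val & msk & def), so a zeroed mask window in the flow
 * rule naturally wildcards the corresponding sample bits.
 */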

/**
 * Convert flex item handle (from the RTE flow) to flex item index on port.
 * Optionally can increment flex item object reference count.
 *
 * @param dev
 *   Ethernet device to acquire flex item on.
 * @param[in] handle
 *   Flow item handle from item spec.
 * @param[in] acquire
 *   If set - increment reference counter.
 *
 * @return
 *   >=0 - index on success, a negative errno value otherwise
 *         and rte_errno is set.
 */
int
mlx5_flex_acquire_index(struct rte_eth_dev *dev,
			struct rte_flow_item_flex_handle *handle,
			bool acquire)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
	int ret = mlx5_flex_index(priv, flex);

	if (ret < 0) {
		errno = EINVAL;
		rte_errno = EINVAL;
		return ret;
	}
	if (acquire)
		rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
	return ret;
}
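
/*
 * Typical usage sketch: flow creation paths resolve the handle carried in
 * the RTE_FLOW_ITEM_TYPE_FLEX spec once per rule, e.g.
 *
 *	int idx = mlx5_flex_acquire_index(dev, spec->handle, true);
 *
 * and pair it with mlx5_flex_release_index(dev, idx) on rule destruction,
 * so the flex item cannot be released while rules still reference it.
 */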

/**
 * Release flex item index on port - decrements reference counter by index.
 *
 * @param dev
 *   Ethernet device to release flex item index on.
 * @param[in] index
 *   Flow item index.
 *
 * @return
 *   0 - on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_release_index(struct rte_eth_dev *dev,
			int index)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex;

	if (index >= MLX5_PORT_FLEX_ITEM_NUM ||
	    !(priv->flex_item_map & (1u << index))) {
		errno = EINVAL;
		rte_errno = EINVAL;
		return -EINVAL;
	}
	flex = priv->flex_item + index;
	if (flex->refcnt <= 1) {
		MLX5_ASSERT(false);
		errno = EINVAL;
		rte_errno = EINVAL;
		return -EINVAL;
	}
	rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
	return 0;
}

/*
 * Calculate largest mask value for a given shift.
 *
 *   shift      mask
 * ------- ---------------
 *    0     b111100  0x3C
 *    1     b111110  0x3E
 *    2     b111111  0x3F
 *    3     b011111  0x1F
 *    4     b001111  0x0F
 *    5     b000111  0x07
 */
static uint8_t
mlx5_flex_hdr_len_mask(uint8_t shift,
		       const struct mlx5_hca_flex_attr *attr)
{
	uint32_t base_mask;
	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;

	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
	return diff == 0 ? base_mask :
	       diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
}
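
/*
 * Worked arithmetic (assuming base_mask = 0x3F and
 * MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD = 2, which the table above
 * reflects): shift = 0 gives diff = -2 and (0x3F << 2) & 0x3F = 0x3C;
 * shift = 2 gives diff = 0 and the full 0x3F; shift = 5 gives diff = 3
 * and 0x3F >> 3 = 0x07.
 */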

static int
mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
			   const struct rte_flow_item_flex_conf *conf,
			   struct mlx5_flex_parser_devx *devx,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_flex_field *field = &conf->next_header;
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
	uint32_t len_width, mask;

	if (field->field_base % CHAR_BIT)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "not byte aligned header length field");
	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "invalid header length field mode (DUMMY)");
	case FIELD_MODE_FIXED:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (FIXED)");
		if (field->field_size ||
		    field->offset_mask || field->offset_shift)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid fields for fixed mode");
		if (field->field_base < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "negative header length field base (FIXED)");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
		break;
	case FIELD_MODE_OFFSET:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (OFFSET)");
		if (!field->field_size)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "field size is a must for offset mode");
		if (field->field_size + field->offset_base < attr->header_length_mask_width)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "field size plus offset_base is too small");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
		if (field->offset_mask == 0 ||
		    !rte_is_power_of_2(field->offset_mask + 1))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid length field offset mask (OFFSET)");
		len_width = rte_fls_u32(field->offset_mask);
		if (len_width > attr->header_length_mask_width)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field offset mask too wide (OFFSET)");
		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
		if (mask < field->offset_mask)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field shift too big (OFFSET)");
		node->header_length_field_mask = RTE_MIN(mask,
							 field->offset_mask);
		break;
	case FIELD_MODE_BITMASK:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (BITMASK)");
		if (attr->header_length_mask_width < field->field_size)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "header length field width exceeds limit");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
		if (mask < field->offset_mask)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field shift too big (BITMASK)");
		node->header_length_field_mask = RTE_MIN(mask,
							 field->offset_mask);
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown header length field mode");
	}
	if (field->field_base / CHAR_BIT >= 0 &&
	    field->field_base / CHAR_BIT > attr->max_base_header_length)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "header length field base exceeds limit");
	node->header_length_base_value = field->field_base / CHAR_BIT;
	if (field->field_mode == FIELD_MODE_OFFSET ||
	    field->field_mode == FIELD_MODE_BITMASK) {
		if (field->offset_shift > 15 || field->offset_shift < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "header length field shift exceeds limit");
		node->header_length_field_shift = field->offset_shift;
		node->header_length_field_offset = field->offset_base;
	}
	if (field->field_mode == FIELD_MODE_OFFSET) {
		if (field->field_size > attr->header_length_mask_width) {
			node->header_length_field_offset +=
				field->field_size - attr->header_length_mask_width;
		} else if (field->field_size < attr->header_length_mask_width) {
			node->header_length_field_offset -=
				attr->header_length_mask_width - field->field_size;
			node->header_length_field_mask =
					RTE_MIN(node->header_length_field_mask,
						(1u << field->field_size) - 1);
		}
	}
	return 0;
}
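
/*
 * Configuration sketch (hypothetical values, illustration only): a
 * protocol carrying its header length as a 4-bit count of 4-byte units
 * at bit offset 4 might map onto the OFFSET mode above as:
 *
 *	const struct rte_flow_item_flex_field next_header = {
 *		.field_mode = FIELD_MODE_OFFSET,
 *		.field_base = 0,	-- no fixed length part, bits
 *		.field_size = 4,	-- length field width, bits
 *		.offset_base = 4,	-- length field position, bits
 *		.offset_mask = 0xF,
 *		.offset_shift = 2,	-- 4-byte units to bytes
 *	};
 *
 * Whether such a combination is accepted depends on the HCA attributes
 * (header_length_mode, header_length_mask_width) validated above.
 */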

static int
mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
			 const struct rte_flow_item_flex_conf *conf,
			 struct mlx5_flex_parser_devx *devx,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;

	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		if (conf->nb_outputs)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "next protocol field is required (DUMMY)");
		return 0;
	case FIELD_MODE_FIXED:
		break;
	case FIELD_MODE_OFFSET:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field mode (OFFSET)");
	case FIELD_MODE_BITMASK:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field mode (BITMASK)");
	default:
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown next protocol field mode");
	}
	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
	if (!conf->nb_outputs)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "out link(s) is required if next field present");
	if (attr->max_next_header_offset < field->field_base)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "next protocol field base exceeds limit");
	if (field->offset_shift)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field shift");
	node->next_header_field_offset = field->field_base;
	node->next_header_field_size = field->field_size;
	return 0;
}
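
/*
 * Sketch (hypothetical values): a next-protocol selector occupying one
 * byte right after a 2-byte fixed header would use FIELD_MODE_FIXED with
 * field_base = 16 and field_size = 8, together with at least one entry
 * in conf->output_link[]; DUMMY mode is only valid when no out links are
 * configured.
 */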

/* Helper structure to handle field bit intervals. */
struct mlx5_flex_field_cover {
	uint16_t num;
	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
};

static void
mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
		       uint16_t num, int32_t start, int32_t end)
{
	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
	MLX5_ASSERT(num <= cover->num);
	if (num < cover->num) {
		memmove(&cover->start[num + 1], &cover->start[num],
			(cover->num - num) * sizeof(int32_t));
		memmove(&cover->end[num + 1], &cover->end[num],
			(cover->num - num) * sizeof(int32_t));
	}
	cover->start[num] = start;
	cover->end[num] = end;
	cover->num++;
}

static void
mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
{
	uint32_t i, del = 0;
	int32_t end;

	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
	MLX5_ASSERT(num < (cover->num - 1));
	end = cover->end[num];
	for (i = num + 1; i < cover->num; i++) {
		if (end < cover->start[i])
			break;
		del++;
		if (end <= cover->end[i]) {
			cover->end[num] = cover->end[i];
			break;
		}
	}
	if (del) {
		MLX5_ASSERT(del < (cover->num - 1u - num));
		cover->num -= del;
		MLX5_ASSERT(cover->num > num);
		if ((cover->num - num) > 1) {
			memmove(&cover->start[num + 1],
				&cover->start[num + 1 + del],
				(cover->num - num - 1) * sizeof(int32_t));
			memmove(&cover->end[num + 1],
				&cover->end[num + 1 + del],
				(cover->num - num - 1) * sizeof(int32_t));
		}
	}
}
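
/*
 * Interval maintenance example (abstract bit positions): with intervals
 * [0, 8) and [16, 24) already recorded, extending the first one to
 * [0, 20) makes mlx5_flex_merge_field() swallow the second, leaving the
 * single interval [0, 24); mlx5_flex_insert_field() keeps the array
 * sorted by start offset throughout.
 */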

/*
 * Validate the sample field and update interval array
 * if parameters match with the "match" field.
 * Returns:
 *    < 0  - error
 *    == 0 - no match, interval array not updated
 *    > 0  - match, interval array updated
 */
static int
mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
		       struct rte_flow_item_flex_field *field,
		       struct rte_flow_item_flex_field *match,
		       struct mlx5_hca_flex_attr *attr,
		       struct rte_flow_error *error)
{
	int32_t start, end;
	uint32_t i;

	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		return 0;
	case FIELD_MODE_FIXED:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (FIXED)");
		if (field->offset_shift)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid sample field shift (FIXED)");
		if (field->field_base < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid sample field base (FIXED)");
		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit (FIXED)");
		break;
	case FIELD_MODE_OFFSET:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (OFFSET)");
		if (field->field_base / CHAR_BIT >= 0 &&
		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"sample field base exceeds limit");
		break;
	case FIELD_MODE_BITMASK:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (BITMASK)");
		if (field->field_base / CHAR_BIT >= 0 &&
		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"sample field base exceeds limit");
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown data sample field mode");
	}
	if (!match) {
		if (!field->field_size)
			return rte_flow_error_set
				(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"zero sample field width");
		if (field->field_id)
			DRV_LOG(DEBUG, "sample field id hint ignored");
	} else {
		if (field->field_mode != match->field_mode ||
		    field->offset_base | match->offset_base ||
		    field->offset_mask | match->offset_mask ||
		    field->offset_shift | match->offset_shift)
			return 0;
	}
	start = field->field_base;
	end = start + field->field_size;
	/* Add the new or similar field to interval array. */
	if (!cover->num) {
		cover->start[cover->num] = start;
		cover->end[cover->num] = end;
		cover->num = 1;
		return 1;
	}
	for (i = 0; i < cover->num; i++) {
		if (start > cover->end[i]) {
			if (i >= (cover->num - 1u)) {
				mlx5_flex_insert_field(cover, cover->num,
						       start, end);
				break;
			}
			continue;
		}
		if (end < cover->start[i]) {
			mlx5_flex_insert_field(cover, i, start, end);
			break;
		}
		if (start < cover->start[i])
			cover->start[i] = start;
		if (end > cover->end[i]) {
			cover->end[i] = end;
			if (i < (cover->num - 1u))
				mlx5_flex_merge_field(cover, i);
		}
		break;
	}
	return 1;
}

static void
mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
			struct rte_flow_item_flex_field *field,
			enum rte_flow_item_flex_tunnel_mode tunnel_mode)
{
	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
	na->flow_match_sample_en = 1;
	switch (field->field_mode) {
	case FIELD_MODE_FIXED:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
		break;
	case FIELD_MODE_OFFSET:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
		na->flow_match_sample_field_offset = field->offset_base;
		na->flow_match_sample_field_offset_mask = field->offset_mask;
		na->flow_match_sample_field_offset_shift = field->offset_shift;
		break;
	case FIELD_MODE_BITMASK:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
		na->flow_match_sample_field_offset = field->offset_base;
		na->flow_match_sample_field_offset_mask = field->offset_mask;
		na->flow_match_sample_field_offset_shift = field->offset_shift;
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
	switch (tunnel_mode) {
	case FLEX_TUNNEL_MODE_SINGLE:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_TUNNEL:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
		break;
	case FLEX_TUNNEL_MODE_MULTI:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_OUTER:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
		break;
	case FLEX_TUNNEL_MODE_INNER:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
}

/* Map specified field to set/subset of allocated sample registers. */
static int
mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
		     struct mlx5_flex_parser_devx *parser,
		     struct mlx5_flex_item *item,
		     struct rte_flow_error *error)
{
	struct mlx5_devx_match_sample_attr node;
	int32_t start = field->field_base;
	int32_t end = start + field->field_size;
	struct mlx5_flex_pattern_field *trans;
	uint32_t i, done_bits = 0;

	if (field->field_mode == FIELD_MODE_DUMMY) {
		done_bits = field->field_size;
		while (done_bits) {
			uint32_t part = RTE_MIN(done_bits,
						sizeof(uint32_t) * CHAR_BIT);
			if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
				return rte_flow_error_set
					(error,
					 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					 "too many flex item pattern translations");
			trans = &item->map[item->mapnum];
			trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
			trans->shift = 0;
			trans->width = part;
			item->mapnum++;
			done_bits -= part;
		}
		return 0;
	}
	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
	for (i = 0; i < parser->num_samples; i++) {
		struct mlx5_devx_match_sample_attr *sample =
			&parser->devx_conf.sample[i];
		int32_t reg_start, reg_end;
		int32_t cov_start, cov_end;

		MLX5_ASSERT(sample->flow_match_sample_en);
		if (!sample->flow_match_sample_en)
			break;
		node.flow_match_sample_field_base_offset =
			sample->flow_match_sample_field_base_offset;
		if (memcmp(&node, sample, sizeof(node)))
			continue;
		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
		reg_start *= CHAR_BIT;
		reg_end = reg_start + 32;
		if (end <= reg_start || start >= reg_end)
			continue;
		cov_start = RTE_MAX(reg_start, start);
		cov_end = RTE_MIN(reg_end, end);
		MLX5_ASSERT(cov_end > cov_start);
		done_bits += cov_end - cov_start;
		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "too many flex item pattern translations");
		trans = &item->map[item->mapnum];
		item->mapnum++;
		trans->reg_id = i;
		trans->shift = cov_start - reg_start;
		trans->width = cov_end - cov_start;
	}
	if (done_bits != field->field_size) {
		MLX5_ASSERT(false);
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "failed to map field to sample register");
	}
	return 0;
}
926b293e8e4SViacheslav Ovsiienko 
927b293e8e4SViacheslav Ovsiienko /* Allocate sample registers for the specified field type and interval array. */
928b293e8e4SViacheslav Ovsiienko static int
929b293e8e4SViacheslav Ovsiienko mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
930b293e8e4SViacheslav Ovsiienko 		       struct mlx5_flex_parser_devx *parser,
931b293e8e4SViacheslav Ovsiienko 		       struct mlx5_flex_item *item,
932b293e8e4SViacheslav Ovsiienko 		       struct rte_flow_item_flex_field *field,
933b293e8e4SViacheslav Ovsiienko 		       struct mlx5_hca_flex_attr *attr,
934b293e8e4SViacheslav Ovsiienko 		       struct rte_flow_error *error)
935b293e8e4SViacheslav Ovsiienko {
936b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_match_sample_attr node;
937b293e8e4SViacheslav Ovsiienko 	uint32_t idx = 0;
938b293e8e4SViacheslav Ovsiienko 
939b293e8e4SViacheslav Ovsiienko 	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
940b293e8e4SViacheslav Ovsiienko 	while (idx < cover->num) {
941b293e8e4SViacheslav Ovsiienko 		int32_t start, end;
942b293e8e4SViacheslav Ovsiienko 
943b293e8e4SViacheslav Ovsiienko 		/*
944b293e8e4SViacheslav Ovsiienko 		 * Sample base offsets are expressed in bytes and must be
945b293e8e4SViacheslav Ovsiienko 		 * 32-bit aligned, as the firmware requires for samples.
946b293e8e4SViacheslav Ovsiienko 		 */
947b293e8e4SViacheslav Ovsiienko 		start = RTE_ALIGN_FLOOR(cover->start[idx],
948b293e8e4SViacheslav Ovsiienko 					sizeof(uint32_t) * CHAR_BIT);
949b293e8e4SViacheslav Ovsiienko 		node.flow_match_sample_field_base_offset =
950b293e8e4SViacheslav Ovsiienko 						(start / CHAR_BIT) & 0xFF;
951b293e8e4SViacheslav Ovsiienko 		/* Allocate sample register. */
952b293e8e4SViacheslav Ovsiienko 		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
953b293e8e4SViacheslav Ovsiienko 		    parser->num_samples >= attr->max_num_sample ||
954b293e8e4SViacheslav Ovsiienko 		    parser->num_samples >= attr->max_num_prog_sample)
955b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
956b293e8e4SViacheslav Ovsiienko 				(error, EINVAL,
957b293e8e4SViacheslav Ovsiienko 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
958b293e8e4SViacheslav Ovsiienko 				 "no sample registers to handle all flex item fields");
959b293e8e4SViacheslav Ovsiienko 		parser->devx_conf.sample[parser->num_samples] = node;
960b293e8e4SViacheslav Ovsiienko 		parser->num_samples++;
961b293e8e4SViacheslav Ovsiienko 		/* Remove or update covered intervals. */
962b293e8e4SViacheslav Ovsiienko 		end = start + 32;
963b293e8e4SViacheslav Ovsiienko 		while (idx < cover->num) {
964b293e8e4SViacheslav Ovsiienko 			if (end >= cover->end[idx]) {
965b293e8e4SViacheslav Ovsiienko 				idx++;
966b293e8e4SViacheslav Ovsiienko 				continue;
967b293e8e4SViacheslav Ovsiienko 			}
968b293e8e4SViacheslav Ovsiienko 			if (end > cover->start[idx])
969b293e8e4SViacheslav Ovsiienko 				cover->start[idx] = end;
970b293e8e4SViacheslav Ovsiienko 			break;
971b293e8e4SViacheslav Ovsiienko 		}
972b293e8e4SViacheslav Ovsiienko 	}
973b293e8e4SViacheslav Ovsiienko 	return 0;
974b293e8e4SViacheslav Ovsiienko }
975b293e8e4SViacheslav Ovsiienko 
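/*
 * Translate the flex item sample field configuration into DevX sample
 * registers: group similar fields, cover their bit intervals with the
 * smallest set of 32-bit sample registers and build the pattern
 * translation map used later on flow creation.
 */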
976b293e8e4SViacheslav Ovsiienko static int
977b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
978b293e8e4SViacheslav Ovsiienko 			   const struct rte_flow_item_flex_conf *conf,
979b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_parser_devx *parser,
980b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_item *item,
981b293e8e4SViacheslav Ovsiienko 			   struct rte_flow_error *error)
982b293e8e4SViacheslav Ovsiienko {
983b293e8e4SViacheslav Ovsiienko 	struct mlx5_flex_field_cover cover;
984b293e8e4SViacheslav Ovsiienko 	uint32_t i, j;
985b293e8e4SViacheslav Ovsiienko 	int ret;
986b293e8e4SViacheslav Ovsiienko 
987b293e8e4SViacheslav Ovsiienko 	switch (conf->tunnel) {
988b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_SINGLE:
989b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
990b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_OUTER:
991b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
992b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_INNER:
993b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
994b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_MULTI:
995b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
996b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_TUNNEL:
997b293e8e4SViacheslav Ovsiienko 		break;
998b293e8e4SViacheslav Ovsiienko 	default:
999b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1000b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1001b293e8e4SViacheslav Ovsiienko 			 "unrecognized tunnel mode");
1002b293e8e4SViacheslav Ovsiienko 	}
1003b293e8e4SViacheslav Ovsiienko 	item->tunnel_mode = conf->tunnel;
1004b293e8e4SViacheslav Ovsiienko 	if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
1005b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1006b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1007b293e8e4SViacheslav Ovsiienko 			 "sample field number exceeds limit");
1008b293e8e4SViacheslav Ovsiienko 	/*
1009b293e8e4SViacheslav Ovsiienko 	 * The application can specify fields smaller or bigger than the
1010b293e8e4SViacheslav Ovsiienko 	 * 32 bits covered by a single sample register, and it can specify
1011b293e8e4SViacheslav Ovsiienko 	 * field offsets in any order.
1012b293e8e4SViacheslav Ovsiienko 	 *
1013b293e8e4SViacheslav Ovsiienko 	 * Gather all similar fields together, build an array of bit
101453820561SMichael Baum 	 * intervals in ascending order and try to cover them with the
1015b293e8e4SViacheslav Ovsiienko 	 * smallest set of sample registers.
1016b293e8e4SViacheslav Ovsiienko 	 */
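	/*
	 * For example, two 16-bit fields of the same type at bit offsets
	 * 0 and 16 both fall into the 32-bit window [0, 32) and can be
	 * covered by a single sample register at base offset 0.
	 */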
1017b293e8e4SViacheslav Ovsiienko 	memset(&cover, 0, sizeof(cover));
1018b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_samples; i++) {
1019b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
1020b293e8e4SViacheslav Ovsiienko 
1021b293e8e4SViacheslav Ovsiienko 		/* Check whether field was covered in the previous iteration. */
1022b293e8e4SViacheslav Ovsiienko 		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
1023b293e8e4SViacheslav Ovsiienko 			continue;
1024b293e8e4SViacheslav Ovsiienko 		if (fl->field_mode == FIELD_MODE_DUMMY)
1025b293e8e4SViacheslav Ovsiienko 			continue;
1026b293e8e4SViacheslav Ovsiienko 		/* Build an interval array for the field and similar ones */
1027b293e8e4SViacheslav Ovsiienko 		cover.num = 0;
1028b293e8e4SViacheslav Ovsiienko 		/* Add the first field to array unconditionally. */
1029b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
1030b293e8e4SViacheslav Ovsiienko 		if (ret < 0)
1031b293e8e4SViacheslav Ovsiienko 			return ret;
1032b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(ret > 0);
1033b293e8e4SViacheslav Ovsiienko 		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
1034b293e8e4SViacheslav Ovsiienko 		for (j = i + 1; j < conf->nb_samples; j++) {
1035b293e8e4SViacheslav Ovsiienko 			struct rte_flow_item_flex_field *ft;
1036b293e8e4SViacheslav Ovsiienko 
1037b293e8e4SViacheslav Ovsiienko 			/* Add field to array if its type matches. */
1038b293e8e4SViacheslav Ovsiienko 			ft = conf->sample_data + j;
1039b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_cover_sample(&cover, ft, fl,
1040b293e8e4SViacheslav Ovsiienko 						     attr, error);
1041b293e8e4SViacheslav Ovsiienko 			if (ret < 0)
1042b293e8e4SViacheslav Ovsiienko 				return ret;
1043b293e8e4SViacheslav Ovsiienko 			if (!ret)
1044b293e8e4SViacheslav Ovsiienko 				continue;
1045b293e8e4SViacheslav Ovsiienko 			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
1046b293e8e4SViacheslav Ovsiienko 		}
1047b293e8e4SViacheslav Ovsiienko 		/* Allocate sample registers to cover array of intervals. */
1048b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_alloc_sample(&cover, parser, item,
1049b293e8e4SViacheslav Ovsiienko 					     fl, attr, error);
1050b293e8e4SViacheslav Ovsiienko 		if (ret)
1051b293e8e4SViacheslav Ovsiienko 			return ret;
1052b293e8e4SViacheslav Ovsiienko 	}
1053b293e8e4SViacheslav Ovsiienko 	/* Build the item pattern translation data used on flow creation. */
1054b293e8e4SViacheslav Ovsiienko 	item->mapnum = 0;
1055b293e8e4SViacheslav Ovsiienko 	memset(&item->map, 0, sizeof(item->map));
1056b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_samples; i++) {
1057b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
1058b293e8e4SViacheslav Ovsiienko 
1059b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_map_sample(fl, parser, item, error);
1060b293e8e4SViacheslav Ovsiienko 		if (ret) {
1061b293e8e4SViacheslav Ovsiienko 			MLX5_ASSERT(false);
1062b293e8e4SViacheslav Ovsiienko 			return ret;
1063b293e8e4SViacheslav Ovsiienko 		}
1064b293e8e4SViacheslav Ovsiienko 	}
1065b293e8e4SViacheslav Ovsiienko 	if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
1066b293e8e4SViacheslav Ovsiienko 		/*
1067b293e8e4SViacheslav Ovsiienko 		 * In FLEX_TUNNEL_MODE_MULTI tunnel mode PMD creates 2 sets
1068b293e8e4SViacheslav Ovsiienko 		 * of samples. The first set is for outer and the second set
1069b293e8e4SViacheslav Ovsiienko 		 * for inner flex flow item. Outer and inner samples differ
1070b293e8e4SViacheslav Ovsiienko 		 * only in tunnel_mode.
1071b293e8e4SViacheslav Ovsiienko 		 */
1072b293e8e4SViacheslav Ovsiienko 		if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
1073b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1074b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1075b293e8e4SViacheslav Ovsiienko 				 "no sample registers for inner");
1076b293e8e4SViacheslav Ovsiienko 		rte_memcpy(parser->devx_conf.sample + parser->num_samples,
1077b293e8e4SViacheslav Ovsiienko 			   parser->devx_conf.sample,
1078b293e8e4SViacheslav Ovsiienko 			   parser->num_samples *
1079b293e8e4SViacheslav Ovsiienko 					sizeof(parser->devx_conf.sample[0]));
1080b293e8e4SViacheslav Ovsiienko 		for (i = 0; i < parser->num_samples; i++) {
1081b293e8e4SViacheslav Ovsiienko 			struct mlx5_devx_match_sample_attr *sm =
1082b293e8e4SViacheslav Ovsiienko 				&parser->devx_conf.sample[parser->num_samples + i];
1083b293e8e4SViacheslav Ovsiienko 
1084b293e8e4SViacheslav Ovsiienko 			sm->flow_match_sample_tunnel_mode =
1085b293e8e4SViacheslav Ovsiienko 						MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
1086b293e8e4SViacheslav Ovsiienko 		}
1087b293e8e4SViacheslav Ovsiienko 		parser->num_samples *= 2;
1088b293e8e4SViacheslav Ovsiienko 	}
1089b293e8e4SViacheslav Ovsiienko 	return 0;
1090b293e8e4SViacheslav Ovsiienko }
1091b293e8e4SViacheslav Ovsiienko 
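/*
 * Map an RTE flow item type to the matching parse graph arc node type.
 * For IPv4/IPv6 the "in" flag selects the combined IP node used for
 * input arcs. Returns a negative errno value for unsupported types.
 */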
1092b293e8e4SViacheslav Ovsiienko static int
1093b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
1094b293e8e4SViacheslav Ovsiienko {
1095b293e8e4SViacheslav Ovsiienko 	switch (type) {
1096b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_ETH:
1097b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_MAC;
1098b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_IPV4:
1099b293e8e4SViacheslav Ovsiienko 		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
1100b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_IPV6:
1101b293e8e4SViacheslav Ovsiienko 		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
1102b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_UDP:
1103b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_UDP;
1104b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_TCP:
1105b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_TCP;
1106b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_MPLS:
1107b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_MPLS;
1108b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_GRE:
1109b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_GRE;
1110b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1111b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_GENEVE;
1112b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1113b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
1114*6dfb83f1SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_ESP:
1115*6dfb83f1SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_IPSEC_ESP;
1116b293e8e4SViacheslav Ovsiienko 	default:
1117b293e8e4SViacheslav Ovsiienko 		return -EINVAL;
1118b293e8e4SViacheslav Ovsiienko 	}
1119b293e8e4SViacheslav Ovsiienko }
1120b293e8e4SViacheslav Ovsiienko 
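/*
 * Extract the IN arc condition value from an Ethernet item.
 * Only a full mask on the ether_type field (and nothing else) is
 * accepted, the ether_type value is returned in host byte order.
 */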
1121b293e8e4SViacheslav Ovsiienko static int
1122b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
1123b293e8e4SViacheslav Ovsiienko 		     struct rte_flow_error *error)
1124b293e8e4SViacheslav Ovsiienko {
1125b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_eth *spec = item->spec;
1126b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_eth *mask = item->mask;
1127b293e8e4SViacheslav Ovsiienko 	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
1128b293e8e4SViacheslav Ovsiienko 
1129b293e8e4SViacheslav Ovsiienko 	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
1130b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1131b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1132b293e8e4SViacheslav Ovsiienko 			 "invalid eth item mask");
1133b293e8e4SViacheslav Ovsiienko 	}
1134b293e8e4SViacheslav Ovsiienko 	return rte_be_to_cpu_16(spec->hdr.ether_type);
1135b293e8e4SViacheslav Ovsiienko }
1136b293e8e4SViacheslav Ovsiienko 
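/*
 * Extract the IN arc condition value from a UDP item.
 * Only a full mask on the destination port (and nothing else) is
 * accepted, the port value is returned in host byte order.
 */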
1137b293e8e4SViacheslav Ovsiienko static int
1138b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
1139b293e8e4SViacheslav Ovsiienko 		     struct rte_flow_error *error)
1140b293e8e4SViacheslav Ovsiienko {
1141b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_udp *spec = item->spec;
1142b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_udp *mask = item->mask;
1143b293e8e4SViacheslav Ovsiienko 	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
1144b293e8e4SViacheslav Ovsiienko 
1145b293e8e4SViacheslav Ovsiienko 	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
1146b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1147b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1148b293e8e4SViacheslav Ovsiienko 			 "invalid udp item mask");
1149b293e8e4SViacheslav Ovsiienko 	}
1150b293e8e4SViacheslav Ovsiienko 	return rte_be_to_cpu_16(spec->hdr.dst_port);
1151b293e8e4SViacheslav Ovsiienko }
1152b293e8e4SViacheslav Ovsiienko 
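/*
 * Extract the IN arc condition value from an IPv4 item.
 * Only a full mask on the next_proto_id field (and nothing else)
 * is accepted, the IP protocol number is returned.
 */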
1153b293e8e4SViacheslav Ovsiienko static int
1154*6dfb83f1SViacheslav Ovsiienko mlx5_flex_arc_in_ipv4(const struct rte_flow_item *item,
1155*6dfb83f1SViacheslav Ovsiienko 		      struct rte_flow_error *error)
1156*6dfb83f1SViacheslav Ovsiienko {
1157*6dfb83f1SViacheslav Ovsiienko 	const struct rte_flow_item_ipv4 *spec = item->spec;
1158*6dfb83f1SViacheslav Ovsiienko 	const struct rte_flow_item_ipv4 *mask = item->mask;
1159*6dfb83f1SViacheslav Ovsiienko 	struct rte_flow_item_ipv4 ip = { .hdr.next_proto_id = 0xff };
1160*6dfb83f1SViacheslav Ovsiienko 
1161*6dfb83f1SViacheslav Ovsiienko 	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv4))) {
1162*6dfb83f1SViacheslav Ovsiienko 		return rte_flow_error_set
1163*6dfb83f1SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1164*6dfb83f1SViacheslav Ovsiienko 			 "invalid ipv4 item mask, full mask is desired");
1165*6dfb83f1SViacheslav Ovsiienko 	}
1166*6dfb83f1SViacheslav Ovsiienko 	return spec->hdr.next_proto_id;
1167*6dfb83f1SViacheslav Ovsiienko }
1168*6dfb83f1SViacheslav Ovsiienko 
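/*
 * Extract the IN arc condition value from an IPv6 item.
 * Only a full mask on the proto field (and nothing else) is
 * accepted, the IP protocol number is returned.
 */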
1169*6dfb83f1SViacheslav Ovsiienko static int
1170d451d1a1SRongwei Liu mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
1171d451d1a1SRongwei Liu 		      struct rte_flow_error *error)
1172d451d1a1SRongwei Liu {
1173d451d1a1SRongwei Liu 	const struct rte_flow_item_ipv6 *spec = item->spec;
1174d451d1a1SRongwei Liu 	const struct rte_flow_item_ipv6 *mask = item->mask;
1175d451d1a1SRongwei Liu 	struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff };
1176d451d1a1SRongwei Liu 
1177d451d1a1SRongwei Liu 	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) {
1178d451d1a1SRongwei Liu 		return rte_flow_error_set
1179d451d1a1SRongwei Liu 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1180d451d1a1SRongwei Liu 			 "invalid ipv6 item mask, full mask is desired");
1181d451d1a1SRongwei Liu 	}
1182d451d1a1SRongwei Liu 	return spec->hdr.proto;
1183d451d1a1SRongwei Liu }
1184d451d1a1SRongwei Liu 
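/*
 * Translate the flex item input links into parse graph IN arcs,
 * checking each arc type against the reported HCA capabilities and
 * extracting the per-protocol compare condition value.
 */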
1185d451d1a1SRongwei Liu static int
1186b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
1187b293e8e4SViacheslav Ovsiienko 			   const struct rte_flow_item_flex_conf *conf,
1188b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_parser_devx *devx,
1189b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_item *item,
1190b293e8e4SViacheslav Ovsiienko 			   struct rte_flow_error *error)
1191b293e8e4SViacheslav Ovsiienko {
1192b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
1193b293e8e4SViacheslav Ovsiienko 	uint32_t i;
1194b293e8e4SViacheslav Ovsiienko 
1195b293e8e4SViacheslav Ovsiienko 	RTE_SET_USED(item);
1196b293e8e4SViacheslav Ovsiienko 	if (conf->nb_inputs > attr->max_num_arc_in)
1197b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1198b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1199b293e8e4SViacheslav Ovsiienko 			 "too many input links");
1200b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_inputs; i++) {
1201b293e8e4SViacheslav Ovsiienko 		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
1202b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_link *link = conf->input_link + i;
1203b293e8e4SViacheslav Ovsiienko 		const struct rte_flow_item *rte_item = &link->item;
1204b293e8e4SViacheslav Ovsiienko 		int arc_type;
1205b293e8e4SViacheslav Ovsiienko 		int ret;
1206b293e8e4SViacheslav Ovsiienko 
1207b293e8e4SViacheslav Ovsiienko 		if (!rte_item->spec || !rte_item->mask || rte_item->last)
1208b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1209b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1210b293e8e4SViacheslav Ovsiienko 				 "invalid flex item IN arc format");
1211b293e8e4SViacheslav Ovsiienko 		arc_type = mlx5_flex_arc_type(rte_item->type, true);
1212b293e8e4SViacheslav Ovsiienko 		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
1213b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1214b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1215b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item IN arc type");
1216b293e8e4SViacheslav Ovsiienko 		arc->arc_parse_graph_node = arc_type;
1217b293e8e4SViacheslav Ovsiienko 		arc->start_inner_tunnel = 0;
1218b293e8e4SViacheslav Ovsiienko 		/*
1219b293e8e4SViacheslav Ovsiienko 		 * Configure the arc IN condition value. The value location
1220b293e8e4SViacheslav Ovsiienko 		 * depends on the protocol. Only the protocols handled below
1221b293e8e4SViacheslav Ovsiienko 		 * have defined condition value locations and are supported
1222b293e8e4SViacheslav Ovsiienko 		 * for IN arcs. Add more protocols when available.
1223b293e8e4SViacheslav Ovsiienko 		 */
1224b293e8e4SViacheslav Ovsiienko 		switch (rte_item->type) {
1225b293e8e4SViacheslav Ovsiienko 		case RTE_FLOW_ITEM_TYPE_ETH:
1226b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_arc_in_eth(rte_item, error);
1227b293e8e4SViacheslav Ovsiienko 			break;
1228b293e8e4SViacheslav Ovsiienko 		case RTE_FLOW_ITEM_TYPE_UDP:
1229b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_arc_in_udp(rte_item, error);
1230b293e8e4SViacheslav Ovsiienko 			break;
1231*6dfb83f1SViacheslav Ovsiienko 		case RTE_FLOW_ITEM_TYPE_IPV4:
1232*6dfb83f1SViacheslav Ovsiienko 			ret = mlx5_flex_arc_in_ipv4(rte_item, error);
1233*6dfb83f1SViacheslav Ovsiienko 			break;
1234d451d1a1SRongwei Liu 		case RTE_FLOW_ITEM_TYPE_IPV6:
1235d451d1a1SRongwei Liu 			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
1236d451d1a1SRongwei Liu 			break;
1237b293e8e4SViacheslav Ovsiienko 		default:
1238b293e8e4SViacheslav Ovsiienko 			MLX5_ASSERT(false);
1239b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1240b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1241b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item IN arc type");
1242b293e8e4SViacheslav Ovsiienko 		}
1243b293e8e4SViacheslav Ovsiienko 		if (ret < 0)
1244b293e8e4SViacheslav Ovsiienko 			return ret;
1245b293e8e4SViacheslav Ovsiienko 		arc->compare_condition_value = (uint16_t)ret;
1246b293e8e4SViacheslav Ovsiienko 	}
1247b293e8e4SViacheslav Ovsiienko 	return 0;
1248b293e8e4SViacheslav Ovsiienko }
1249b293e8e4SViacheslav Ovsiienko 
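/*
 * Translate the flex item output links into parse graph OUT arcs.
 * OUT arc items must carry neither spec nor mask, the next protocol
 * compare value is taken from the link itself.
 */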
1250b293e8e4SViacheslav Ovsiienko static int
1251b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
1252b293e8e4SViacheslav Ovsiienko 			    const struct rte_flow_item_flex_conf *conf,
1253b293e8e4SViacheslav Ovsiienko 			    struct mlx5_flex_parser_devx *devx,
1254b293e8e4SViacheslav Ovsiienko 			    struct mlx5_flex_item *item,
1255b293e8e4SViacheslav Ovsiienko 			    struct rte_flow_error *error)
1256b293e8e4SViacheslav Ovsiienko {
1257b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
1258b293e8e4SViacheslav Ovsiienko 	bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
1259b293e8e4SViacheslav Ovsiienko 	uint32_t i;
1260b293e8e4SViacheslav Ovsiienko 
1261b293e8e4SViacheslav Ovsiienko 	RTE_SET_USED(item);
1262b293e8e4SViacheslav Ovsiienko 	if (conf->nb_outputs > attr->max_num_arc_out)
1263b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1264b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1265b293e8e4SViacheslav Ovsiienko 			 "too many output links");
1266b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_outputs; i++) {
1267b293e8e4SViacheslav Ovsiienko 		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
1268b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_link *link = conf->output_link + i;
1269b293e8e4SViacheslav Ovsiienko 		const struct rte_flow_item *rte_item = &link->item;
1270b293e8e4SViacheslav Ovsiienko 		int arc_type;
1271b293e8e4SViacheslav Ovsiienko 
1272b293e8e4SViacheslav Ovsiienko 		if (rte_item->spec || rte_item->mask || rte_item->last)
1273b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1274b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1275b293e8e4SViacheslav Ovsiienko 				 "invalid flex item OUT arc format");
1276b293e8e4SViacheslav Ovsiienko 		arc_type = mlx5_flex_arc_type(rte_item->type, false);
1277b293e8e4SViacheslav Ovsiienko 		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
1278b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1279b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1280b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item OUT arc type");
1281b293e8e4SViacheslav Ovsiienko 		arc->arc_parse_graph_node = arc_type;
1282b293e8e4SViacheslav Ovsiienko 		arc->start_inner_tunnel = !!is_tunnel;
1283b293e8e4SViacheslav Ovsiienko 		arc->compare_condition_value = link->next;
1284b293e8e4SViacheslav Ovsiienko 	}
1285b293e8e4SViacheslav Ovsiienko 	return 0;
1286b293e8e4SViacheslav Ovsiienko }
1287b293e8e4SViacheslav Ovsiienko 
1288b293e8e4SViacheslav Ovsiienko /* Translate RTE flex item API configuration into flex parser settings. */
1289b293e8e4SViacheslav Ovsiienko static int
1290b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_conf(struct rte_eth_dev *dev,
1291b293e8e4SViacheslav Ovsiienko 			 const struct rte_flow_item_flex_conf *conf,
1292b293e8e4SViacheslav Ovsiienko 			 struct mlx5_flex_parser_devx *devx,
1293b293e8e4SViacheslav Ovsiienko 			 struct mlx5_flex_item *item,
1294b293e8e4SViacheslav Ovsiienko 			 struct rte_flow_error *error)
1295b293e8e4SViacheslav Ovsiienko {
1296b293e8e4SViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
129753820561SMichael Baum 	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
1298b293e8e4SViacheslav Ovsiienko 	int ret;
1299b293e8e4SViacheslav Ovsiienko 
1300b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_length(attr, conf, devx, error);
1301b293e8e4SViacheslav Ovsiienko 	if (ret)
1302b293e8e4SViacheslav Ovsiienko 		return ret;
1303b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_next(attr, conf, devx, error);
1304b293e8e4SViacheslav Ovsiienko 	if (ret)
1305b293e8e4SViacheslav Ovsiienko 		return ret;
1306b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
1307b293e8e4SViacheslav Ovsiienko 	if (ret)
1308b293e8e4SViacheslav Ovsiienko 		return ret;
1309b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
1310b293e8e4SViacheslav Ovsiienko 	if (ret)
1311b293e8e4SViacheslav Ovsiienko 		return ret;
1312b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
1313b293e8e4SViacheslav Ovsiienko 	if (ret)
1314b293e8e4SViacheslav Ovsiienko 		return ret;
1315b293e8e4SViacheslav Ovsiienko 	return 0;
1316b293e8e4SViacheslav Ovsiienko }
1317b293e8e4SViacheslav Ovsiienko 
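/*
 * A minimal usage sketch from the application side (illustrative only,
 * the configuration shown as an ellipsis must be filled with the real
 * header length, sample and link setup for the custom protocol):
 *
 *	struct rte_flow_error error;
 *	struct rte_flow_item_flex_conf conf = {
 *		.tunnel = FLEX_TUNNEL_MODE_SINGLE,
 *		... next_header, sample_data, input/output links ...
 *	};
 *	struct rte_flow_item_flex_handle *handle =
 *		rte_flow_flex_item_create(port_id, &conf, &error);
 *
 * The generic rte_flow_flex_item_create() API is expected to dispatch
 * here through the PMD flex item callbacks.
 */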
1318db25cadcSViacheslav Ovsiienko /**
1319db25cadcSViacheslav Ovsiienko  * Create the flex item with specified configuration over the Ethernet device.
1320db25cadcSViacheslav Ovsiienko  *
1321db25cadcSViacheslav Ovsiienko  * @param dev
1322db25cadcSViacheslav Ovsiienko  *   Ethernet device to create flex item on.
1323db25cadcSViacheslav Ovsiienko  * @param[in] conf
1324db25cadcSViacheslav Ovsiienko  *   Flex item configuration.
1325db25cadcSViacheslav Ovsiienko  * @param[out] error
1326db25cadcSViacheslav Ovsiienko  *   Perform verbose error reporting if not NULL. PMDs initialize this
1327db25cadcSViacheslav Ovsiienko  *   structure in case of error only.
1328db25cadcSViacheslav Ovsiienko  *
1329db25cadcSViacheslav Ovsiienko  * @return
1330db25cadcSViacheslav Ovsiienko  *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
1331db25cadcSViacheslav Ovsiienko  */
1332db25cadcSViacheslav Ovsiienko struct rte_flow_item_flex_handle *
1333db25cadcSViacheslav Ovsiienko flow_dv_item_create(struct rte_eth_dev *dev,
1334db25cadcSViacheslav Ovsiienko 		    const struct rte_flow_item_flex_conf *conf,
1335db25cadcSViacheslav Ovsiienko 		    struct rte_flow_error *error)
1336db25cadcSViacheslav Ovsiienko {
1337db25cadcSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
13389086ac09SGregory Etelson 	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
1339db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_item *flex;
13409086ac09SGregory Etelson 	struct mlx5_list_entry *ent;
1341db25cadcSViacheslav Ovsiienko 
1342db25cadcSViacheslav Ovsiienko 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1343db25cadcSViacheslav Ovsiienko 	flex = mlx5_flex_alloc(priv);
1344db25cadcSViacheslav Ovsiienko 	if (!flex) {
1345db25cadcSViacheslav Ovsiienko 		rte_flow_error_set(error, ENOMEM,
1346db25cadcSViacheslav Ovsiienko 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1347db25cadcSViacheslav Ovsiienko 				   "too many flex items created on the port");
1348db25cadcSViacheslav Ovsiienko 		return NULL;
1349db25cadcSViacheslav Ovsiienko 	}
1350b293e8e4SViacheslav Ovsiienko 	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
1351b293e8e4SViacheslav Ovsiienko 		goto error;
13529086ac09SGregory Etelson 	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
13539086ac09SGregory Etelson 	if (!ent) {
13549086ac09SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
13559086ac09SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13569086ac09SGregory Etelson 				   "flex item creation failure");
13579086ac09SGregory Etelson 		goto error;
13589086ac09SGregory Etelson 	}
13599086ac09SGregory Etelson 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
1360db25cadcSViacheslav Ovsiienko 	/* Mark initialized flex item valid. */
1361e12a0166STyler Retzlaff 	rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
1362db25cadcSViacheslav Ovsiienko 	return (struct rte_flow_item_flex_handle *)flex;
13639086ac09SGregory Etelson 
13649086ac09SGregory Etelson error:
13659086ac09SGregory Etelson 	mlx5_flex_free(priv, flex);
13669086ac09SGregory Etelson 	return NULL;
1367db25cadcSViacheslav Ovsiienko }
1368db25cadcSViacheslav Ovsiienko 
1369db25cadcSViacheslav Ovsiienko /**
1370db25cadcSViacheslav Ovsiienko  * Release the flex item on the specified Ethernet device.
1371db25cadcSViacheslav Ovsiienko  *
1372db25cadcSViacheslav Ovsiienko  * @param dev
1373db25cadcSViacheslav Ovsiienko  *   Ethernet device to destroy flex item on.
1374db25cadcSViacheslav Ovsiienko  * @param[in] handle
1375db25cadcSViacheslav Ovsiienko  *   Handle of the item existing on the specified device.
1376db25cadcSViacheslav Ovsiienko  * @param[out] error
1377db25cadcSViacheslav Ovsiienko  *   Perform verbose error reporting if not NULL. PMDs initialize this
1378db25cadcSViacheslav Ovsiienko  *   structure in case of error only.
1379db25cadcSViacheslav Ovsiienko  *
1380db25cadcSViacheslav Ovsiienko  * @return
1381db25cadcSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
1382db25cadcSViacheslav Ovsiienko  */
1383db25cadcSViacheslav Ovsiienko int
1384db25cadcSViacheslav Ovsiienko flow_dv_item_release(struct rte_eth_dev *dev,
1385db25cadcSViacheslav Ovsiienko 		     const struct rte_flow_item_flex_handle *handle,
1386db25cadcSViacheslav Ovsiienko 		     struct rte_flow_error *error)
1387db25cadcSViacheslav Ovsiienko {
1388db25cadcSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
1389db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_item *flex =
1390db25cadcSViacheslav Ovsiienko 		(struct mlx5_flex_item *)(uintptr_t)handle;
1391db25cadcSViacheslav Ovsiienko 	uint32_t old_refcnt = 1;
13929086ac09SGregory Etelson 	int rc;
1393db25cadcSViacheslav Ovsiienko 
1394db25cadcSViacheslav Ovsiienko 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1395db25cadcSViacheslav Ovsiienko 	rte_spinlock_lock(&priv->flex_item_sl);
1396db25cadcSViacheslav Ovsiienko 	if (mlx5_flex_index(priv, flex) < 0) {
1397db25cadcSViacheslav Ovsiienko 		rte_spinlock_unlock(&priv->flex_item_sl);
1398db25cadcSViacheslav Ovsiienko 		return rte_flow_error_set(error, EINVAL,
1399db25cadcSViacheslav Ovsiienko 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1400db25cadcSViacheslav Ovsiienko 					  "invalid flex item handle value");
1401db25cadcSViacheslav Ovsiienko 	}
1402e12a0166STyler Retzlaff 	if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
1403e12a0166STyler Retzlaff 					 rte_memory_order_acquire, rte_memory_order_relaxed)) {
1404db25cadcSViacheslav Ovsiienko 		rte_spinlock_unlock(&priv->flex_item_sl);
1405db25cadcSViacheslav Ovsiienko 		return rte_flow_error_set(error, EBUSY,
1406db25cadcSViacheslav Ovsiienko 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1407db25cadcSViacheslav Ovsiienko 					  "flex item has flow references");
1408db25cadcSViacheslav Ovsiienko 	}
1409db25cadcSViacheslav Ovsiienko 	/* Flex item is marked as invalid, we can leave locked section. */
1410db25cadcSViacheslav Ovsiienko 	rte_spinlock_unlock(&priv->flex_item_sl);
14119086ac09SGregory Etelson 	MLX5_ASSERT(flex->devx_fp);
14129086ac09SGregory Etelson 	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
14139086ac09SGregory Etelson 				  &flex->devx_fp->entry);
14149086ac09SGregory Etelson 	flex->devx_fp = NULL;
1415db25cadcSViacheslav Ovsiienko 	mlx5_flex_free(priv, flex);
14169086ac09SGregory Etelson 	if (rc < 0)
14179086ac09SGregory Etelson 		return rte_flow_error_set(error, EBUSY,
14189086ac09SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
14199086ac09SGregory Etelson 					  "flex item release failure");
1420db25cadcSViacheslav Ovsiienko 	return 0;
1421db25cadcSViacheslav Ovsiienko }
14229086ac09SGregory Etelson 
14239086ac09SGregory Etelson /* DevX flex parser list callbacks. */
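/*
 * Create a DevX flex parser node for the requested configuration and
 * query the firmware-assigned sample IDs, plus the per-ID sample info
 * when the HCA reports the query capability.
 */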
14249086ac09SGregory Etelson struct mlx5_list_entry *
14259086ac09SGregory Etelson mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
14269086ac09SGregory Etelson {
14279086ac09SGregory Etelson 	struct mlx5_dev_ctx_shared *sh = list_ctx;
14289086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp, *conf = ctx;
1429bc0a9303SRongwei Liu 	uint32_t i;
1430bc0a9303SRongwei Liu 	uint8_t sample_info = sh->cdev->config.hca_attr.flex.query_match_sample_info;
14319086ac09SGregory Etelson 	int ret;
14329086ac09SGregory Etelson 
14339086ac09SGregory Etelson 	fp = mlx5_malloc(MLX5_MEM_ZERO,	sizeof(struct mlx5_flex_parser_devx),
14349086ac09SGregory Etelson 			 0, SOCKET_ID_ANY);
14359086ac09SGregory Etelson 	if (!fp)
14369086ac09SGregory Etelson 		return NULL;
14379086ac09SGregory Etelson 	/* Copy the requested configurations. */
14389086ac09SGregory Etelson 	fp->num_samples = conf->num_samples;
14399086ac09SGregory Etelson 	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
14409086ac09SGregory Etelson 	/* Create DevX flex parser. */
14419086ac09SGregory Etelson 	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
14429086ac09SGregory Etelson 							&fp->devx_conf);
14439086ac09SGregory Etelson 	if (!fp->devx_obj)
14449086ac09SGregory Etelson 		goto error;
14459086ac09SGregory Etelson 	/* Query the firmware assigned sample ids. */
14469086ac09SGregory Etelson 	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
14479086ac09SGregory Etelson 						fp->sample_ids,
1448f1324a17SRongwei Liu 						fp->num_samples,
1449f1324a17SRongwei Liu 						&fp->anchor_id);
14509086ac09SGregory Etelson 	if (ret)
14519086ac09SGregory Etelson 		goto error;
1452bc0a9303SRongwei Liu 	/* Query sample information per ID. */
1453bc0a9303SRongwei Liu 	for (i = 0; i < fp->num_samples && sample_info; i++) {
1454bc0a9303SRongwei Liu 		ret = mlx5_devx_cmd_match_sample_info_query(sh->cdev->ctx, fp->sample_ids[i],
1455bc0a9303SRongwei Liu 							    &fp->sample_info[i]);
1456bc0a9303SRongwei Liu 		if (ret)
1457bc0a9303SRongwei Liu 			goto error;
1458bc0a9303SRongwei Liu 	}
14599086ac09SGregory Etelson 	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
14609086ac09SGregory Etelson 		(const void *)fp, fp->num_samples);
14619086ac09SGregory Etelson 	return &fp->entry;
14629086ac09SGregory Etelson error:
14639086ac09SGregory Etelson 	if (fp->devx_obj)
14649086ac09SGregory Etelson 		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
14669086ac09SGregory Etelson 	mlx5_free(fp);
14679086ac09SGregory Etelson 	return NULL;
14689086ac09SGregory Etelson }
14699086ac09SGregory Etelson 
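/*
 * Compare a cached parser configuration against the requested one,
 * return zero on match.
 */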
14709086ac09SGregory Etelson int
14719086ac09SGregory Etelson mlx5_flex_parser_match_cb(void *list_ctx,
14729086ac09SGregory Etelson 			  struct mlx5_list_entry *iter, void *ctx)
14739086ac09SGregory Etelson {
14749086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
14759086ac09SGregory Etelson 		container_of(iter, struct mlx5_flex_parser_devx, entry);
14769086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *org =
14779086ac09SGregory Etelson 		container_of(ctx, struct mlx5_flex_parser_devx, entry);
14789086ac09SGregory Etelson 
14799086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
14809086ac09SGregory Etelson 	return !iter || !ctx || memcmp(&fp->devx_conf,
14819086ac09SGregory Etelson 				       &org->devx_conf,
14829086ac09SGregory Etelson 				       sizeof(fp->devx_conf));
14839086ac09SGregory Etelson }
14849086ac09SGregory Etelson 
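/* Destroy the DevX flex parser node and free the cached entry. */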
14859086ac09SGregory Etelson void
14869086ac09SGregory Etelson mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
14879086ac09SGregory Etelson {
14889086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
14899086ac09SGregory Etelson 		container_of(entry, struct mlx5_flex_parser_devx, entry);
14909086ac09SGregory Etelson 
14919086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
14929086ac09SGregory Etelson 	MLX5_ASSERT(fp->devx_obj);
14939086ac09SGregory Etelson 	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
14949086ac09SGregory Etelson 	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
14959086ac09SGregory Etelson 	mlx5_free(entry);
14969086ac09SGregory Etelson }
14979086ac09SGregory Etelson 
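/* Duplicate the parser entry for the list local cache. */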
14989086ac09SGregory Etelson struct mlx5_list_entry *
14999086ac09SGregory Etelson mlx5_flex_parser_clone_cb(void *list_ctx,
15009086ac09SGregory Etelson 			  struct mlx5_list_entry *entry, void *ctx)
15019086ac09SGregory Etelson {
15029086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp;
15039086ac09SGregory Etelson 
15049086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
15059086ac09SGregory Etelson 	RTE_SET_USED(entry);
15069086ac09SGregory Etelson 	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
15079086ac09SGregory Etelson 			 0, SOCKET_ID_ANY);
15089086ac09SGregory Etelson 	if (!fp)
15099086ac09SGregory Etelson 		return NULL;
15109086ac09SGregory Etelson 	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
15119086ac09SGregory Etelson 	return &fp->entry;
15129086ac09SGregory Etelson }
15139086ac09SGregory Etelson 
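/* Free a clone created by mlx5_flex_parser_clone_cb(). */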
15149086ac09SGregory Etelson void
15159086ac09SGregory Etelson mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
15169086ac09SGregory Etelson {
15179086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
15189086ac09SGregory Etelson 		container_of(entry, struct mlx5_flex_parser_devx, entry);
15199086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
15209086ac09SGregory Etelson 	mlx5_free(fp);
15219086ac09SGregory Etelson }
1522