xref: /dpdk/drivers/net/mlx5/mlx5_flow_flex.c (revision b04b06f4cb3f3bdd24228f3ca2ec5b3a7b64308d)
1db25cadcSViacheslav Ovsiienko /* SPDX-License-Identifier: BSD-3-Clause
2db25cadcSViacheslav Ovsiienko  * Copyright (c) 2021 NVIDIA Corporation & Affiliates
3db25cadcSViacheslav Ovsiienko  */
4db25cadcSViacheslav Ovsiienko #include <rte_malloc.h>
5db25cadcSViacheslav Ovsiienko #include <mlx5_devx_cmds.h>
6db25cadcSViacheslav Ovsiienko #include <mlx5_malloc.h>
7db25cadcSViacheslav Ovsiienko #include "mlx5.h"
8db25cadcSViacheslav Ovsiienko #include "mlx5_flow.h"
9db25cadcSViacheslav Ovsiienko 
10db25cadcSViacheslav Ovsiienko static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
11db25cadcSViacheslav Ovsiienko 	      "Flex item maximal number exceeds uint32_t bit width");
12db25cadcSViacheslav Ovsiienko 
13db25cadcSViacheslav Ovsiienko /**
14db25cadcSViacheslav Ovsiienko  *  Routine called once on port initialization to init flex item
15db25cadcSViacheslav Ovsiienko  *  related infrastructure initialization
16db25cadcSViacheslav Ovsiienko  *
17db25cadcSViacheslav Ovsiienko  * @param dev
18db25cadcSViacheslav Ovsiienko  *   Ethernet device to perform flex item initialization
19db25cadcSViacheslav Ovsiienko  *
20db25cadcSViacheslav Ovsiienko  * @return
21db25cadcSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
22db25cadcSViacheslav Ovsiienko  */
23db25cadcSViacheslav Ovsiienko int
24db25cadcSViacheslav Ovsiienko mlx5_flex_item_port_init(struct rte_eth_dev *dev)
25db25cadcSViacheslav Ovsiienko {
26db25cadcSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
27db25cadcSViacheslav Ovsiienko 
28db25cadcSViacheslav Ovsiienko 	rte_spinlock_init(&priv->flex_item_sl);
29db25cadcSViacheslav Ovsiienko 	MLX5_ASSERT(!priv->flex_item_map);
30db25cadcSViacheslav Ovsiienko 	return 0;
31db25cadcSViacheslav Ovsiienko }
32db25cadcSViacheslav Ovsiienko 
33db25cadcSViacheslav Ovsiienko /**
34db25cadcSViacheslav Ovsiienko  *  Routine called once on port close to perform flex item
35db25cadcSViacheslav Ovsiienko  *  related infrastructure cleanup.
36db25cadcSViacheslav Ovsiienko  *
37db25cadcSViacheslav Ovsiienko  * @param dev
38db25cadcSViacheslav Ovsiienko  *   Ethernet device to perform cleanup
39db25cadcSViacheslav Ovsiienko  */
40db25cadcSViacheslav Ovsiienko void
41db25cadcSViacheslav Ovsiienko mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
42db25cadcSViacheslav Ovsiienko {
43db25cadcSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
44db25cadcSViacheslav Ovsiienko 	uint32_t i;
45db25cadcSViacheslav Ovsiienko 
46db25cadcSViacheslav Ovsiienko 	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
47db25cadcSViacheslav Ovsiienko 		if (priv->flex_item_map & (1 << i)) {
489086ac09SGregory Etelson 			struct mlx5_flex_item *flex = &priv->flex_item[i];
499086ac09SGregory Etelson 
509086ac09SGregory Etelson 			claim_zero(mlx5_list_unregister
519086ac09SGregory Etelson 					(priv->sh->flex_parsers_dv,
529086ac09SGregory Etelson 					 &flex->devx_fp->entry));
539086ac09SGregory Etelson 			flex->devx_fp = NULL;
549086ac09SGregory Etelson 			flex->refcnt = 0;
55db25cadcSViacheslav Ovsiienko 			priv->flex_item_map &= ~(1 << i);
56db25cadcSViacheslav Ovsiienko 		}
57db25cadcSViacheslav Ovsiienko 	}
58db25cadcSViacheslav Ovsiienko }
59db25cadcSViacheslav Ovsiienko 
60db25cadcSViacheslav Ovsiienko static int
61db25cadcSViacheslav Ovsiienko mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
62db25cadcSViacheslav Ovsiienko {
63db25cadcSViacheslav Ovsiienko 	uintptr_t start = (uintptr_t)&priv->flex_item[0];
64db25cadcSViacheslav Ovsiienko 	uintptr_t entry = (uintptr_t)item;
65db25cadcSViacheslav Ovsiienko 	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
66db25cadcSViacheslav Ovsiienko 
67db25cadcSViacheslav Ovsiienko 	if (entry < start ||
68db25cadcSViacheslav Ovsiienko 	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
69db25cadcSViacheslav Ovsiienko 	    (entry - start) % sizeof(struct mlx5_flex_item) ||
70db25cadcSViacheslav Ovsiienko 	    !(priv->flex_item_map & (1u << idx)))
71db25cadcSViacheslav Ovsiienko 		return -1;
72db25cadcSViacheslav Ovsiienko 	return (int)idx;
73db25cadcSViacheslav Ovsiienko }
74db25cadcSViacheslav Ovsiienko 
75db25cadcSViacheslav Ovsiienko static struct mlx5_flex_item *
76db25cadcSViacheslav Ovsiienko mlx5_flex_alloc(struct mlx5_priv *priv)
77db25cadcSViacheslav Ovsiienko {
78db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_item *item = NULL;
79db25cadcSViacheslav Ovsiienko 
80db25cadcSViacheslav Ovsiienko 	rte_spinlock_lock(&priv->flex_item_sl);
81db25cadcSViacheslav Ovsiienko 	if (~priv->flex_item_map) {
82db25cadcSViacheslav Ovsiienko 		uint32_t idx = rte_bsf32(~priv->flex_item_map);
83db25cadcSViacheslav Ovsiienko 
84db25cadcSViacheslav Ovsiienko 		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
85db25cadcSViacheslav Ovsiienko 			item = &priv->flex_item[idx];
86db25cadcSViacheslav Ovsiienko 			MLX5_ASSERT(!item->refcnt);
87db25cadcSViacheslav Ovsiienko 			MLX5_ASSERT(!item->devx_fp);
88db25cadcSViacheslav Ovsiienko 			item->devx_fp = NULL;
89e12a0166STyler Retzlaff 			rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
90db25cadcSViacheslav Ovsiienko 			priv->flex_item_map |= 1u << idx;
91db25cadcSViacheslav Ovsiienko 		}
92db25cadcSViacheslav Ovsiienko 	}
93db25cadcSViacheslav Ovsiienko 	rte_spinlock_unlock(&priv->flex_item_sl);
94db25cadcSViacheslav Ovsiienko 	return item;
95db25cadcSViacheslav Ovsiienko }
96db25cadcSViacheslav Ovsiienko 
97db25cadcSViacheslav Ovsiienko static void
98db25cadcSViacheslav Ovsiienko mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
99db25cadcSViacheslav Ovsiienko {
100db25cadcSViacheslav Ovsiienko 	int idx = mlx5_flex_index(priv, item);
101db25cadcSViacheslav Ovsiienko 
102db25cadcSViacheslav Ovsiienko 	MLX5_ASSERT(idx >= 0 &&
103db25cadcSViacheslav Ovsiienko 		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
104db25cadcSViacheslav Ovsiienko 		    (priv->flex_item_map & (1u << idx)));
105db25cadcSViacheslav Ovsiienko 	if (idx >= 0) {
106db25cadcSViacheslav Ovsiienko 		rte_spinlock_lock(&priv->flex_item_sl);
107db25cadcSViacheslav Ovsiienko 		MLX5_ASSERT(!item->refcnt);
108db25cadcSViacheslav Ovsiienko 		MLX5_ASSERT(!item->devx_fp);
109db25cadcSViacheslav Ovsiienko 		item->devx_fp = NULL;
110e12a0166STyler Retzlaff 		rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
111db25cadcSViacheslav Ovsiienko 		priv->flex_item_map &= ~(1u << idx);
112db25cadcSViacheslav Ovsiienko 		rte_spinlock_unlock(&priv->flex_item_sl);
113db25cadcSViacheslav Ovsiienko 	}
114db25cadcSViacheslav Ovsiienko }
115db25cadcSViacheslav Ovsiienko 
1166dac7d7fSViacheslav Ovsiienko static uint32_t
1176dac7d7fSViacheslav Ovsiienko mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
1186dac7d7fSViacheslav Ovsiienko 		       uint32_t pos, uint32_t width, uint32_t shift)
1196dac7d7fSViacheslav Ovsiienko {
1206dac7d7fSViacheslav Ovsiienko 	const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
12197e19f07SViacheslav Ovsiienko 	uint32_t val, vbits, skip = pos % CHAR_BIT;
1226dac7d7fSViacheslav Ovsiienko 
1236dac7d7fSViacheslav Ovsiienko 	/* Proceed the bitfield start byte. */
1246dac7d7fSViacheslav Ovsiienko 	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
1256dac7d7fSViacheslav Ovsiienko 	MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
1266dac7d7fSViacheslav Ovsiienko 	if (item->length <= pos / CHAR_BIT)
1276dac7d7fSViacheslav Ovsiienko 		return 0;
12897e19f07SViacheslav Ovsiienko 	/* Bits are enumerated in byte in network order: 01234567 */
12997e19f07SViacheslav Ovsiienko 	val = *ptr++;
1306dac7d7fSViacheslav Ovsiienko 	vbits = CHAR_BIT - pos % CHAR_BIT;
13197e19f07SViacheslav Ovsiienko 	pos = RTE_ALIGN_CEIL(pos, CHAR_BIT) / CHAR_BIT;
1326dac7d7fSViacheslav Ovsiienko 	vbits = RTE_MIN(vbits, width);
13397e19f07SViacheslav Ovsiienko 	/* Load bytes to cover the field width, checking pattern boundary */
1346dac7d7fSViacheslav Ovsiienko 	while (vbits < width && pos < item->length) {
1356dac7d7fSViacheslav Ovsiienko 		uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
1366dac7d7fSViacheslav Ovsiienko 		uint32_t tmp = *ptr++;
1376dac7d7fSViacheslav Ovsiienko 
13897e19f07SViacheslav Ovsiienko 		val |= tmp << RTE_ALIGN_CEIL(vbits, CHAR_BIT);
1396dac7d7fSViacheslav Ovsiienko 		vbits += part;
14097e19f07SViacheslav Ovsiienko 		pos++;
1416dac7d7fSViacheslav Ovsiienko 	}
14297e19f07SViacheslav Ovsiienko 	val = rte_cpu_to_be_32(val);
14397e19f07SViacheslav Ovsiienko 	val <<= skip;
14497e19f07SViacheslav Ovsiienko 	val >>= shift;
14597e19f07SViacheslav Ovsiienko 	val &= (RTE_BIT64(width) - 1) << (sizeof(uint32_t) * CHAR_BIT - shift - width);
14697e19f07SViacheslav Ovsiienko 	return val;
1476dac7d7fSViacheslav Ovsiienko }
1486dac7d7fSViacheslav Ovsiienko 
1496dac7d7fSViacheslav Ovsiienko #define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
1506dac7d7fSViacheslav Ovsiienko 	do { \
1516dac7d7fSViacheslav Ovsiienko 		uint32_t tmp, out = (def); \
1526dac7d7fSViacheslav Ovsiienko 		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
1536dac7d7fSViacheslav Ovsiienko 			       prog_sample_field_value_##x); \
1546dac7d7fSViacheslav Ovsiienko 		tmp = (tmp & ~out) | (val); \
1556dac7d7fSViacheslav Ovsiienko 		MLX5_SET(fte_match_set_misc4, misc4_v, \
1566dac7d7fSViacheslav Ovsiienko 			 prog_sample_field_value_##x, tmp); \
1576dac7d7fSViacheslav Ovsiienko 		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
1586dac7d7fSViacheslav Ovsiienko 			       prog_sample_field_value_##x); \
1596dac7d7fSViacheslav Ovsiienko 		tmp = (tmp & ~out) | (msk); \
1606dac7d7fSViacheslav Ovsiienko 		MLX5_SET(fte_match_set_misc4, misc4_m, \
1616dac7d7fSViacheslav Ovsiienko 			 prog_sample_field_value_##x, tmp); \
1626dac7d7fSViacheslav Ovsiienko 		tmp = tmp ? (sid) : 0; \
1636dac7d7fSViacheslav Ovsiienko 		MLX5_SET(fte_match_set_misc4, misc4_v, \
1646dac7d7fSViacheslav Ovsiienko 			 prog_sample_field_id_##x, tmp);\
1656dac7d7fSViacheslav Ovsiienko 		MLX5_SET(fte_match_set_misc4, misc4_m, \
1666dac7d7fSViacheslav Ovsiienko 			 prog_sample_field_id_##x, tmp); \
1676dac7d7fSViacheslav Ovsiienko 	} while (0)
1686dac7d7fSViacheslav Ovsiienko 
1696dac7d7fSViacheslav Ovsiienko __rte_always_inline static void
1706dac7d7fSViacheslav Ovsiienko mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
1716dac7d7fSViacheslav Ovsiienko 			   uint32_t def, uint32_t mask, uint32_t value,
1726dac7d7fSViacheslav Ovsiienko 			   uint32_t sample_id, uint32_t id)
1736dac7d7fSViacheslav Ovsiienko {
1746dac7d7fSViacheslav Ovsiienko 	switch (id) {
1756dac7d7fSViacheslav Ovsiienko 	case 0:
1766dac7d7fSViacheslav Ovsiienko 		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
1776dac7d7fSViacheslav Ovsiienko 		break;
1786dac7d7fSViacheslav Ovsiienko 	case 1:
1796dac7d7fSViacheslav Ovsiienko 		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
1806dac7d7fSViacheslav Ovsiienko 		break;
1816dac7d7fSViacheslav Ovsiienko 	case 2:
1826dac7d7fSViacheslav Ovsiienko 		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
1836dac7d7fSViacheslav Ovsiienko 		break;
1846dac7d7fSViacheslav Ovsiienko 	case 3:
1856dac7d7fSViacheslav Ovsiienko 		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
1866dac7d7fSViacheslav Ovsiienko 		break;
1876dac7d7fSViacheslav Ovsiienko 	case 4:
1886dac7d7fSViacheslav Ovsiienko 		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
1896dac7d7fSViacheslav Ovsiienko 		break;
1906dac7d7fSViacheslav Ovsiienko 	case 5:
1916dac7d7fSViacheslav Ovsiienko 		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
1926dac7d7fSViacheslav Ovsiienko 		break;
1936dac7d7fSViacheslav Ovsiienko 	case 6:
1946dac7d7fSViacheslav Ovsiienko 		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
1956dac7d7fSViacheslav Ovsiienko 		break;
1966dac7d7fSViacheslav Ovsiienko 	case 7:
1976dac7d7fSViacheslav Ovsiienko 		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
1986dac7d7fSViacheslav Ovsiienko 		break;
1996dac7d7fSViacheslav Ovsiienko 	default:
2006dac7d7fSViacheslav Ovsiienko 		MLX5_ASSERT(false);
2016dac7d7fSViacheslav Ovsiienko 		break;
2026dac7d7fSViacheslav Ovsiienko 	}
2036dac7d7fSViacheslav Ovsiienko #undef SET_FP_MATCH_SAMPLE_ID
2046dac7d7fSViacheslav Ovsiienko }
2058c0ca752SRongwei Liu 
2068c0ca752SRongwei Liu /**
2078c0ca752SRongwei Liu  * Get the flex parser sample id and corresponding mask
2088c0ca752SRongwei Liu  * per shift and width information.
2098c0ca752SRongwei Liu  *
2108c0ca752SRongwei Liu  * @param[in] tp
2118c0ca752SRongwei Liu  *   Mlx5 flex item sample mapping handle.
2128c0ca752SRongwei Liu  * @param[in] idx
2138c0ca752SRongwei Liu  *   Mapping index.
2148c0ca752SRongwei Liu  * @param[in, out] pos
2158c0ca752SRongwei Liu  *   Where to search the value and mask.
2168c0ca752SRongwei Liu  * @param[in] is_inner
2178c0ca752SRongwei Liu  *   For inner matching or not.
2188c0ca752SRongwei Liu  *
2198c0ca752SRongwei Liu  * @return
2208c0ca752SRongwei Liu  *   0 on success, -1 to ignore.
2218c0ca752SRongwei Liu  */
2228c0ca752SRongwei Liu int
2238c0ca752SRongwei Liu mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
22497e19f07SViacheslav Ovsiienko 			uint32_t idx, uint32_t *pos, bool is_inner)
2258c0ca752SRongwei Liu {
2268c0ca752SRongwei Liu 	const struct mlx5_flex_pattern_field *map = tp->map + idx;
2278c0ca752SRongwei Liu 	uint32_t id = map->reg_id;
2288c0ca752SRongwei Liu 
2298c0ca752SRongwei Liu 	/* Skip placeholders for DUMMY fields. */
2308c0ca752SRongwei Liu 	if (id == MLX5_INVALID_SAMPLE_REG_ID) {
2318c0ca752SRongwei Liu 		*pos += map->width;
2328c0ca752SRongwei Liu 		return -1;
2338c0ca752SRongwei Liu 	}
2348c0ca752SRongwei Liu 	MLX5_ASSERT(map->width);
2358c0ca752SRongwei Liu 	MLX5_ASSERT(id < tp->devx_fp->num_samples);
2368c0ca752SRongwei Liu 	if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
2378c0ca752SRongwei Liu 		uint32_t num_samples = tp->devx_fp->num_samples / 2;
2388c0ca752SRongwei Liu 
2398c0ca752SRongwei Liu 		MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
2408c0ca752SRongwei Liu 		MLX5_ASSERT(id < num_samples);
2418c0ca752SRongwei Liu 		id += num_samples;
2428c0ca752SRongwei Liu 	}
2438c0ca752SRongwei Liu 	return id;
2448c0ca752SRongwei Liu }
2458c0ca752SRongwei Liu 
2468c0ca752SRongwei Liu /**
2478c0ca752SRongwei Liu  * Get the flex parser mapping value per definer format_select_dw.
2488c0ca752SRongwei Liu  *
2498c0ca752SRongwei Liu  * @param[in] item
2508c0ca752SRongwei Liu  *   Rte flex item pointer.
2518c0ca752SRongwei Liu  * @param[in] flex
2528c0ca752SRongwei Liu  *   Mlx5 flex item sample mapping handle.
2538c0ca752SRongwei Liu  * @param[in] byte_off
2548c0ca752SRongwei Liu  *   Mlx5 flex item format_select_dw.
2558c0ca752SRongwei Liu  * @param[in] tunnel
2568c0ca752SRongwei Liu  *   Tunnel mode or not.
2578c0ca752SRongwei Liu  * @param[in, def] value
2588c0ca752SRongwei Liu  *   Value calculated for this flex parser, either spec or mask.
2598c0ca752SRongwei Liu  *
2608c0ca752SRongwei Liu  * @return
2618c0ca752SRongwei Liu  *   0 on success, -1 for error.
2628c0ca752SRongwei Liu  */
2638c0ca752SRongwei Liu int
2648c0ca752SRongwei Liu mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
2658c0ca752SRongwei Liu 					void *flex, uint32_t byte_off,
26697e19f07SViacheslav Ovsiienko 					bool tunnel, uint32_t *value)
2678c0ca752SRongwei Liu {
2688c0ca752SRongwei Liu 	struct mlx5_flex_pattern_field *map;
2698c0ca752SRongwei Liu 	struct mlx5_flex_item *tp = flex;
27097e19f07SViacheslav Ovsiienko 	uint32_t i, pos, val;
2718c0ca752SRongwei Liu 	int id;
2728c0ca752SRongwei Liu 
2738c0ca752SRongwei Liu 	*value = 0;
2748c0ca752SRongwei Liu 	for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) {
2758c0ca752SRongwei Liu 		map = tp->map + i;
27697e19f07SViacheslav Ovsiienko 		id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel);
2778c0ca752SRongwei Liu 		if (id == -1)
2788c0ca752SRongwei Liu 			continue;
2798c0ca752SRongwei Liu 		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
2808c0ca752SRongwei Liu 			return -1;
281bc0a9303SRongwei Liu 		if (byte_off == tp->devx_fp->sample_info[id].sample_dw_data * sizeof(uint32_t)) {
2828c0ca752SRongwei Liu 			val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift);
2838c0ca752SRongwei Liu 			*value |= val;
2848c0ca752SRongwei Liu 		}
2858c0ca752SRongwei Liu 		pos += map->width;
2868c0ca752SRongwei Liu 	}
2878c0ca752SRongwei Liu 	return 0;
2888c0ca752SRongwei Liu }
2898c0ca752SRongwei Liu 
2906dac7d7fSViacheslav Ovsiienko /**
291850233acSViacheslav Ovsiienko  * Get the flex parser tunnel mode.
292850233acSViacheslav Ovsiienko  *
293850233acSViacheslav Ovsiienko  * @param[in] item
294850233acSViacheslav Ovsiienko  *   RTE Flex item.
295850233acSViacheslav Ovsiienko  * @param[in, out] tunnel_mode
296850233acSViacheslav Ovsiienko  *   Pointer to return tunnel mode.
297850233acSViacheslav Ovsiienko  *
298850233acSViacheslav Ovsiienko  * @return
299850233acSViacheslav Ovsiienko  *   0 on success, otherwise negative error code.
300850233acSViacheslav Ovsiienko  */
301850233acSViacheslav Ovsiienko int
302850233acSViacheslav Ovsiienko mlx5_flex_get_tunnel_mode(const struct rte_flow_item *item,
303850233acSViacheslav Ovsiienko 			  enum rte_flow_item_flex_tunnel_mode *tunnel_mode)
304850233acSViacheslav Ovsiienko {
305850233acSViacheslav Ovsiienko 	if (item && item->spec && tunnel_mode) {
306850233acSViacheslav Ovsiienko 		const struct rte_flow_item_flex *spec = item->spec;
307850233acSViacheslav Ovsiienko 		struct mlx5_flex_item *flex = (struct mlx5_flex_item *)spec->handle;
308850233acSViacheslav Ovsiienko 
309850233acSViacheslav Ovsiienko 		if (flex) {
310850233acSViacheslav Ovsiienko 			*tunnel_mode = flex->tunnel_mode;
311850233acSViacheslav Ovsiienko 			return 0;
312850233acSViacheslav Ovsiienko 		}
313850233acSViacheslav Ovsiienko 	}
314850233acSViacheslav Ovsiienko 	return -EINVAL;
315850233acSViacheslav Ovsiienko }
316850233acSViacheslav Ovsiienko 
317850233acSViacheslav Ovsiienko /**
3186dac7d7fSViacheslav Ovsiienko  * Translate item pattern into matcher fields according to translation
3196dac7d7fSViacheslav Ovsiienko  * array.
3206dac7d7fSViacheslav Ovsiienko  *
3216dac7d7fSViacheslav Ovsiienko  * @param dev
3226dac7d7fSViacheslav Ovsiienko  *   Ethernet device to translate flex item on.
3236dac7d7fSViacheslav Ovsiienko  * @param[in, out] matcher
3247be78d02SJosh Soref  *   Flow matcher to configure
3256dac7d7fSViacheslav Ovsiienko  * @param[in, out] key
3266dac7d7fSViacheslav Ovsiienko  *   Flow matcher value.
3276dac7d7fSViacheslav Ovsiienko  * @param[in] item
3286dac7d7fSViacheslav Ovsiienko  *   Flow pattern to translate.
3296dac7d7fSViacheslav Ovsiienko  * @param[in] is_inner
3306dac7d7fSViacheslav Ovsiienko  *   Inner Flex Item (follows after tunnel header).
3316dac7d7fSViacheslav Ovsiienko  *
3326dac7d7fSViacheslav Ovsiienko  * @return
3336dac7d7fSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
3346dac7d7fSViacheslav Ovsiienko  */
3356dac7d7fSViacheslav Ovsiienko void
3366dac7d7fSViacheslav Ovsiienko mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
3376dac7d7fSViacheslav Ovsiienko 			      void *matcher, void *key,
3386dac7d7fSViacheslav Ovsiienko 			      const struct rte_flow_item *item,
3396dac7d7fSViacheslav Ovsiienko 			      bool is_inner)
3406dac7d7fSViacheslav Ovsiienko {
3416dac7d7fSViacheslav Ovsiienko 	const struct rte_flow_item_flex *spec, *mask;
3426dac7d7fSViacheslav Ovsiienko 	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
3436dac7d7fSViacheslav Ovsiienko 				     misc_parameters_4);
3446dac7d7fSViacheslav Ovsiienko 	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
3456dac7d7fSViacheslav Ovsiienko 	struct mlx5_flex_item *tp;
3466dac7d7fSViacheslav Ovsiienko 	uint32_t i, pos = 0;
347f1324a17SRongwei Liu 	uint32_t sample_id;
3486dac7d7fSViacheslav Ovsiienko 
3496dac7d7fSViacheslav Ovsiienko 	RTE_SET_USED(dev);
3506dac7d7fSViacheslav Ovsiienko 	MLX5_ASSERT(item->spec && item->mask);
3516dac7d7fSViacheslav Ovsiienko 	spec = item->spec;
3526dac7d7fSViacheslav Ovsiienko 	mask = item->mask;
3536dac7d7fSViacheslav Ovsiienko 	tp = (struct mlx5_flex_item *)spec->handle;
35497e19f07SViacheslav Ovsiienko 	for (i = 0; i < tp->mapnum && pos < (spec->length * CHAR_BIT); i++) {
3556dac7d7fSViacheslav Ovsiienko 		struct mlx5_flex_pattern_field *map = tp->map + i;
3568c0ca752SRongwei Liu 		uint32_t val, msk, def;
35797e19f07SViacheslav Ovsiienko 		int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner);
3586dac7d7fSViacheslav Ovsiienko 
3598c0ca752SRongwei Liu 		if (id == -1)
3606dac7d7fSViacheslav Ovsiienko 			continue;
3618c0ca752SRongwei Liu 		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
3628c0ca752SRongwei Liu 		if (id >= (int)tp->devx_fp->num_samples ||
3638c0ca752SRongwei Liu 		    id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
3648c0ca752SRongwei Liu 			return;
36597e19f07SViacheslav Ovsiienko 		def = (uint32_t)(RTE_BIT64(map->width) - 1);
36697e19f07SViacheslav Ovsiienko 		def <<= (sizeof(uint32_t) * CHAR_BIT - map->shift - map->width);
3676dac7d7fSViacheslav Ovsiienko 		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
36897e19f07SViacheslav Ovsiienko 		msk = pos < (mask->length * CHAR_BIT) ?
36997e19f07SViacheslav Ovsiienko 		      mlx5_flex_get_bitfield(mask, pos, map->width, map->shift) : def;
370bc0a9303SRongwei Liu 		sample_id = tp->devx_fp->sample_ids[id];
3716dac7d7fSViacheslav Ovsiienko 		mlx5_flex_set_match_sample(misc4_m, misc4_v,
37297e19f07SViacheslav Ovsiienko 					   def, msk, val & msk,
373f1324a17SRongwei Liu 					   sample_id, id);
3746dac7d7fSViacheslav Ovsiienko 		pos += map->width;
3756dac7d7fSViacheslav Ovsiienko 	}
3766dac7d7fSViacheslav Ovsiienko }
3776dac7d7fSViacheslav Ovsiienko 
3786dac7d7fSViacheslav Ovsiienko /**
3796dac7d7fSViacheslav Ovsiienko  * Convert flex item handle (from the RTE flow) to flex item index on port.
3806dac7d7fSViacheslav Ovsiienko  * Optionally can increment flex item object reference count.
3816dac7d7fSViacheslav Ovsiienko  *
3826dac7d7fSViacheslav Ovsiienko  * @param dev
3836dac7d7fSViacheslav Ovsiienko  *   Ethernet device to acquire flex item on.
3846dac7d7fSViacheslav Ovsiienko  * @param[in] handle
3856dac7d7fSViacheslav Ovsiienko  *   Flow item handle from item spec.
3866dac7d7fSViacheslav Ovsiienko  * @param[in] acquire
3876dac7d7fSViacheslav Ovsiienko  *   If set - increment reference counter.
3886dac7d7fSViacheslav Ovsiienko  *
3896dac7d7fSViacheslav Ovsiienko  * @return
3906dac7d7fSViacheslav Ovsiienko  *   >=0 - index on success, a negative errno value otherwise
3916dac7d7fSViacheslav Ovsiienko  *         and rte_errno is set.
3926dac7d7fSViacheslav Ovsiienko  */
3936dac7d7fSViacheslav Ovsiienko int
3946dac7d7fSViacheslav Ovsiienko mlx5_flex_acquire_index(struct rte_eth_dev *dev,
3956dac7d7fSViacheslav Ovsiienko 			struct rte_flow_item_flex_handle *handle,
3966dac7d7fSViacheslav Ovsiienko 			bool acquire)
3976dac7d7fSViacheslav Ovsiienko {
3986dac7d7fSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
3996dac7d7fSViacheslav Ovsiienko 	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
4006dac7d7fSViacheslav Ovsiienko 	int ret = mlx5_flex_index(priv, flex);
4016dac7d7fSViacheslav Ovsiienko 
4026dac7d7fSViacheslav Ovsiienko 	if (ret < 0) {
4036dac7d7fSViacheslav Ovsiienko 		errno = -EINVAL;
4046dac7d7fSViacheslav Ovsiienko 		rte_errno = EINVAL;
4056dac7d7fSViacheslav Ovsiienko 		return ret;
4066dac7d7fSViacheslav Ovsiienko 	}
4076dac7d7fSViacheslav Ovsiienko 	if (acquire)
408e12a0166STyler Retzlaff 		rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
4096dac7d7fSViacheslav Ovsiienko 	return ret;
4106dac7d7fSViacheslav Ovsiienko }
4116dac7d7fSViacheslav Ovsiienko 
4126dac7d7fSViacheslav Ovsiienko /**
4136dac7d7fSViacheslav Ovsiienko  * Release flex item index on port - decrements reference counter by index.
4146dac7d7fSViacheslav Ovsiienko  *
4156dac7d7fSViacheslav Ovsiienko  * @param dev
4166dac7d7fSViacheslav Ovsiienko  *   Ethernet device to acquire flex item on.
4176dac7d7fSViacheslav Ovsiienko  * @param[in] index
4186dac7d7fSViacheslav Ovsiienko  *   Flow item index.
4196dac7d7fSViacheslav Ovsiienko  *
4206dac7d7fSViacheslav Ovsiienko  * @return
4216dac7d7fSViacheslav Ovsiienko  *   0 - on success, a negative errno value otherwise and rte_errno is set.
4226dac7d7fSViacheslav Ovsiienko  */
4236dac7d7fSViacheslav Ovsiienko int
4246dac7d7fSViacheslav Ovsiienko mlx5_flex_release_index(struct rte_eth_dev *dev,
4256dac7d7fSViacheslav Ovsiienko 			int index)
4266dac7d7fSViacheslav Ovsiienko {
4276dac7d7fSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
4286dac7d7fSViacheslav Ovsiienko 	struct mlx5_flex_item *flex;
4296dac7d7fSViacheslav Ovsiienko 
4306dac7d7fSViacheslav Ovsiienko 	if (index >= MLX5_PORT_FLEX_ITEM_NUM ||
4316dac7d7fSViacheslav Ovsiienko 	    !(priv->flex_item_map & (1u << index))) {
4326dac7d7fSViacheslav Ovsiienko 		errno = EINVAL;
4336dac7d7fSViacheslav Ovsiienko 		rte_errno = -EINVAL;
4346dac7d7fSViacheslav Ovsiienko 		return -EINVAL;
4356dac7d7fSViacheslav Ovsiienko 	}
4366dac7d7fSViacheslav Ovsiienko 	flex = priv->flex_item + index;
4376dac7d7fSViacheslav Ovsiienko 	if (flex->refcnt <= 1) {
4386dac7d7fSViacheslav Ovsiienko 		MLX5_ASSERT(false);
4396dac7d7fSViacheslav Ovsiienko 		errno = EINVAL;
4406dac7d7fSViacheslav Ovsiienko 		rte_errno = -EINVAL;
4416dac7d7fSViacheslav Ovsiienko 		return -EINVAL;
4426dac7d7fSViacheslav Ovsiienko 	}
443e12a0166STyler Retzlaff 	rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
4446dac7d7fSViacheslav Ovsiienko 	return 0;
4456dac7d7fSViacheslav Ovsiienko }
4466dac7d7fSViacheslav Ovsiienko 
447b293e8e4SViacheslav Ovsiienko /*
448b293e8e4SViacheslav Ovsiienko  * Calculate largest mask value for a given shift.
449b293e8e4SViacheslav Ovsiienko  *
450b293e8e4SViacheslav Ovsiienko  *   shift      mask
451b293e8e4SViacheslav Ovsiienko  * ------- ---------------
452*b04b06f4SViacheslav Ovsiienko  *    0     b11111100  0x3C
453*b04b06f4SViacheslav Ovsiienko  *    1     b01111110  0x3E
454*b04b06f4SViacheslav Ovsiienko  *    2     b00111111  0x3F
455*b04b06f4SViacheslav Ovsiienko  *    3     b00011111  0x1F
456*b04b06f4SViacheslav Ovsiienko  *    4     b00001111  0x0F
457*b04b06f4SViacheslav Ovsiienko  *    5     b00000111  0x07
458*b04b06f4SViacheslav Ovsiienko  *    6     b00000011  0x03
459*b04b06f4SViacheslav Ovsiienko  *    7     b00000001  0x01
460b293e8e4SViacheslav Ovsiienko  */
461b293e8e4SViacheslav Ovsiienko static uint8_t
462b293e8e4SViacheslav Ovsiienko mlx5_flex_hdr_len_mask(uint8_t shift,
463b293e8e4SViacheslav Ovsiienko 		       const struct mlx5_hca_flex_attr *attr)
464b293e8e4SViacheslav Ovsiienko {
465b293e8e4SViacheslav Ovsiienko 	uint32_t base_mask;
466b293e8e4SViacheslav Ovsiienko 	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;
467b293e8e4SViacheslav Ovsiienko 
468b293e8e4SViacheslav Ovsiienko 	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
469*b04b06f4SViacheslav Ovsiienko 	return diff < 0 ? base_mask << -diff : base_mask >> diff;
470b293e8e4SViacheslav Ovsiienko }
471b293e8e4SViacheslav Ovsiienko 
472b293e8e4SViacheslav Ovsiienko static int
473b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
474b293e8e4SViacheslav Ovsiienko 			   const struct rte_flow_item_flex_conf *conf,
475b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_parser_devx *devx,
476b293e8e4SViacheslav Ovsiienko 			   struct rte_flow_error *error)
477b293e8e4SViacheslav Ovsiienko {
478b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_flex_field *field = &conf->next_header;
479b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
480b293e8e4SViacheslav Ovsiienko 
481b293e8e4SViacheslav Ovsiienko 	if (field->field_base % CHAR_BIT)
482b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
483b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
484b293e8e4SViacheslav Ovsiienko 			 "not byte aligned header length field");
485b293e8e4SViacheslav Ovsiienko 	switch (field->field_mode) {
486b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_DUMMY:
487b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
488b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
489b293e8e4SViacheslav Ovsiienko 			 "invalid header length field mode (DUMMY)");
490b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_FIXED:
491b293e8e4SViacheslav Ovsiienko 		if (!(attr->header_length_mode &
492b293e8e4SViacheslav Ovsiienko 		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
493b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
494b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
495b293e8e4SViacheslav Ovsiienko 				 "unsupported header length field mode (FIXED)");
4967bda5beeSGregory Etelson 		if (field->field_size ||
4977bda5beeSGregory Etelson 		    field->offset_mask || field->offset_shift)
498b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
499b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
5007bda5beeSGregory Etelson 				 "invalid fields for fixed mode");
501b293e8e4SViacheslav Ovsiienko 		if (field->field_base < 0)
502b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
503b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
504b293e8e4SViacheslav Ovsiienko 				 "negative header length field base (FIXED)");
505b293e8e4SViacheslav Ovsiienko 		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
506b293e8e4SViacheslav Ovsiienko 		break;
507*b04b06f4SViacheslav Ovsiienko 	case FIELD_MODE_OFFSET: {
508*b04b06f4SViacheslav Ovsiienko 		uint32_t msb, lsb;
509*b04b06f4SViacheslav Ovsiienko 		int32_t shift = field->offset_shift;
510*b04b06f4SViacheslav Ovsiienko 		uint32_t offset = field->offset_base;
511*b04b06f4SViacheslav Ovsiienko 		uint32_t mask = field->offset_mask;
512*b04b06f4SViacheslav Ovsiienko 		uint32_t wmax = attr->header_length_mask_width +
513*b04b06f4SViacheslav Ovsiienko 				MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;
514*b04b06f4SViacheslav Ovsiienko 
515b293e8e4SViacheslav Ovsiienko 		if (!(attr->header_length_mode &
516b293e8e4SViacheslav Ovsiienko 		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
517b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
518b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
519b293e8e4SViacheslav Ovsiienko 				 "unsupported header length field mode (OFFSET)");
52044b5d879SRongwei Liu 		if (!field->field_size)
52144b5d879SRongwei Liu 			return rte_flow_error_set
52244b5d879SRongwei Liu 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
52344b5d879SRongwei Liu 				 "field size is a must for offset mode");
524*b04b06f4SViacheslav Ovsiienko 		if ((offset ^ (field->field_size + offset)) >> 5)
525*b04b06f4SViacheslav Ovsiienko 			return rte_flow_error_set
526*b04b06f4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
527*b04b06f4SViacheslav Ovsiienko 				 "field crosses the 32-bit word boundary");
528*b04b06f4SViacheslav Ovsiienko 		/* Hardware counts in dwords, all shifts done by offset within mask */
529*b04b06f4SViacheslav Ovsiienko 		if (shift < 0 || (uint32_t)shift >= wmax)
530*b04b06f4SViacheslav Ovsiienko 			return rte_flow_error_set
531*b04b06f4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
532*b04b06f4SViacheslav Ovsiienko 				 "header length field shift exceeds limits (OFFSET)");
533*b04b06f4SViacheslav Ovsiienko 		if (!mask)
534*b04b06f4SViacheslav Ovsiienko 			return rte_flow_error_set
535*b04b06f4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
536*b04b06f4SViacheslav Ovsiienko 				 "zero length field offset mask (OFFSET)");
537*b04b06f4SViacheslav Ovsiienko 		msb = rte_fls_u32(mask) - 1;
538*b04b06f4SViacheslav Ovsiienko 		lsb = rte_bsf32(mask);
539*b04b06f4SViacheslav Ovsiienko 		if (!rte_is_power_of_2((mask >> lsb) + 1))
540*b04b06f4SViacheslav Ovsiienko 			return rte_flow_error_set
541*b04b06f4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
542*b04b06f4SViacheslav Ovsiienko 				 "length field offset mask not contiguous (OFFSET)");
543*b04b06f4SViacheslav Ovsiienko 		if (msb >= field->field_size)
544*b04b06f4SViacheslav Ovsiienko 			return rte_flow_error_set
545*b04b06f4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
546*b04b06f4SViacheslav Ovsiienko 				 "length field offset mask exceeds field size (OFFSET)");
547*b04b06f4SViacheslav Ovsiienko 		if (msb >= wmax)
548*b04b06f4SViacheslav Ovsiienko 			return rte_flow_error_set
549*b04b06f4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
550*b04b06f4SViacheslav Ovsiienko 				 "length field offset mask exceeds supported width (OFFSET)");
551*b04b06f4SViacheslav Ovsiienko 		if (mask & ~mlx5_flex_hdr_len_mask(shift, attr))
552*b04b06f4SViacheslav Ovsiienko 			return rte_flow_error_set
553*b04b06f4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
554*b04b06f4SViacheslav Ovsiienko 				 "mask and shift combination not supported (OFFSET)");
555*b04b06f4SViacheslav Ovsiienko 		msb++;
556*b04b06f4SViacheslav Ovsiienko 		offset += field->field_size - msb;
557*b04b06f4SViacheslav Ovsiienko 		if (msb < attr->header_length_mask_width) {
558*b04b06f4SViacheslav Ovsiienko 			if (attr->header_length_mask_width - msb > offset)
55944b5d879SRongwei Liu 				return rte_flow_error_set
56044b5d879SRongwei Liu 					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
56144b5d879SRongwei Liu 					 "field size plus offset_base is too small");
562*b04b06f4SViacheslav Ovsiienko 			offset += msb;
563*b04b06f4SViacheslav Ovsiienko 			/*
564*b04b06f4SViacheslav Ovsiienko 			 * Here we can move to preceding dword. Hardware does
565*b04b06f4SViacheslav Ovsiienko 			 * cyclic left shift so we should avoid this and stay
566*b04b06f4SViacheslav Ovsiienko 			 * at current dword offset.
567*b04b06f4SViacheslav Ovsiienko 			 */
568*b04b06f4SViacheslav Ovsiienko 			offset = (offset & ~0x1Fu) |
569*b04b06f4SViacheslav Ovsiienko 				 ((offset - attr->header_length_mask_width) & 0x1F);
570*b04b06f4SViacheslav Ovsiienko 		}
571b293e8e4SViacheslav Ovsiienko 		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
572*b04b06f4SViacheslav Ovsiienko 		node->header_length_field_mask = mask;
573*b04b06f4SViacheslav Ovsiienko 		node->header_length_field_shift = shift;
574*b04b06f4SViacheslav Ovsiienko 		node->header_length_field_offset = offset;
575b293e8e4SViacheslav Ovsiienko 		break;
576*b04b06f4SViacheslav Ovsiienko 	}
577b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_BITMASK:
578b293e8e4SViacheslav Ovsiienko 		if (!(attr->header_length_mode &
579b293e8e4SViacheslav Ovsiienko 		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
580b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
581b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
582b293e8e4SViacheslav Ovsiienko 				 "unsupported header length field mode (BITMASK)");
583*b04b06f4SViacheslav Ovsiienko 		if (field->offset_shift > 15 || field->offset_shift < 0)
584b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
585b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
586*b04b06f4SViacheslav Ovsiienko 				 "header length field shift exceeds limit (BITMASK)");
587b293e8e4SViacheslav Ovsiienko 		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
588*b04b06f4SViacheslav Ovsiienko 		node->header_length_field_mask = field->offset_mask;
589*b04b06f4SViacheslav Ovsiienko 		node->header_length_field_shift = field->offset_shift;
590*b04b06f4SViacheslav Ovsiienko 		node->header_length_field_offset = field->offset_base;
591b293e8e4SViacheslav Ovsiienko 		break;
592b293e8e4SViacheslav Ovsiienko 	default:
593b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
594b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
595b293e8e4SViacheslav Ovsiienko 			 "unknown header length field mode");
596b293e8e4SViacheslav Ovsiienko 	}
597b293e8e4SViacheslav Ovsiienko 	if (field->field_base / CHAR_BIT >= 0 &&
598b293e8e4SViacheslav Ovsiienko 	    field->field_base / CHAR_BIT > attr->max_base_header_length)
599b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
600b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
601b293e8e4SViacheslav Ovsiienko 			 "header length field base exceeds limit");
602b293e8e4SViacheslav Ovsiienko 	node->header_length_base_value = field->field_base / CHAR_BIT;
603b293e8e4SViacheslav Ovsiienko 	return 0;
604b293e8e4SViacheslav Ovsiienko }
605b293e8e4SViacheslav Ovsiienko 
606b293e8e4SViacheslav Ovsiienko static int
607b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
608b293e8e4SViacheslav Ovsiienko 			 const struct rte_flow_item_flex_conf *conf,
609b293e8e4SViacheslav Ovsiienko 			 struct mlx5_flex_parser_devx *devx,
610b293e8e4SViacheslav Ovsiienko 			 struct rte_flow_error *error)
611b293e8e4SViacheslav Ovsiienko {
612b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
613b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
614b293e8e4SViacheslav Ovsiienko 
615b293e8e4SViacheslav Ovsiienko 	switch (field->field_mode) {
616b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_DUMMY:
617b293e8e4SViacheslav Ovsiienko 		if (conf->nb_outputs)
618b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
619b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
620b293e8e4SViacheslav Ovsiienko 				 "next protocol field is required (DUMMY)");
621b293e8e4SViacheslav Ovsiienko 		return 0;
622b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_FIXED:
623b293e8e4SViacheslav Ovsiienko 		break;
624b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_OFFSET:
625b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
626b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
627b293e8e4SViacheslav Ovsiienko 			 "unsupported next protocol field mode (OFFSET)");
628b293e8e4SViacheslav Ovsiienko 		break;
629b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_BITMASK:
630b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
631b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
632b293e8e4SViacheslav Ovsiienko 			 "unsupported next protocol field mode (BITMASK)");
633b293e8e4SViacheslav Ovsiienko 	default:
634b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
635b293e8e4SViacheslav Ovsiienko 			(error, EINVAL,
636b293e8e4SViacheslav Ovsiienko 			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
637b293e8e4SViacheslav Ovsiienko 			 "unknown next protocol field mode");
638b293e8e4SViacheslav Ovsiienko 	}
639b293e8e4SViacheslav Ovsiienko 	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
640b293e8e4SViacheslav Ovsiienko 	if (!conf->nb_outputs)
641b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
642b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
643b293e8e4SViacheslav Ovsiienko 			 "out link(s) is required if next field present");
644b293e8e4SViacheslav Ovsiienko 	if (attr->max_next_header_offset < field->field_base)
645b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
646b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
647b293e8e4SViacheslav Ovsiienko 			 "next protocol field base exceeds limit");
648b293e8e4SViacheslav Ovsiienko 	if (field->offset_shift)
649b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
650b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
651b293e8e4SViacheslav Ovsiienko 			 "unsupported next protocol field shift");
652b293e8e4SViacheslav Ovsiienko 	node->next_header_field_offset = field->field_base;
653b293e8e4SViacheslav Ovsiienko 	node->next_header_field_size = field->field_size;
654b293e8e4SViacheslav Ovsiienko 	return 0;
655b293e8e4SViacheslav Ovsiienko }
656b293e8e4SViacheslav Ovsiienko 
657b293e8e4SViacheslav Ovsiienko /* Helper structure to handle field bit intervals. */
658b293e8e4SViacheslav Ovsiienko struct mlx5_flex_field_cover {
659b293e8e4SViacheslav Ovsiienko 	uint16_t num;
660b293e8e4SViacheslav Ovsiienko 	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
661b293e8e4SViacheslav Ovsiienko 	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
662b293e8e4SViacheslav Ovsiienko 	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
663b293e8e4SViacheslav Ovsiienko };
664b293e8e4SViacheslav Ovsiienko 
665b293e8e4SViacheslav Ovsiienko static void
666b293e8e4SViacheslav Ovsiienko mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
667b293e8e4SViacheslav Ovsiienko 		       uint16_t num, int32_t start, int32_t end)
668b293e8e4SViacheslav Ovsiienko {
669b293e8e4SViacheslav Ovsiienko 	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
670b293e8e4SViacheslav Ovsiienko 	MLX5_ASSERT(num <= cover->num);
671b293e8e4SViacheslav Ovsiienko 	if (num < cover->num) {
672b293e8e4SViacheslav Ovsiienko 		memmove(&cover->start[num + 1],	&cover->start[num],
673b293e8e4SViacheslav Ovsiienko 			(cover->num - num) * sizeof(int32_t));
674b293e8e4SViacheslav Ovsiienko 		memmove(&cover->end[num + 1],	&cover->end[num],
675b293e8e4SViacheslav Ovsiienko 			(cover->num - num) * sizeof(int32_t));
676b293e8e4SViacheslav Ovsiienko 	}
677b293e8e4SViacheslav Ovsiienko 	cover->start[num] = start;
678b293e8e4SViacheslav Ovsiienko 	cover->end[num] = end;
679b293e8e4SViacheslav Ovsiienko 	cover->num++;
680b293e8e4SViacheslav Ovsiienko }
681b293e8e4SViacheslav Ovsiienko 
682b293e8e4SViacheslav Ovsiienko static void
683b293e8e4SViacheslav Ovsiienko mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
684b293e8e4SViacheslav Ovsiienko {
685b293e8e4SViacheslav Ovsiienko 	uint32_t i, del = 0;
686b293e8e4SViacheslav Ovsiienko 	int32_t end;
687b293e8e4SViacheslav Ovsiienko 
688b293e8e4SViacheslav Ovsiienko 	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
689b293e8e4SViacheslav Ovsiienko 	MLX5_ASSERT(num < (cover->num - 1));
690b293e8e4SViacheslav Ovsiienko 	end = cover->end[num];
691b293e8e4SViacheslav Ovsiienko 	for (i = num + 1; i < cover->num; i++) {
692b293e8e4SViacheslav Ovsiienko 		if (end < cover->start[i])
693b293e8e4SViacheslav Ovsiienko 			break;
694b293e8e4SViacheslav Ovsiienko 		del++;
695b293e8e4SViacheslav Ovsiienko 		if (end <= cover->end[i]) {
696b293e8e4SViacheslav Ovsiienko 			cover->end[num] = cover->end[i];
697b293e8e4SViacheslav Ovsiienko 			break;
698b293e8e4SViacheslav Ovsiienko 		}
699b293e8e4SViacheslav Ovsiienko 	}
700b293e8e4SViacheslav Ovsiienko 	if (del) {
701b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(del < (cover->num - 1u - num));
702b293e8e4SViacheslav Ovsiienko 		cover->num -= del;
703b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(cover->num > num);
704b293e8e4SViacheslav Ovsiienko 		if ((cover->num - num) > 1) {
705b293e8e4SViacheslav Ovsiienko 			memmove(&cover->start[num + 1],
706b293e8e4SViacheslav Ovsiienko 				&cover->start[num + 1 + del],
707b293e8e4SViacheslav Ovsiienko 				(cover->num - num - 1) * sizeof(int32_t));
708b293e8e4SViacheslav Ovsiienko 			memmove(&cover->end[num + 1],
709b293e8e4SViacheslav Ovsiienko 				&cover->end[num + 1 + del],
710b293e8e4SViacheslav Ovsiienko 				(cover->num - num - 1) * sizeof(int32_t));
711b293e8e4SViacheslav Ovsiienko 		}
712b293e8e4SViacheslav Ovsiienko 	}
713b293e8e4SViacheslav Ovsiienko }
714b293e8e4SViacheslav Ovsiienko 
715b293e8e4SViacheslav Ovsiienko /*
716b293e8e4SViacheslav Ovsiienko  * Validate the sample field and update interval array
717b293e8e4SViacheslav Ovsiienko  * if parameters match with the 'match" field.
718b293e8e4SViacheslav Ovsiienko  * Returns:
719b293e8e4SViacheslav Ovsiienko  *    < 0  - error
720b293e8e4SViacheslav Ovsiienko  *    == 0 - no match, interval array not updated
721b293e8e4SViacheslav Ovsiienko  *    > 0  - match, interval array updated
722b293e8e4SViacheslav Ovsiienko  */
723b293e8e4SViacheslav Ovsiienko static int
724b293e8e4SViacheslav Ovsiienko mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
725b293e8e4SViacheslav Ovsiienko 		       struct rte_flow_item_flex_field *field,
726b293e8e4SViacheslav Ovsiienko 		       struct rte_flow_item_flex_field *match,
727b293e8e4SViacheslav Ovsiienko 		       struct mlx5_hca_flex_attr *attr,
728b293e8e4SViacheslav Ovsiienko 		       struct rte_flow_error *error)
729b293e8e4SViacheslav Ovsiienko {
730b293e8e4SViacheslav Ovsiienko 	int32_t start, end;
731b293e8e4SViacheslav Ovsiienko 	uint32_t i;
732b293e8e4SViacheslav Ovsiienko 
733b293e8e4SViacheslav Ovsiienko 	switch (field->field_mode) {
734b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_DUMMY:
735b293e8e4SViacheslav Ovsiienko 		return 0;
736b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_FIXED:
737b293e8e4SViacheslav Ovsiienko 		if (!(attr->sample_offset_mode &
738b293e8e4SViacheslav Ovsiienko 		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
739b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
740b293e8e4SViacheslav Ovsiienko 				(error, EINVAL,
741b293e8e4SViacheslav Ovsiienko 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
742b293e8e4SViacheslav Ovsiienko 				 "unsupported sample field mode (FIXED)");
743b293e8e4SViacheslav Ovsiienko 		if (field->offset_shift)
744b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
745b293e8e4SViacheslav Ovsiienko 				(error, EINVAL,
746b293e8e4SViacheslav Ovsiienko 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
747b293e8e4SViacheslav Ovsiienko 				 "invalid sample field shift (FIXED");
748b293e8e4SViacheslav Ovsiienko 		if (field->field_base < 0)
749b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
750b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
751b293e8e4SViacheslav Ovsiienko 				 "invalid sample field base (FIXED)");
752b293e8e4SViacheslav Ovsiienko 		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
753b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
754b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
755b293e8e4SViacheslav Ovsiienko 				 "sample field base exceeds limit (FIXED)");
756b293e8e4SViacheslav Ovsiienko 		break;
757b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_OFFSET:
758b293e8e4SViacheslav Ovsiienko 		if (!(attr->sample_offset_mode &
759b293e8e4SViacheslav Ovsiienko 		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
760b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
761b293e8e4SViacheslav Ovsiienko 				(error, EINVAL,
762b293e8e4SViacheslav Ovsiienko 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
763b293e8e4SViacheslav Ovsiienko 				 "unsupported sample field mode (OFFSET)");
764b293e8e4SViacheslav Ovsiienko 		if (field->field_base / CHAR_BIT >= 0 &&
765b293e8e4SViacheslav Ovsiienko 		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
766b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
767b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
768b293e8e4SViacheslav Ovsiienko 				"sample field base exceeds limit");
769b293e8e4SViacheslav Ovsiienko 		break;
770b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_BITMASK:
771b293e8e4SViacheslav Ovsiienko 		if (!(attr->sample_offset_mode &
772b293e8e4SViacheslav Ovsiienko 		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
773b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
774b293e8e4SViacheslav Ovsiienko 				(error, EINVAL,
775b293e8e4SViacheslav Ovsiienko 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
776b293e8e4SViacheslav Ovsiienko 				 "unsupported sample field mode (BITMASK)");
777b293e8e4SViacheslav Ovsiienko 		if (field->field_base / CHAR_BIT >= 0 &&
778b293e8e4SViacheslav Ovsiienko 		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
779b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
780b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
781b293e8e4SViacheslav Ovsiienko 				"sample field base exceeds limit");
782b293e8e4SViacheslav Ovsiienko 		break;
783b293e8e4SViacheslav Ovsiienko 	default:
784b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
785b293e8e4SViacheslav Ovsiienko 			(error, EINVAL,
786b293e8e4SViacheslav Ovsiienko 			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
787b293e8e4SViacheslav Ovsiienko 			 "unknown data sample field mode");
788b293e8e4SViacheslav Ovsiienko 	}
789b293e8e4SViacheslav Ovsiienko 	if (!match) {
790b293e8e4SViacheslav Ovsiienko 		if (!field->field_size)
791b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
792b293e8e4SViacheslav Ovsiienko 				(error, EINVAL,
793b293e8e4SViacheslav Ovsiienko 				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
794b293e8e4SViacheslav Ovsiienko 				"zero sample field width");
795b293e8e4SViacheslav Ovsiienko 		if (field->field_id)
796b293e8e4SViacheslav Ovsiienko 			DRV_LOG(DEBUG, "sample field id hint ignored");
797b293e8e4SViacheslav Ovsiienko 	} else {
798b293e8e4SViacheslav Ovsiienko 		if (field->field_mode != match->field_mode ||
799b293e8e4SViacheslav Ovsiienko 		    field->offset_base | match->offset_base ||
800b293e8e4SViacheslav Ovsiienko 		    field->offset_mask | match->offset_mask ||
801b293e8e4SViacheslav Ovsiienko 		    field->offset_shift | match->offset_shift)
802b293e8e4SViacheslav Ovsiienko 			return 0;
803b293e8e4SViacheslav Ovsiienko 	}
804b293e8e4SViacheslav Ovsiienko 	start = field->field_base;
805b293e8e4SViacheslav Ovsiienko 	end = start + field->field_size;
806b293e8e4SViacheslav Ovsiienko 	/* Add the new or similar field to interval array. */
807b293e8e4SViacheslav Ovsiienko 	if (!cover->num) {
808b293e8e4SViacheslav Ovsiienko 		cover->start[cover->num] = start;
809b293e8e4SViacheslav Ovsiienko 		cover->end[cover->num] = end;
810b293e8e4SViacheslav Ovsiienko 		cover->num = 1;
811b293e8e4SViacheslav Ovsiienko 		return 1;
812b293e8e4SViacheslav Ovsiienko 	}
813b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < cover->num; i++) {
814b293e8e4SViacheslav Ovsiienko 		if (start > cover->end[i]) {
815b293e8e4SViacheslav Ovsiienko 			if (i >= (cover->num - 1u)) {
816b293e8e4SViacheslav Ovsiienko 				mlx5_flex_insert_field(cover, cover->num,
817b293e8e4SViacheslav Ovsiienko 						       start, end);
818b293e8e4SViacheslav Ovsiienko 				break;
819b293e8e4SViacheslav Ovsiienko 			}
820b293e8e4SViacheslav Ovsiienko 			continue;
821b293e8e4SViacheslav Ovsiienko 		}
822b293e8e4SViacheslav Ovsiienko 		if (end < cover->start[i]) {
823b293e8e4SViacheslav Ovsiienko 			mlx5_flex_insert_field(cover, i, start, end);
824b293e8e4SViacheslav Ovsiienko 			break;
825b293e8e4SViacheslav Ovsiienko 		}
826b293e8e4SViacheslav Ovsiienko 		if (start < cover->start[i])
827b293e8e4SViacheslav Ovsiienko 			cover->start[i] = start;
828b293e8e4SViacheslav Ovsiienko 		if (end > cover->end[i]) {
829b293e8e4SViacheslav Ovsiienko 			cover->end[i] = end;
830b293e8e4SViacheslav Ovsiienko 			if (i < (cover->num - 1u))
831b293e8e4SViacheslav Ovsiienko 				mlx5_flex_merge_field(cover, i);
832b293e8e4SViacheslav Ovsiienko 		}
833b293e8e4SViacheslav Ovsiienko 		break;
834b293e8e4SViacheslav Ovsiienko 	}
835b293e8e4SViacheslav Ovsiienko 	return 1;
836b293e8e4SViacheslav Ovsiienko }
837b293e8e4SViacheslav Ovsiienko 
838b293e8e4SViacheslav Ovsiienko static void
839b293e8e4SViacheslav Ovsiienko mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
840b293e8e4SViacheslav Ovsiienko 			struct rte_flow_item_flex_field *field,
841b293e8e4SViacheslav Ovsiienko 			enum rte_flow_item_flex_tunnel_mode tunnel_mode)
842b293e8e4SViacheslav Ovsiienko {
843b293e8e4SViacheslav Ovsiienko 	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
844b293e8e4SViacheslav Ovsiienko 	na->flow_match_sample_en = 1;
845b293e8e4SViacheslav Ovsiienko 	switch (field->field_mode) {
846b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_FIXED:
847b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_offset_mode =
848b293e8e4SViacheslav Ovsiienko 			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
849b293e8e4SViacheslav Ovsiienko 		break;
850b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_OFFSET:
851b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_offset_mode =
852b293e8e4SViacheslav Ovsiienko 			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
853b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_field_offset = field->offset_base;
854b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_field_offset_mask = field->offset_mask;
855b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_field_offset_shift = field->offset_shift;
856b293e8e4SViacheslav Ovsiienko 		break;
857b293e8e4SViacheslav Ovsiienko 	case FIELD_MODE_BITMASK:
858b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_offset_mode =
859b293e8e4SViacheslav Ovsiienko 			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
860b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_field_offset = field->offset_base;
861b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_field_offset_mask = field->offset_mask;
862b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_field_offset_shift = field->offset_shift;
863b293e8e4SViacheslav Ovsiienko 		break;
864b293e8e4SViacheslav Ovsiienko 	default:
865b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(false);
866b293e8e4SViacheslav Ovsiienko 		break;
867b293e8e4SViacheslav Ovsiienko 	}
868b293e8e4SViacheslav Ovsiienko 	switch (tunnel_mode) {
869b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_SINGLE:
870b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
871b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_TUNNEL:
872b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_tunnel_mode =
873b293e8e4SViacheslav Ovsiienko 			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
874b293e8e4SViacheslav Ovsiienko 		break;
875b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_MULTI:
876b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
877b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_OUTER:
878b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_tunnel_mode =
879b293e8e4SViacheslav Ovsiienko 			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
880b293e8e4SViacheslav Ovsiienko 		break;
881b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_INNER:
882b293e8e4SViacheslav Ovsiienko 		na->flow_match_sample_tunnel_mode =
883b293e8e4SViacheslav Ovsiienko 			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
884b293e8e4SViacheslav Ovsiienko 		break;
885b293e8e4SViacheslav Ovsiienko 	default:
886b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(false);
887b293e8e4SViacheslav Ovsiienko 		break;
888b293e8e4SViacheslav Ovsiienko 	}
889b293e8e4SViacheslav Ovsiienko }
890b293e8e4SViacheslav Ovsiienko 
891b293e8e4SViacheslav Ovsiienko /* Map specified field to set/subset of allocated sample registers. */
892b293e8e4SViacheslav Ovsiienko static int
893b293e8e4SViacheslav Ovsiienko mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
894b293e8e4SViacheslav Ovsiienko 		     struct mlx5_flex_parser_devx *parser,
895b293e8e4SViacheslav Ovsiienko 		     struct mlx5_flex_item *item,
896b293e8e4SViacheslav Ovsiienko 		     struct rte_flow_error *error)
897b293e8e4SViacheslav Ovsiienko {
898b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_match_sample_attr node;
899b293e8e4SViacheslav Ovsiienko 	int32_t start = field->field_base;
900b293e8e4SViacheslav Ovsiienko 	int32_t end = start + field->field_size;
901b293e8e4SViacheslav Ovsiienko 	struct mlx5_flex_pattern_field *trans;
902b293e8e4SViacheslav Ovsiienko 	uint32_t i, done_bits = 0;
903b293e8e4SViacheslav Ovsiienko 
904b293e8e4SViacheslav Ovsiienko 	if (field->field_mode == FIELD_MODE_DUMMY) {
905b293e8e4SViacheslav Ovsiienko 		done_bits = field->field_size;
906b293e8e4SViacheslav Ovsiienko 		while (done_bits) {
907b293e8e4SViacheslav Ovsiienko 			uint32_t part = RTE_MIN(done_bits,
908b293e8e4SViacheslav Ovsiienko 						sizeof(uint32_t) * CHAR_BIT);
909b293e8e4SViacheslav Ovsiienko 			if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
910b293e8e4SViacheslav Ovsiienko 				return rte_flow_error_set
911b293e8e4SViacheslav Ovsiienko 					(error,
912b293e8e4SViacheslav Ovsiienko 					 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
913b293e8e4SViacheslav Ovsiienko 					 "too many flex item pattern translations");
914b293e8e4SViacheslav Ovsiienko 			trans = &item->map[item->mapnum];
915b293e8e4SViacheslav Ovsiienko 			trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
916b293e8e4SViacheslav Ovsiienko 			trans->shift = 0;
917b293e8e4SViacheslav Ovsiienko 			trans->width = part;
918b293e8e4SViacheslav Ovsiienko 			item->mapnum++;
919b293e8e4SViacheslav Ovsiienko 			done_bits -= part;
920b293e8e4SViacheslav Ovsiienko 		}
921b293e8e4SViacheslav Ovsiienko 		return 0;
922b293e8e4SViacheslav Ovsiienko 	}
923b293e8e4SViacheslav Ovsiienko 	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
924b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < parser->num_samples; i++) {
925b293e8e4SViacheslav Ovsiienko 		struct mlx5_devx_match_sample_attr *sample =
926b293e8e4SViacheslav Ovsiienko 			&parser->devx_conf.sample[i];
927b293e8e4SViacheslav Ovsiienko 		int32_t reg_start, reg_end;
928b293e8e4SViacheslav Ovsiienko 		int32_t cov_start, cov_end;
929b293e8e4SViacheslav Ovsiienko 
930b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(sample->flow_match_sample_en);
931b293e8e4SViacheslav Ovsiienko 		if (!sample->flow_match_sample_en)
932b293e8e4SViacheslav Ovsiienko 			break;
933b293e8e4SViacheslav Ovsiienko 		node.flow_match_sample_field_base_offset =
934b293e8e4SViacheslav Ovsiienko 			sample->flow_match_sample_field_base_offset;
935b293e8e4SViacheslav Ovsiienko 		if (memcmp(&node, sample, sizeof(node)))
936b293e8e4SViacheslav Ovsiienko 			continue;
937b293e8e4SViacheslav Ovsiienko 		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
938b293e8e4SViacheslav Ovsiienko 		reg_start *= CHAR_BIT;
939b293e8e4SViacheslav Ovsiienko 		reg_end = reg_start + 32;
940b293e8e4SViacheslav Ovsiienko 		if (end <= reg_start || start >= reg_end)
941b293e8e4SViacheslav Ovsiienko 			continue;
942b293e8e4SViacheslav Ovsiienko 		cov_start = RTE_MAX(reg_start, start);
943b293e8e4SViacheslav Ovsiienko 		cov_end = RTE_MIN(reg_end, end);
944b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(cov_end > cov_start);
945b293e8e4SViacheslav Ovsiienko 		done_bits += cov_end - cov_start;
946b293e8e4SViacheslav Ovsiienko 		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
947b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
948b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
949b293e8e4SViacheslav Ovsiienko 				 "too many flex item pattern translations");
950b293e8e4SViacheslav Ovsiienko 		trans = &item->map[item->mapnum];
951b293e8e4SViacheslav Ovsiienko 		item->mapnum++;
952b293e8e4SViacheslav Ovsiienko 		trans->reg_id = i;
953b293e8e4SViacheslav Ovsiienko 		trans->shift = cov_start - reg_start;
954b293e8e4SViacheslav Ovsiienko 		trans->width = cov_end - cov_start;
955b293e8e4SViacheslav Ovsiienko 	}
956b293e8e4SViacheslav Ovsiienko 	if (done_bits != field->field_size) {
957b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(false);
958b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
959b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
960b293e8e4SViacheslav Ovsiienko 			 "failed to map field to sample register");
961b293e8e4SViacheslav Ovsiienko 	}
962b293e8e4SViacheslav Ovsiienko 	return 0;
963b293e8e4SViacheslav Ovsiienko }
964b293e8e4SViacheslav Ovsiienko 
965b293e8e4SViacheslav Ovsiienko /* Allocate sample registers for the specified field type and interval array. */
966b293e8e4SViacheslav Ovsiienko static int
967b293e8e4SViacheslav Ovsiienko mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
968b293e8e4SViacheslav Ovsiienko 		       struct mlx5_flex_parser_devx *parser,
969b293e8e4SViacheslav Ovsiienko 		       struct mlx5_flex_item *item,
970b293e8e4SViacheslav Ovsiienko 		       struct rte_flow_item_flex_field *field,
971b293e8e4SViacheslav Ovsiienko 		       struct mlx5_hca_flex_attr *attr,
972b293e8e4SViacheslav Ovsiienko 		       struct rte_flow_error *error)
973b293e8e4SViacheslav Ovsiienko {
974b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_match_sample_attr node;
975b293e8e4SViacheslav Ovsiienko 	uint32_t idx = 0;
976b293e8e4SViacheslav Ovsiienko 
977b293e8e4SViacheslav Ovsiienko 	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
978b293e8e4SViacheslav Ovsiienko 	while (idx < cover->num) {
979b293e8e4SViacheslav Ovsiienko 		int32_t start, end;
980b293e8e4SViacheslav Ovsiienko 
981b293e8e4SViacheslav Ovsiienko 		/*
982b293e8e4SViacheslav Ovsiienko 		 * Sample base offsets are in bytes, should be aligned
983b293e8e4SViacheslav Ovsiienko 		 * to 32-bit as required by firmware for samples.
984b293e8e4SViacheslav Ovsiienko 		 */
985b293e8e4SViacheslav Ovsiienko 		start = RTE_ALIGN_FLOOR(cover->start[idx],
986b293e8e4SViacheslav Ovsiienko 					sizeof(uint32_t) * CHAR_BIT);
987b293e8e4SViacheslav Ovsiienko 		node.flow_match_sample_field_base_offset =
988b293e8e4SViacheslav Ovsiienko 						(start / CHAR_BIT) & 0xFF;
989b293e8e4SViacheslav Ovsiienko 		/* Allocate sample register. */
990b293e8e4SViacheslav Ovsiienko 		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
991b293e8e4SViacheslav Ovsiienko 		    parser->num_samples >= attr->max_num_sample ||
992b293e8e4SViacheslav Ovsiienko 		    parser->num_samples >= attr->max_num_prog_sample)
993b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
994b293e8e4SViacheslav Ovsiienko 				(error, EINVAL,
995b293e8e4SViacheslav Ovsiienko 				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
996b293e8e4SViacheslav Ovsiienko 				 "no sample registers to handle all flex item fields");
997b293e8e4SViacheslav Ovsiienko 		parser->devx_conf.sample[parser->num_samples] = node;
998b293e8e4SViacheslav Ovsiienko 		parser->num_samples++;
999b293e8e4SViacheslav Ovsiienko 		/* Remove or update covered intervals. */
1000b293e8e4SViacheslav Ovsiienko 		end = start + 32;
1001b293e8e4SViacheslav Ovsiienko 		while (idx < cover->num) {
1002b293e8e4SViacheslav Ovsiienko 			if (end >= cover->end[idx]) {
1003b293e8e4SViacheslav Ovsiienko 				idx++;
1004b293e8e4SViacheslav Ovsiienko 				continue;
1005b293e8e4SViacheslav Ovsiienko 			}
1006b293e8e4SViacheslav Ovsiienko 			if (end > cover->start[idx])
1007b293e8e4SViacheslav Ovsiienko 				cover->start[idx] = end;
1008b293e8e4SViacheslav Ovsiienko 			break;
1009b293e8e4SViacheslav Ovsiienko 		}
1010b293e8e4SViacheslav Ovsiienko 	}
1011b293e8e4SViacheslav Ovsiienko 	return 0;
1012b293e8e4SViacheslav Ovsiienko }
1013b293e8e4SViacheslav Ovsiienko 
1014b293e8e4SViacheslav Ovsiienko static int
1015b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
1016b293e8e4SViacheslav Ovsiienko 			   const struct rte_flow_item_flex_conf *conf,
1017b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_parser_devx *parser,
1018b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_item *item,
1019b293e8e4SViacheslav Ovsiienko 			   struct rte_flow_error *error)
1020b293e8e4SViacheslav Ovsiienko {
1021b293e8e4SViacheslav Ovsiienko 	struct mlx5_flex_field_cover cover;
1022b293e8e4SViacheslav Ovsiienko 	uint32_t i, j;
1023b293e8e4SViacheslav Ovsiienko 	int ret;
1024b293e8e4SViacheslav Ovsiienko 
1025b293e8e4SViacheslav Ovsiienko 	switch (conf->tunnel) {
1026b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_SINGLE:
1027b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
1028b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_OUTER:
1029b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
1030b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_INNER:
1031b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
1032b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_MULTI:
1033b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
1034b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_TUNNEL:
1035b293e8e4SViacheslav Ovsiienko 		break;
1036b293e8e4SViacheslav Ovsiienko 	default:
1037b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1038b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1039b293e8e4SViacheslav Ovsiienko 			 "unrecognized tunnel mode");
1040b293e8e4SViacheslav Ovsiienko 	}
1041b293e8e4SViacheslav Ovsiienko 	item->tunnel_mode = conf->tunnel;
1042b293e8e4SViacheslav Ovsiienko 	if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
1043b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1044b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1045b293e8e4SViacheslav Ovsiienko 			 "sample field number exceeds limit");
1046b293e8e4SViacheslav Ovsiienko 	/*
1047b293e8e4SViacheslav Ovsiienko 	 * The application can specify fields smaller or bigger than 32 bits
1048b293e8e4SViacheslav Ovsiienko 	 * covered with single sample register and it can specify field
1049b293e8e4SViacheslav Ovsiienko 	 * offsets in any order.
1050b293e8e4SViacheslav Ovsiienko 	 *
1051b293e8e4SViacheslav Ovsiienko 	 * Gather all similar fields together, build array of bit intervals
105253820561SMichael Baum 	 * in ascending order and try to cover with the smallest set of sample
1053b293e8e4SViacheslav Ovsiienko 	 * registers.
1054b293e8e4SViacheslav Ovsiienko 	 */
1055b293e8e4SViacheslav Ovsiienko 	memset(&cover, 0, sizeof(cover));
1056b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_samples; i++) {
1057b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
1058b293e8e4SViacheslav Ovsiienko 
1059b293e8e4SViacheslav Ovsiienko 		/* Check whether field was covered in the previous iteration. */
1060b293e8e4SViacheslav Ovsiienko 		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
1061b293e8e4SViacheslav Ovsiienko 			continue;
1062b293e8e4SViacheslav Ovsiienko 		if (fl->field_mode == FIELD_MODE_DUMMY)
1063b293e8e4SViacheslav Ovsiienko 			continue;
1064b293e8e4SViacheslav Ovsiienko 		/* Build an interval array for the field and similar ones */
1065b293e8e4SViacheslav Ovsiienko 		cover.num = 0;
1066b293e8e4SViacheslav Ovsiienko 		/* Add the first field to array unconditionally. */
1067b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
1068b293e8e4SViacheslav Ovsiienko 		if (ret < 0)
1069b293e8e4SViacheslav Ovsiienko 			return ret;
1070b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(ret > 0);
1071b293e8e4SViacheslav Ovsiienko 		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
1072b293e8e4SViacheslav Ovsiienko 		for (j = i + 1; j < conf->nb_samples; j++) {
1073b293e8e4SViacheslav Ovsiienko 			struct rte_flow_item_flex_field *ft;
1074b293e8e4SViacheslav Ovsiienko 
1075b293e8e4SViacheslav Ovsiienko 			/* Add field to array if its type matches. */
1076b293e8e4SViacheslav Ovsiienko 			ft = conf->sample_data + j;
1077b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_cover_sample(&cover, ft, fl,
1078b293e8e4SViacheslav Ovsiienko 						     attr, error);
1079b293e8e4SViacheslav Ovsiienko 			if (ret < 0)
1080b293e8e4SViacheslav Ovsiienko 				return ret;
1081b293e8e4SViacheslav Ovsiienko 			if (!ret)
1082b293e8e4SViacheslav Ovsiienko 				continue;
1083b293e8e4SViacheslav Ovsiienko 			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
1084b293e8e4SViacheslav Ovsiienko 		}
1085b293e8e4SViacheslav Ovsiienko 		/* Allocate sample registers to cover array of intervals. */
1086b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_alloc_sample(&cover, parser, item,
1087b293e8e4SViacheslav Ovsiienko 					     fl, attr, error);
1088b293e8e4SViacheslav Ovsiienko 		if (ret)
1089b293e8e4SViacheslav Ovsiienko 			return ret;
1090b293e8e4SViacheslav Ovsiienko 	}
1091b293e8e4SViacheslav Ovsiienko 	/* Build the item pattern translating data on flow creation. */
1092b293e8e4SViacheslav Ovsiienko 	item->mapnum = 0;
1093b293e8e4SViacheslav Ovsiienko 	memset(&item->map, 0, sizeof(item->map));
1094b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_samples; i++) {
1095b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
1096b293e8e4SViacheslav Ovsiienko 
1097b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_map_sample(fl, parser, item, error);
1098b293e8e4SViacheslav Ovsiienko 		if (ret) {
1099b293e8e4SViacheslav Ovsiienko 			MLX5_ASSERT(false);
1100b293e8e4SViacheslav Ovsiienko 			return ret;
1101b293e8e4SViacheslav Ovsiienko 		}
1102b293e8e4SViacheslav Ovsiienko 	}
1103b293e8e4SViacheslav Ovsiienko 	if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
1104b293e8e4SViacheslav Ovsiienko 		/*
1105b293e8e4SViacheslav Ovsiienko 		 * In FLEX_TUNNEL_MODE_MULTI tunnel mode PMD creates 2 sets
1106b293e8e4SViacheslav Ovsiienko 		 * of samples. The first set is for outer and the second set
1107b293e8e4SViacheslav Ovsiienko 		 * for inner flex flow item. Outer and inner samples differ
1108b293e8e4SViacheslav Ovsiienko 		 * only in tunnel_mode.
1109b293e8e4SViacheslav Ovsiienko 		 */
1110b293e8e4SViacheslav Ovsiienko 		if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
1111b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1112b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1113b293e8e4SViacheslav Ovsiienko 				 "no sample registers for inner");
1114b293e8e4SViacheslav Ovsiienko 		rte_memcpy(parser->devx_conf.sample + parser->num_samples,
1115b293e8e4SViacheslav Ovsiienko 			   parser->devx_conf.sample,
1116b293e8e4SViacheslav Ovsiienko 			   parser->num_samples *
1117b293e8e4SViacheslav Ovsiienko 					sizeof(parser->devx_conf.sample[0]));
1118b293e8e4SViacheslav Ovsiienko 		for (i = 0; i < parser->num_samples; i++) {
1119b293e8e4SViacheslav Ovsiienko 			struct mlx5_devx_match_sample_attr *sm = i +
1120b293e8e4SViacheslav Ovsiienko 				parser->devx_conf.sample + parser->num_samples;
1121b293e8e4SViacheslav Ovsiienko 
1122b293e8e4SViacheslav Ovsiienko 			sm->flow_match_sample_tunnel_mode =
1123b293e8e4SViacheslav Ovsiienko 						MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
1124b293e8e4SViacheslav Ovsiienko 		}
1125b293e8e4SViacheslav Ovsiienko 		parser->num_samples *= 2;
1126b293e8e4SViacheslav Ovsiienko 	}
1127b293e8e4SViacheslav Ovsiienko 	return 0;
1128b293e8e4SViacheslav Ovsiienko }
1129b293e8e4SViacheslav Ovsiienko 
1130b293e8e4SViacheslav Ovsiienko static int
1131b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
1132b293e8e4SViacheslav Ovsiienko {
1133b293e8e4SViacheslav Ovsiienko 	switch (type) {
1134b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_ETH:
1135b293e8e4SViacheslav Ovsiienko 		return  MLX5_GRAPH_ARC_NODE_MAC;
1136b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_IPV4:
1137b293e8e4SViacheslav Ovsiienko 		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
1138b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_IPV6:
1139b293e8e4SViacheslav Ovsiienko 		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
1140b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_UDP:
1141b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_UDP;
1142b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_TCP:
1143b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_TCP;
1144b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_MPLS:
1145b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_MPLS;
1146b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_GRE:
1147b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_GRE;
1148b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1149b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_GENEVE;
1150b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1151b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
11526dfb83f1SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_ESP:
11536dfb83f1SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_IPSEC_ESP;
1154b293e8e4SViacheslav Ovsiienko 	default:
1155b293e8e4SViacheslav Ovsiienko 		return -EINVAL;
1156b293e8e4SViacheslav Ovsiienko 	}
1157b293e8e4SViacheslav Ovsiienko }
1158b293e8e4SViacheslav Ovsiienko 
1159b293e8e4SViacheslav Ovsiienko static int
1160b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
1161b293e8e4SViacheslav Ovsiienko 		     struct rte_flow_error *error)
1162b293e8e4SViacheslav Ovsiienko {
1163b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_eth *spec = item->spec;
1164b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_eth *mask = item->mask;
1165b293e8e4SViacheslav Ovsiienko 	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
1166b293e8e4SViacheslav Ovsiienko 
1167b293e8e4SViacheslav Ovsiienko 	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
1168b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1169b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1170b293e8e4SViacheslav Ovsiienko 			 "invalid eth item mask");
1171b293e8e4SViacheslav Ovsiienko 	}
1172b293e8e4SViacheslav Ovsiienko 	return rte_be_to_cpu_16(spec->hdr.ether_type);
1173b293e8e4SViacheslav Ovsiienko }
1174b293e8e4SViacheslav Ovsiienko 
1175b293e8e4SViacheslav Ovsiienko static int
1176b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
1177b293e8e4SViacheslav Ovsiienko 		     struct rte_flow_error *error)
1178b293e8e4SViacheslav Ovsiienko {
1179b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_udp *spec = item->spec;
1180b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_udp *mask = item->mask;
1181b293e8e4SViacheslav Ovsiienko 	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
1182b293e8e4SViacheslav Ovsiienko 
1183b293e8e4SViacheslav Ovsiienko 	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
1184b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1185b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1186b293e8e4SViacheslav Ovsiienko 			 "invalid eth item mask");
1187b293e8e4SViacheslav Ovsiienko 	}
1188b293e8e4SViacheslav Ovsiienko 	return rte_be_to_cpu_16(spec->hdr.dst_port);
1189b293e8e4SViacheslav Ovsiienko }
1190b293e8e4SViacheslav Ovsiienko 
1191b293e8e4SViacheslav Ovsiienko static int
11926dfb83f1SViacheslav Ovsiienko mlx5_flex_arc_in_ipv4(const struct rte_flow_item *item,
11936dfb83f1SViacheslav Ovsiienko 		      struct rte_flow_error *error)
11946dfb83f1SViacheslav Ovsiienko {
11956dfb83f1SViacheslav Ovsiienko 	const struct rte_flow_item_ipv4 *spec = item->spec;
11966dfb83f1SViacheslav Ovsiienko 	const struct rte_flow_item_ipv4 *mask = item->mask;
11976dfb83f1SViacheslav Ovsiienko 	struct rte_flow_item_ipv4 ip = { .hdr.next_proto_id = 0xff };
11986dfb83f1SViacheslav Ovsiienko 
11996dfb83f1SViacheslav Ovsiienko 	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv4))) {
12006dfb83f1SViacheslav Ovsiienko 		return rte_flow_error_set
12016dfb83f1SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
12026dfb83f1SViacheslav Ovsiienko 			 "invalid ipv4 item mask, full mask is desired");
12036dfb83f1SViacheslav Ovsiienko 	}
12046dfb83f1SViacheslav Ovsiienko 	return spec->hdr.next_proto_id;
12056dfb83f1SViacheslav Ovsiienko }
12066dfb83f1SViacheslav Ovsiienko 
12076dfb83f1SViacheslav Ovsiienko static int
1208d451d1a1SRongwei Liu mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
1209d451d1a1SRongwei Liu 		      struct rte_flow_error *error)
1210d451d1a1SRongwei Liu {
1211d451d1a1SRongwei Liu 	const struct rte_flow_item_ipv6 *spec = item->spec;
1212d451d1a1SRongwei Liu 	const struct rte_flow_item_ipv6 *mask = item->mask;
1213d451d1a1SRongwei Liu 	struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff };
1214d451d1a1SRongwei Liu 
1215d451d1a1SRongwei Liu 	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) {
1216d451d1a1SRongwei Liu 		return rte_flow_error_set
1217d451d1a1SRongwei Liu 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1218d451d1a1SRongwei Liu 			 "invalid ipv6 item mask, full mask is desired");
1219d451d1a1SRongwei Liu 	}
1220d451d1a1SRongwei Liu 	return spec->hdr.proto;
1221d451d1a1SRongwei Liu }
1222d451d1a1SRongwei Liu 
1223d451d1a1SRongwei Liu static int
1224b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
1225b293e8e4SViacheslav Ovsiienko 			   const struct rte_flow_item_flex_conf *conf,
1226b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_parser_devx *devx,
1227b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_item *item,
1228b293e8e4SViacheslav Ovsiienko 			   struct rte_flow_error *error)
1229b293e8e4SViacheslav Ovsiienko {
1230b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
1231b293e8e4SViacheslav Ovsiienko 	uint32_t i;
1232b293e8e4SViacheslav Ovsiienko 
1233b293e8e4SViacheslav Ovsiienko 	RTE_SET_USED(item);
1234b293e8e4SViacheslav Ovsiienko 	if (conf->nb_inputs > attr->max_num_arc_in)
1235b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1236b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1237b293e8e4SViacheslav Ovsiienko 			 "too many input links");
1238b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_inputs; i++) {
1239b293e8e4SViacheslav Ovsiienko 		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
1240b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_link *link = conf->input_link + i;
1241b293e8e4SViacheslav Ovsiienko 		const struct rte_flow_item *rte_item = &link->item;
1242b293e8e4SViacheslav Ovsiienko 		int arc_type;
1243b293e8e4SViacheslav Ovsiienko 		int ret;
1244b293e8e4SViacheslav Ovsiienko 
1245b293e8e4SViacheslav Ovsiienko 		if (!rte_item->spec || !rte_item->mask || rte_item->last)
1246b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1247b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1248b293e8e4SViacheslav Ovsiienko 				 "invalid flex item IN arc format");
1249b293e8e4SViacheslav Ovsiienko 		arc_type = mlx5_flex_arc_type(rte_item->type, true);
1250b293e8e4SViacheslav Ovsiienko 		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
1251b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1252b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1253b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item IN arc type");
1254b293e8e4SViacheslav Ovsiienko 		arc->arc_parse_graph_node = arc_type;
1255b293e8e4SViacheslav Ovsiienko 		arc->start_inner_tunnel = 0;
1256b293e8e4SViacheslav Ovsiienko 		/*
1257b293e8e4SViacheslav Ovsiienko 		 * Configure arc IN condition value. The value location depends
1258b293e8e4SViacheslav Ovsiienko 		 * on protocol. Current FW version supports IP & UDP for IN
1259b293e8e4SViacheslav Ovsiienko 		 * arcs only, and locations for these protocols are defined.
1260b293e8e4SViacheslav Ovsiienko 		 * Add more protocols when available.
1261b293e8e4SViacheslav Ovsiienko 		 */
1262b293e8e4SViacheslav Ovsiienko 		switch (rte_item->type) {
1263b293e8e4SViacheslav Ovsiienko 		case RTE_FLOW_ITEM_TYPE_ETH:
1264b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_arc_in_eth(rte_item, error);
1265b293e8e4SViacheslav Ovsiienko 			break;
1266b293e8e4SViacheslav Ovsiienko 		case RTE_FLOW_ITEM_TYPE_UDP:
1267b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_arc_in_udp(rte_item, error);
1268b293e8e4SViacheslav Ovsiienko 			break;
12696dfb83f1SViacheslav Ovsiienko 		case RTE_FLOW_ITEM_TYPE_IPV4:
12706dfb83f1SViacheslav Ovsiienko 			ret = mlx5_flex_arc_in_ipv4(rte_item, error);
12716dfb83f1SViacheslav Ovsiienko 			break;
1272d451d1a1SRongwei Liu 		case RTE_FLOW_ITEM_TYPE_IPV6:
1273d451d1a1SRongwei Liu 			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
1274d451d1a1SRongwei Liu 			break;
1275b293e8e4SViacheslav Ovsiienko 		default:
1276b293e8e4SViacheslav Ovsiienko 			MLX5_ASSERT(false);
1277b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1278b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1279b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item IN arc type");
1280b293e8e4SViacheslav Ovsiienko 		}
1281b293e8e4SViacheslav Ovsiienko 		if (ret < 0)
1282b293e8e4SViacheslav Ovsiienko 			return ret;
1283b293e8e4SViacheslav Ovsiienko 		arc->compare_condition_value = (uint16_t)ret;
1284b293e8e4SViacheslav Ovsiienko 	}
1285b293e8e4SViacheslav Ovsiienko 	return 0;
1286b293e8e4SViacheslav Ovsiienko }
1287b293e8e4SViacheslav Ovsiienko 
1288b293e8e4SViacheslav Ovsiienko static int
1289b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
1290b293e8e4SViacheslav Ovsiienko 			    const struct rte_flow_item_flex_conf *conf,
1291b293e8e4SViacheslav Ovsiienko 			    struct mlx5_flex_parser_devx *devx,
1292b293e8e4SViacheslav Ovsiienko 			    struct mlx5_flex_item *item,
1293b293e8e4SViacheslav Ovsiienko 			    struct rte_flow_error *error)
1294b293e8e4SViacheslav Ovsiienko {
1295b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
1296b293e8e4SViacheslav Ovsiienko 	bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
1297b293e8e4SViacheslav Ovsiienko 	uint32_t i;
1298b293e8e4SViacheslav Ovsiienko 
1299b293e8e4SViacheslav Ovsiienko 	RTE_SET_USED(item);
1300b293e8e4SViacheslav Ovsiienko 	if (conf->nb_outputs > attr->max_num_arc_out)
1301b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1302b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1303b293e8e4SViacheslav Ovsiienko 			 "too many output links");
1304b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_outputs; i++) {
1305b293e8e4SViacheslav Ovsiienko 		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
1306b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_link *link = conf->output_link + i;
1307b293e8e4SViacheslav Ovsiienko 		const struct rte_flow_item *rte_item = &link->item;
1308b293e8e4SViacheslav Ovsiienko 		int arc_type;
1309b293e8e4SViacheslav Ovsiienko 
1310b293e8e4SViacheslav Ovsiienko 		if (rte_item->spec || rte_item->mask || rte_item->last)
1311b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1312b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1313b293e8e4SViacheslav Ovsiienko 				 "flex node: invalid OUT arc format");
1314b293e8e4SViacheslav Ovsiienko 		arc_type = mlx5_flex_arc_type(rte_item->type, false);
1315b293e8e4SViacheslav Ovsiienko 		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
1316b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1317b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1318b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item OUT arc type");
1319b293e8e4SViacheslav Ovsiienko 		arc->arc_parse_graph_node = arc_type;
1320b293e8e4SViacheslav Ovsiienko 		arc->start_inner_tunnel = !!is_tunnel;
1321b293e8e4SViacheslav Ovsiienko 		arc->compare_condition_value = link->next;
1322b293e8e4SViacheslav Ovsiienko 	}
1323b293e8e4SViacheslav Ovsiienko 	return 0;
1324b293e8e4SViacheslav Ovsiienko }
1325b293e8e4SViacheslav Ovsiienko 
1326b293e8e4SViacheslav Ovsiienko /* Translate RTE flex item API configuration into flaex parser settings. */
1327b293e8e4SViacheslav Ovsiienko static int
1328b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_conf(struct rte_eth_dev *dev,
1329b293e8e4SViacheslav Ovsiienko 			 const struct rte_flow_item_flex_conf *conf,
1330b293e8e4SViacheslav Ovsiienko 			 struct mlx5_flex_parser_devx *devx,
1331b293e8e4SViacheslav Ovsiienko 			 struct mlx5_flex_item *item,
1332b293e8e4SViacheslav Ovsiienko 			 struct rte_flow_error *error)
1333b293e8e4SViacheslav Ovsiienko {
1334b293e8e4SViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
133553820561SMichael Baum 	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
1336b293e8e4SViacheslav Ovsiienko 	int ret;
1337b293e8e4SViacheslav Ovsiienko 
1338b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_length(attr, conf, devx, error);
1339b293e8e4SViacheslav Ovsiienko 	if (ret)
1340b293e8e4SViacheslav Ovsiienko 		return ret;
1341b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_next(attr, conf, devx, error);
1342b293e8e4SViacheslav Ovsiienko 	if (ret)
1343b293e8e4SViacheslav Ovsiienko 		return ret;
1344b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
1345b293e8e4SViacheslav Ovsiienko 	if (ret)
1346b293e8e4SViacheslav Ovsiienko 		return ret;
1347b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
1348b293e8e4SViacheslav Ovsiienko 	if (ret)
1349b293e8e4SViacheslav Ovsiienko 		return ret;
1350b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
1351b293e8e4SViacheslav Ovsiienko 	if (ret)
1352b293e8e4SViacheslav Ovsiienko 		return ret;
1353b293e8e4SViacheslav Ovsiienko 	return 0;
1354b293e8e4SViacheslav Ovsiienko }
1355b293e8e4SViacheslav Ovsiienko 
1356db25cadcSViacheslav Ovsiienko /**
1357db25cadcSViacheslav Ovsiienko  * Create the flex item with specified configuration over the Ethernet device.
1358db25cadcSViacheslav Ovsiienko  *
1359db25cadcSViacheslav Ovsiienko  * @param dev
1360db25cadcSViacheslav Ovsiienko  *   Ethernet device to create flex item on.
1361db25cadcSViacheslav Ovsiienko  * @param[in] conf
1362db25cadcSViacheslav Ovsiienko  *   Flex item configuration.
1363db25cadcSViacheslav Ovsiienko  * @param[out] error
1364db25cadcSViacheslav Ovsiienko  *   Perform verbose error reporting if not NULL. PMDs initialize this
1365db25cadcSViacheslav Ovsiienko  *   structure in case of error only.
1366db25cadcSViacheslav Ovsiienko  *
1367db25cadcSViacheslav Ovsiienko  * @return
1368db25cadcSViacheslav Ovsiienko  *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
1369db25cadcSViacheslav Ovsiienko  */
1370db25cadcSViacheslav Ovsiienko struct rte_flow_item_flex_handle *
1371db25cadcSViacheslav Ovsiienko flow_dv_item_create(struct rte_eth_dev *dev,
1372db25cadcSViacheslav Ovsiienko 		    const struct rte_flow_item_flex_conf *conf,
1373db25cadcSViacheslav Ovsiienko 		    struct rte_flow_error *error)
1374db25cadcSViacheslav Ovsiienko {
1375db25cadcSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
13769086ac09SGregory Etelson 	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
1377db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_item *flex;
13789086ac09SGregory Etelson 	struct mlx5_list_entry *ent;
1379db25cadcSViacheslav Ovsiienko 
1380db25cadcSViacheslav Ovsiienko 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1381db25cadcSViacheslav Ovsiienko 	flex = mlx5_flex_alloc(priv);
1382db25cadcSViacheslav Ovsiienko 	if (!flex) {
1383db25cadcSViacheslav Ovsiienko 		rte_flow_error_set(error, ENOMEM,
1384db25cadcSViacheslav Ovsiienko 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1385db25cadcSViacheslav Ovsiienko 				   "too many flex items created on the port");
1386db25cadcSViacheslav Ovsiienko 		return NULL;
1387db25cadcSViacheslav Ovsiienko 	}
1388b293e8e4SViacheslav Ovsiienko 	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
1389b293e8e4SViacheslav Ovsiienko 		goto error;
13909086ac09SGregory Etelson 	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
13919086ac09SGregory Etelson 	if (!ent) {
13929086ac09SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
13939086ac09SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13949086ac09SGregory Etelson 				   "flex item creation failure");
13959086ac09SGregory Etelson 		goto error;
13969086ac09SGregory Etelson 	}
13979086ac09SGregory Etelson 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
1398db25cadcSViacheslav Ovsiienko 	/* Mark initialized flex item valid. */
1399e12a0166STyler Retzlaff 	rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
1400db25cadcSViacheslav Ovsiienko 	return (struct rte_flow_item_flex_handle *)flex;
14019086ac09SGregory Etelson 
14029086ac09SGregory Etelson error:
14039086ac09SGregory Etelson 	mlx5_flex_free(priv, flex);
14049086ac09SGregory Etelson 	return NULL;
1405db25cadcSViacheslav Ovsiienko }
1406db25cadcSViacheslav Ovsiienko 
1407db25cadcSViacheslav Ovsiienko /**
1408db25cadcSViacheslav Ovsiienko  * Release the flex item on the specified Ethernet device.
1409db25cadcSViacheslav Ovsiienko  *
1410db25cadcSViacheslav Ovsiienko  * @param dev
1411db25cadcSViacheslav Ovsiienko  *   Ethernet device to destroy flex item on.
1412db25cadcSViacheslav Ovsiienko  * @param[in] handle
1413db25cadcSViacheslav Ovsiienko  *   Handle of the item existing on the specified device.
1414db25cadcSViacheslav Ovsiienko  * @param[out] error
1415db25cadcSViacheslav Ovsiienko  *   Perform verbose error reporting if not NULL. PMDs initialize this
1416db25cadcSViacheslav Ovsiienko  *   structure in case of error only.
1417db25cadcSViacheslav Ovsiienko  *
1418db25cadcSViacheslav Ovsiienko  * @return
1419db25cadcSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
1420db25cadcSViacheslav Ovsiienko  */
1421db25cadcSViacheslav Ovsiienko int
1422db25cadcSViacheslav Ovsiienko flow_dv_item_release(struct rte_eth_dev *dev,
1423db25cadcSViacheslav Ovsiienko 		     const struct rte_flow_item_flex_handle *handle,
1424db25cadcSViacheslav Ovsiienko 		     struct rte_flow_error *error)
1425db25cadcSViacheslav Ovsiienko {
1426db25cadcSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
1427db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_item *flex =
1428db25cadcSViacheslav Ovsiienko 		(struct mlx5_flex_item *)(uintptr_t)handle;
1429db25cadcSViacheslav Ovsiienko 	uint32_t old_refcnt = 1;
14309086ac09SGregory Etelson 	int rc;
1431db25cadcSViacheslav Ovsiienko 
1432db25cadcSViacheslav Ovsiienko 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1433db25cadcSViacheslav Ovsiienko 	rte_spinlock_lock(&priv->flex_item_sl);
1434db25cadcSViacheslav Ovsiienko 	if (mlx5_flex_index(priv, flex) < 0) {
1435db25cadcSViacheslav Ovsiienko 		rte_spinlock_unlock(&priv->flex_item_sl);
1436db25cadcSViacheslav Ovsiienko 		return rte_flow_error_set(error, EINVAL,
1437db25cadcSViacheslav Ovsiienko 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1438db25cadcSViacheslav Ovsiienko 					  "invalid flex item handle value");
1439db25cadcSViacheslav Ovsiienko 	}
1440e12a0166STyler Retzlaff 	if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
1441e12a0166STyler Retzlaff 					 rte_memory_order_acquire, rte_memory_order_relaxed)) {
1442db25cadcSViacheslav Ovsiienko 		rte_spinlock_unlock(&priv->flex_item_sl);
1443db25cadcSViacheslav Ovsiienko 		return rte_flow_error_set(error, EBUSY,
1444db25cadcSViacheslav Ovsiienko 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1445db25cadcSViacheslav Ovsiienko 					  "flex item has flow references");
1446db25cadcSViacheslav Ovsiienko 	}
1447db25cadcSViacheslav Ovsiienko 	/* Flex item is marked as invalid, we can leave locked section. */
1448db25cadcSViacheslav Ovsiienko 	rte_spinlock_unlock(&priv->flex_item_sl);
14499086ac09SGregory Etelson 	MLX5_ASSERT(flex->devx_fp);
14509086ac09SGregory Etelson 	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
14519086ac09SGregory Etelson 				  &flex->devx_fp->entry);
14529086ac09SGregory Etelson 	flex->devx_fp = NULL;
1453db25cadcSViacheslav Ovsiienko 	mlx5_flex_free(priv, flex);
14549086ac09SGregory Etelson 	if (rc < 0)
14559086ac09SGregory Etelson 		return rte_flow_error_set(error, EBUSY,
14569086ac09SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
14579086ac09SGregory Etelson 					  "flex item release failure");
1458db25cadcSViacheslav Ovsiienko 	return 0;
1459db25cadcSViacheslav Ovsiienko }
14609086ac09SGregory Etelson 
14619086ac09SGregory Etelson /* DevX flex parser list callbacks. */
14629086ac09SGregory Etelson struct mlx5_list_entry *
14639086ac09SGregory Etelson mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
14649086ac09SGregory Etelson {
14659086ac09SGregory Etelson 	struct mlx5_dev_ctx_shared *sh = list_ctx;
14669086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp, *conf = ctx;
1467bc0a9303SRongwei Liu 	uint32_t i;
1468bc0a9303SRongwei Liu 	uint8_t sample_info = sh->cdev->config.hca_attr.flex.query_match_sample_info;
14699086ac09SGregory Etelson 	int ret;
14709086ac09SGregory Etelson 
14719086ac09SGregory Etelson 	fp = mlx5_malloc(MLX5_MEM_ZERO,	sizeof(struct mlx5_flex_parser_devx),
14729086ac09SGregory Etelson 			 0, SOCKET_ID_ANY);
14739086ac09SGregory Etelson 	if (!fp)
14749086ac09SGregory Etelson 		return NULL;
14759086ac09SGregory Etelson 	/* Copy the requested configurations. */
14769086ac09SGregory Etelson 	fp->num_samples = conf->num_samples;
14779086ac09SGregory Etelson 	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
14789086ac09SGregory Etelson 	/* Create DevX flex parser. */
14799086ac09SGregory Etelson 	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
14809086ac09SGregory Etelson 							&fp->devx_conf);
14819086ac09SGregory Etelson 	if (!fp->devx_obj)
14829086ac09SGregory Etelson 		goto error;
14839086ac09SGregory Etelson 	/* Query the firmware assigned sample ids. */
14849086ac09SGregory Etelson 	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
14859086ac09SGregory Etelson 						fp->sample_ids,
1486f1324a17SRongwei Liu 						fp->num_samples,
1487f1324a17SRongwei Liu 						&fp->anchor_id);
14889086ac09SGregory Etelson 	if (ret)
14899086ac09SGregory Etelson 		goto error;
1490bc0a9303SRongwei Liu 	/* Query sample information per ID. */
1491bc0a9303SRongwei Liu 	for (i = 0; i < fp->num_samples && sample_info; i++) {
1492bc0a9303SRongwei Liu 		ret = mlx5_devx_cmd_match_sample_info_query(sh->cdev->ctx, fp->sample_ids[i],
1493bc0a9303SRongwei Liu 							    &fp->sample_info[i]);
1494bc0a9303SRongwei Liu 		if (ret)
1495bc0a9303SRongwei Liu 			goto error;
1496bc0a9303SRongwei Liu 	}
14979086ac09SGregory Etelson 	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
14989086ac09SGregory Etelson 		(const void *)fp, fp->num_samples);
14999086ac09SGregory Etelson 	return &fp->entry;
15009086ac09SGregory Etelson error:
15019086ac09SGregory Etelson 	if (fp->devx_obj)
15029086ac09SGregory Etelson 		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
15039086ac09SGregory Etelson 	if (fp)
15049086ac09SGregory Etelson 		mlx5_free(fp);
15059086ac09SGregory Etelson 	return NULL;
15069086ac09SGregory Etelson }
15079086ac09SGregory Etelson 
15089086ac09SGregory Etelson int
15099086ac09SGregory Etelson mlx5_flex_parser_match_cb(void *list_ctx,
15109086ac09SGregory Etelson 			  struct mlx5_list_entry *iter, void *ctx)
15119086ac09SGregory Etelson {
15129086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
15139086ac09SGregory Etelson 		container_of(iter, struct mlx5_flex_parser_devx, entry);
15149086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *org =
15159086ac09SGregory Etelson 		container_of(ctx, struct mlx5_flex_parser_devx, entry);
15169086ac09SGregory Etelson 
15179086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
15189086ac09SGregory Etelson 	return !iter || !ctx || memcmp(&fp->devx_conf,
15199086ac09SGregory Etelson 				       &org->devx_conf,
15209086ac09SGregory Etelson 				       sizeof(fp->devx_conf));
15219086ac09SGregory Etelson }
15229086ac09SGregory Etelson 
15239086ac09SGregory Etelson void
15249086ac09SGregory Etelson mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
15259086ac09SGregory Etelson {
15269086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
15279086ac09SGregory Etelson 		container_of(entry, struct mlx5_flex_parser_devx, entry);
15289086ac09SGregory Etelson 
15299086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
15309086ac09SGregory Etelson 	MLX5_ASSERT(fp->devx_obj);
15319086ac09SGregory Etelson 	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
15329086ac09SGregory Etelson 	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
15339086ac09SGregory Etelson 	mlx5_free(entry);
15349086ac09SGregory Etelson }
15359086ac09SGregory Etelson 
15369086ac09SGregory Etelson struct mlx5_list_entry *
15379086ac09SGregory Etelson mlx5_flex_parser_clone_cb(void *list_ctx,
15389086ac09SGregory Etelson 			  struct mlx5_list_entry *entry, void *ctx)
15399086ac09SGregory Etelson {
15409086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp;
15419086ac09SGregory Etelson 
15429086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
15439086ac09SGregory Etelson 	RTE_SET_USED(entry);
15449086ac09SGregory Etelson 	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
15459086ac09SGregory Etelson 			 0, SOCKET_ID_ANY);
15469086ac09SGregory Etelson 	if (!fp)
15479086ac09SGregory Etelson 		return NULL;
15489086ac09SGregory Etelson 	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
15499086ac09SGregory Etelson 	return &fp->entry;
15509086ac09SGregory Etelson }
15519086ac09SGregory Etelson 
15529086ac09SGregory Etelson void
15539086ac09SGregory Etelson mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
15549086ac09SGregory Etelson {
15559086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
15569086ac09SGregory Etelson 		container_of(entry, struct mlx5_flex_parser_devx, entry);
15579086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
15589086ac09SGregory Etelson 	mlx5_free(fp);
15599086ac09SGregory Etelson }
1560