/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */
#include <rte_malloc.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"

static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
	      "Flex item maximal number exceeds uint32_t bit width");

/**
 * Routine called once on port initialization to initialize
 * the flex item related infrastructure.
 *
 * @param dev
 *   Ethernet device to perform flex item initialization
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_item_port_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	rte_spinlock_init(&priv->flex_item_sl);
	MLX5_ASSERT(!priv->flex_item_map);
	return 0;
}

/**
 * Routine called once on port close to perform flex item
 * related infrastructure cleanup.
 *
 * @param dev
 *   Ethernet device to perform cleanup
 */
void
mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
		if (priv->flex_item_map & (1 << i)) {
			struct mlx5_flex_item *flex = &priv->flex_item[i];

			claim_zero(mlx5_list_unregister
					(priv->sh->flex_parsers_dv,
					 &flex->devx_fp->entry));
			flex->devx_fp = NULL;
			flex->refcnt = 0;
			priv->flex_item_map &= ~(1 << i);
		}
	}
}

static int
mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	uintptr_t start = (uintptr_t)&priv->flex_item[0];
	uintptr_t entry = (uintptr_t)item;
	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);

	if (entry < start ||
	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
	    (entry - start) % sizeof(struct mlx5_flex_item) ||
	    !(priv->flex_item_map & (1u << idx)))
		return -1;
	return (int)idx;
}
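
/*
 * Illustrative note (not part of the driver logic): the routine maps an
 * item pointer back to its slot index by pure pointer arithmetic. For
 * example, with flex_item_map == 0x5 (slots 0 and 2 busy):
 *   - &priv->flex_item[2] -> 2 (in range, aligned, bit 2 is set);
 *   - &priv->flex_item[1] -> -1 (bit 1 is clear);
 *   - any pointer not aligned to sizeof(struct mlx5_flex_item) -> -1.
 */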

static struct mlx5_flex_item *
mlx5_flex_alloc(struct mlx5_priv *priv)
{
	struct mlx5_flex_item *item = NULL;

	rte_spinlock_lock(&priv->flex_item_sl);
	if (~priv->flex_item_map) {
		uint32_t idx = rte_bsf32(~priv->flex_item_map);

		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
			item = &priv->flex_item[idx];
			MLX5_ASSERT(!item->refcnt);
			MLX5_ASSERT(!item->devx_fp);
			item->devx_fp = NULL;
			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
			priv->flex_item_map |= 1u << idx;
		}
	}
	rte_spinlock_unlock(&priv->flex_item_sl);
	return item;
}
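
/*
 * Example (illustrative): rte_bsf32(~flex_item_map) returns the lowest
 * clear bit, i.e. the first free slot. With flex_item_map == 0xB (binary
 * 1011) the routine picks slot 2 and the map becomes 0xF. When all
 * MLX5_PORT_FLEX_ITEM_NUM low bits are set, the found index falls beyond
 * the array and NULL is returned.
 */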

static void
mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	int idx = mlx5_flex_index(priv, item);

	MLX5_ASSERT(idx >= 0 &&
		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
		    (priv->flex_item_map & (1u << idx)));
	if (idx >= 0) {
		rte_spinlock_lock(&priv->flex_item_sl);
		MLX5_ASSERT(!item->refcnt);
		MLX5_ASSERT(!item->devx_fp);
		item->devx_fp = NULL;
		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
		priv->flex_item_map &= ~(1u << idx);
		rte_spinlock_unlock(&priv->flex_item_sl);
	}
}

static uint32_t
mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
		       uint32_t pos, uint32_t width, uint32_t shift)
{
	const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
	uint32_t val, vbits;

	/* Process the bitfield start byte. */
	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
	MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
	if (item->length <= pos / CHAR_BIT)
		return 0;
	val = *ptr++ >> (pos % CHAR_BIT);
	vbits = CHAR_BIT - pos % CHAR_BIT;
	pos = (pos + vbits) / CHAR_BIT;
	vbits = RTE_MIN(vbits, width);
	val &= RTE_BIT32(vbits) - 1;
	while (vbits < width && pos < item->length) {
		uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
		uint32_t tmp = *ptr++;

		pos++;
		tmp &= RTE_BIT32(part) - 1;
		val |= tmp << vbits;
		vbits += part;
	}
	return rte_bswap32(val << shift);
}
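
/*
 * Worked example (illustrative): pattern = {0xAB, 0xCD}, length = 2,
 * pos = 4, width = 8, shift = 0. The start byte yields 0xAB >> 4 = 0x0A
 * (4 valid bits), the next byte contributes its low nibble 0x0D shifted
 * up: val = 0x0A | (0x0D << 4) = 0xDA. The routine then returns
 * rte_bswap32(0xDA) = 0xDA000000 - the field is assembled LSB-first from
 * the pattern buffer, shifted, and byte-swapped before use.
 */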

#define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
	do { \
		uint32_t tmp, out = (def); \
		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
			       prog_sample_field_value_##x); \
		tmp = (tmp & ~out) | (val); \
		MLX5_SET(fte_match_set_misc4, misc4_v, \
			 prog_sample_field_value_##x, tmp); \
		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
			       prog_sample_field_value_##x); \
		tmp = (tmp & ~out) | (msk); \
		MLX5_SET(fte_match_set_misc4, misc4_m, \
			 prog_sample_field_value_##x, tmp); \
		tmp = tmp ? (sid) : 0; \
		MLX5_SET(fte_match_set_misc4, misc4_v, \
			 prog_sample_field_id_##x, tmp); \
		MLX5_SET(fte_match_set_misc4, misc4_m, \
			 prog_sample_field_id_##x, tmp); \
	} while (0)

__rte_always_inline static void
mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
			   uint32_t def, uint32_t mask, uint32_t value,
			   uint32_t sample_id, uint32_t id)
{
	switch (id) {
	case 0:
		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
		break;
	case 1:
		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
		break;
	case 2:
		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
		break;
	case 3:
		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
		break;
	case 4:
		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
		break;
	case 5:
		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
		break;
	case 6:
		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
		break;
	case 7:
		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
#undef SET_FP_MATCH_SAMPLE_ID
}
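
/*
 * Design note: MLX5_SET()/MLX5_GET() resolve PRM field names at compile
 * time via token pasting (prog_sample_field_value_##x), so the eight
 * prog_sample_field_value_0..7 fields cannot be indexed at run time.
 * The switch above instantiates the same SET_FP_MATCH_SAMPLE_ID() macro
 * once per register index instead.
 */
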
/**
 * Translate item pattern into matcher fields according to translation
 * array.
 *
 * @param dev
 *   Ethernet device to translate flex item on.
 * @param[in, out] matcher
 *   Flow matcher to configure.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] is_inner
 *   Inner Flex Item (follows after tunnel header).
 */
void
mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
			      void *matcher, void *key,
			      const struct rte_flow_item *item,
			      bool is_inner)
{
	const struct rte_flow_item_flex *spec, *mask;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
	struct mlx5_flex_item *tp;
	uint32_t i, pos = 0;
	uint32_t sample_id;

	RTE_SET_USED(dev);
	MLX5_ASSERT(item->spec && item->mask);
	spec = item->spec;
	mask = item->mask;
	tp = (struct mlx5_flex_item *)spec->handle;
	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
	for (i = 0; i < tp->mapnum; i++) {
		struct mlx5_flex_pattern_field *map = tp->map + i;
		uint32_t id = map->reg_id;
		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
		uint32_t val, msk;

		/* Skip placeholders for DUMMY fields. */
		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
			pos += map->width;
			continue;
		}
		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
		MLX5_ASSERT(map->width);
		MLX5_ASSERT(id < tp->devx_fp->num_samples);
		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
			uint32_t num_samples = tp->devx_fp->num_samples / 2;

			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
			MLX5_ASSERT(id < num_samples);
			id += num_samples;
		}
		if (attr->ext_sample_id)
			sample_id = tp->devx_fp->sample_ids[id].sample_id;
		else
			sample_id = tp->devx_fp->sample_ids[id].id;
		mlx5_flex_set_match_sample(misc4_m, misc4_v,
					   def, msk & def, val & msk & def,
					   sample_id, id);
		pos += map->width;
	}
}
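
/*
 * Example (illustrative): a mapping entry with width = 8 and shift = 4
 * produces def = ((1 << 8) - 1) << 4 = 0xFF0. The sample value written
 * to the matcher is masked as val & msk & def, so only bits that are
 * both requested by the flow mask and covered by the mapping survive.
 */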

/**
 * Convert flex item handle (from the RTE flow) to flex item index on port.
 * Optionally can increment flex item object reference count.
 *
 * @param dev
 *   Ethernet device to acquire flex item on.
 * @param[in] handle
 *   Flow item handle from item spec.
 * @param[in] acquire
 *   If set - increment reference counter.
 *
 * @return
 *   >=0 - index on success, a negative errno value otherwise
 *         and rte_errno is set.
 */
int
mlx5_flex_acquire_index(struct rte_eth_dev *dev,
			struct rte_flow_item_flex_handle *handle,
			bool acquire)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
	int ret = mlx5_flex_index(priv, flex);

	if (ret < 0) {
		errno = EINVAL;
		rte_errno = EINVAL;
		return ret;
	}
	if (acquire)
		__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return ret;
}
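
/*
 * Usage sketch (illustrative only, error handling trimmed):
 *
 *   int idx = mlx5_flex_acquire_index(dev, spec->handle, true);
 *
 *   if (idx < 0)
 *       return idx;  (rte_errno is already set)
 *   ... use priv->flex_item[idx] while the flow exists ...
 *   mlx5_flex_release_index(dev, idx);  (drop the acquired reference)
 */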

/**
 * Release flex item index on port - decrements reference counter by index.
 *
 * @param dev
 *   Ethernet device to release flex item on.
 * @param[in] index
 *   Flow item index.
 *
 * @return
 *   0 - on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_release_index(struct rte_eth_dev *dev,
			int index)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex;

	if (index >= MLX5_PORT_FLEX_ITEM_NUM ||
	    !(priv->flex_item_map & (1u << index))) {
		errno = EINVAL;
		rte_errno = EINVAL;
		return -EINVAL;
	}
	flex = priv->flex_item + index;
	if (flex->refcnt <= 1) {
		MLX5_ASSERT(false);
		errno = EINVAL;
		rte_errno = EINVAL;
		return -EINVAL;
	}
	__atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return 0;
}

/*
 * Calculate largest mask value for a given shift.
 *
 *   shift      mask
 * ------- ---------------
 *    0     b111100  0x3C
 *    1     b111110  0x3E
 *    2     b111111  0x3F
 *    3     b011111  0x1F
 *    4     b001111  0x0F
 *    5     b000111  0x07
 */
static uint8_t
mlx5_flex_hdr_len_mask(uint8_t shift,
		       const struct mlx5_hca_flex_attr *attr)
{
	uint32_t base_mask;
	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;

	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
	return diff == 0 ? base_mask :
	       diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
}
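
/*
 * Example (illustrative, assuming a 6-bit base mask 0x3F and
 * MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD == 2, as in the table above):
 * shift = 0 gives diff = -2 and (0x3F << 2) & 0x3F = 0x3C, while
 * shift = 4 gives diff = 2 and 0x3F >> 2 = 0x0F - reproducing the
 * first and fifth table rows.
 */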

static int
mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
			   const struct rte_flow_item_flex_conf *conf,
			   struct mlx5_flex_parser_devx *devx,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_flex_field *field = &conf->next_header;
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
	uint32_t len_width, mask;

	if (field->field_base % CHAR_BIT)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "not byte aligned header length field");
	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "invalid header length field mode (DUMMY)");
	case FIELD_MODE_FIXED:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (FIXED)");
		if (field->field_size ||
		    field->offset_mask || field->offset_shift)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid fields for fixed mode");
		if (field->field_base < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "negative header length field base (FIXED)");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
		break;
	case FIELD_MODE_OFFSET:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (OFFSET)");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
		if (field->offset_mask == 0 ||
		    !rte_is_power_of_2(field->offset_mask + 1))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid length field offset mask (OFFSET)");
		len_width = rte_fls_u32(field->offset_mask);
		if (len_width > attr->header_length_mask_width)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field offset mask too wide (OFFSET)");
		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
		if (mask < field->offset_mask)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field shift too big (OFFSET)");
		node->header_length_field_mask = RTE_MIN(mask,
							 field->offset_mask);
		break;
	case FIELD_MODE_BITMASK:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (BITMASK)");
		if (attr->header_length_mask_width < field->field_size)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "header length field width exceeds limit");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
		if (mask < field->offset_mask)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field shift too big (BITMASK)");
		node->header_length_field_mask = RTE_MIN(mask,
							 field->offset_mask);
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown header length field mode");
	}
	if (field->field_base / CHAR_BIT >= 0 &&
	    field->field_base / CHAR_BIT > attr->max_base_header_length)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "header length field base exceeds limit");
	node->header_length_base_value = field->field_base / CHAR_BIT;
	if (field->field_mode == FIELD_MODE_OFFSET ||
	    field->field_mode == FIELD_MODE_BITMASK) {
		if (field->offset_shift > 15 || field->offset_shift < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "header length field shift exceeds limit");
		node->header_length_field_shift = field->offset_shift;
		node->header_length_field_offset = field->offset_base;
	}
	return 0;
}
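
/*
 * Configuration sketch (hypothetical values, not a validated profile):
 * an IPv4 IHL-like length field - 4 bits at bit offset 4, counting
 * 32-bit words - would map to FIELD_MODE_OFFSET with offset_base = 4,
 * offset_mask = 0xF and offset_shift = 2 (shift left by 2 to turn words
 * into bytes), with field_base carrying any fixed extra length in bits.
 */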

static int
mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
			 const struct rte_flow_item_flex_conf *conf,
			 struct mlx5_flex_parser_devx *devx,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;

	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		if (conf->nb_outputs)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "next protocol field is required (DUMMY)");
		return 0;
	case FIELD_MODE_FIXED:
		break;
	case FIELD_MODE_OFFSET:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field mode (OFFSET)");
	case FIELD_MODE_BITMASK:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field mode (BITMASK)");
	default:
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown next protocol field mode");
	}
	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
	if (!conf->nb_outputs)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "out link(s) required if next protocol field is present");
	if (attr->max_next_header_offset < field->field_base)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "next protocol field base exceeds limit");
	if (field->offset_shift)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field shift");
	node->next_header_field_offset = field->field_base;
	node->next_header_field_size = field->field_size;
	return 0;
}
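
/*
 * Configuration sketch (hypothetical values): an 8-bit next-protocol
 * selector at byte 9 of the header, IPv4-protocol style, would map to
 * FIELD_MODE_FIXED with field_base = 72 (bits) and field_size = 8; the
 * out links (conf->nb_outputs) then define which sampled value selects
 * which next parser node.
 */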

/* Helper structure to handle field bit intervals. */
struct mlx5_flex_field_cover {
	uint16_t num;
	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
};

static void
mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
		       uint16_t num, int32_t start, int32_t end)
{
	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
	MLX5_ASSERT(num <= cover->num);
	if (num < cover->num) {
		memmove(&cover->start[num + 1], &cover->start[num],
			(cover->num - num) * sizeof(int32_t));
		memmove(&cover->end[num + 1], &cover->end[num],
			(cover->num - num) * sizeof(int32_t));
	}
	cover->start[num] = start;
	cover->end[num] = end;
	cover->num++;
}

static void
mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
{
	uint32_t i, del = 0;
	int32_t end;

	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
	MLX5_ASSERT(num < (cover->num - 1));
	end = cover->end[num];
	for (i = num + 1; i < cover->num; i++) {
		if (end < cover->start[i])
			break;
		del++;
		if (end <= cover->end[i]) {
			cover->end[num] = cover->end[i];
			break;
		}
	}
	if (del) {
		MLX5_ASSERT(del < (cover->num - 1u - num));
		cover->num -= del;
		MLX5_ASSERT(cover->num > num);
		if ((cover->num - num) > 1) {
			memmove(&cover->start[num + 1],
				&cover->start[num + 1 + del],
				(cover->num - num - 1) * sizeof(int32_t));
			memmove(&cover->end[num + 1],
				&cover->end[num + 1 + del],
				(cover->num - num - 1) * sizeof(int32_t));
		}
	}
}
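
/*
 * Worked example (illustrative): with intervals {[0,16), [32,48)},
 * extending the first interval end to 40 makes it overlap the second;
 * mlx5_flex_merge_field(cover, 0) then collapses the pair into the
 * single interval [0,48) and decrements cover->num.
 */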

/*
 * Validate the sample field and update the interval array
 * if parameters match the "match" field.
 * Returns:
 *    < 0  - error
 *    == 0 - no match, interval array not updated
 *    > 0  - match, interval array updated
 */
static int
mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
		       struct rte_flow_item_flex_field *field,
		       struct rte_flow_item_flex_field *match,
		       struct mlx5_hca_flex_attr *attr,
		       struct rte_flow_error *error)
{
	int32_t start, end;
	uint32_t i;

	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		return 0;
	case FIELD_MODE_FIXED:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (FIXED)");
		if (field->offset_shift)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid sample field shift (FIXED)");
		if (field->field_base < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid sample field base (FIXED)");
		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit (FIXED)");
		break;
	case FIELD_MODE_OFFSET:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (OFFSET)");
		if (field->field_base / CHAR_BIT >= 0 &&
		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit");
		break;
	case FIELD_MODE_BITMASK:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (BITMASK)");
		if (field->field_base / CHAR_BIT >= 0 &&
		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit");
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown data sample field mode");
	}
	if (!match) {
		if (!field->field_size)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "zero sample field width");
		if (field->field_id)
			DRV_LOG(DEBUG, "sample field id hint ignored");
	} else {
		if (field->field_mode != match->field_mode ||
		    field->offset_base != match->offset_base ||
		    field->offset_mask != match->offset_mask ||
		    field->offset_shift != match->offset_shift)
			return 0;
	}
	start = field->field_base;
	end = start + field->field_size;
	/* Add the new or similar field to interval array. */
	if (!cover->num) {
		cover->start[cover->num] = start;
		cover->end[cover->num] = end;
		cover->num = 1;
		return 1;
	}
	for (i = 0; i < cover->num; i++) {
		if (start > cover->end[i]) {
			if (i >= (cover->num - 1u)) {
				mlx5_flex_insert_field(cover, cover->num,
						       start, end);
				break;
			}
			continue;
		}
		if (end < cover->start[i]) {
			mlx5_flex_insert_field(cover, i, start, end);
			break;
		}
		if (start < cover->start[i])
			cover->start[i] = start;
		if (end > cover->end[i]) {
			cover->end[i] = end;
			if (i < (cover->num - 1u))
				mlx5_flex_merge_field(cover, i);
		}
		break;
	}
	return 1;
}
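
/*
 * Example of the return contract (illustrative): a DUMMY field returns
 * 0 and leaves the array intact; a first FIXED field [32,64) seeds the
 * array; a second FIXED field [40,80) with identical offset parameters
 * extends that interval to [32,80) and returns 1.
 */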

static void
mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
			struct rte_flow_item_flex_field *field,
			enum rte_flow_item_flex_tunnel_mode tunnel_mode)
{
	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
	na->flow_match_sample_en = 1;
	switch (field->field_mode) {
	case FIELD_MODE_FIXED:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
		break;
	case FIELD_MODE_OFFSET:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
		na->flow_match_sample_field_offset = field->offset_base;
		na->flow_match_sample_field_offset_mask = field->offset_mask;
		na->flow_match_sample_field_offset_shift = field->offset_shift;
		break;
	case FIELD_MODE_BITMASK:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
		na->flow_match_sample_field_offset = field->offset_base;
		na->flow_match_sample_field_offset_mask = field->offset_mask;
		na->flow_match_sample_field_offset_shift = field->offset_shift;
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
	switch (tunnel_mode) {
	case FLEX_TUNNEL_MODE_SINGLE:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_TUNNEL:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
		break;
	case FLEX_TUNNEL_MODE_MULTI:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_OUTER:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
		break;
	case FLEX_TUNNEL_MODE_INNER:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
}

/* Map specified field to set/subset of allocated sample registers. */
static int
mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
		     struct mlx5_flex_parser_devx *parser,
		     struct mlx5_flex_item *item,
		     struct rte_flow_error *error)
{
	struct mlx5_devx_match_sample_attr node;
	int32_t start = field->field_base;
	int32_t end = start + field->field_size;
	struct mlx5_flex_pattern_field *trans;
	uint32_t i, done_bits = 0;

	if (field->field_mode == FIELD_MODE_DUMMY) {
		done_bits = field->field_size;
		while (done_bits) {
			uint32_t part = RTE_MIN(done_bits,
						sizeof(uint32_t) * CHAR_BIT);
			if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
				return rte_flow_error_set
					(error,
					 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					 "too many flex item pattern translations");
			trans = &item->map[item->mapnum];
			trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
			trans->shift = 0;
			trans->width = part;
			item->mapnum++;
			done_bits -= part;
		}
		return 0;
	}
	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
	for (i = 0; i < parser->num_samples; i++) {
		struct mlx5_devx_match_sample_attr *sample =
			&parser->devx_conf.sample[i];
		int32_t reg_start, reg_end;
		int32_t cov_start, cov_end;

		MLX5_ASSERT(sample->flow_match_sample_en);
		if (!sample->flow_match_sample_en)
			break;
		node.flow_match_sample_field_base_offset =
			sample->flow_match_sample_field_base_offset;
		if (memcmp(&node, sample, sizeof(node)))
			continue;
		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
		reg_start *= CHAR_BIT;
		reg_end = reg_start + 32;
		if (end <= reg_start || start >= reg_end)
			continue;
		cov_start = RTE_MAX(reg_start, start);
		cov_end = RTE_MIN(reg_end, end);
		MLX5_ASSERT(cov_end > cov_start);
		done_bits += cov_end - cov_start;
		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "too many flex item pattern translations");
		trans = &item->map[item->mapnum];
		item->mapnum++;
		trans->reg_id = i;
		trans->shift = cov_start - reg_start;
		trans->width = cov_end - cov_start;
	}
	if (done_bits != field->field_size) {
		MLX5_ASSERT(false);
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "failed to map field to sample register");
	}
	return 0;
}
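
/*
 * Mapping example (illustrative): a field occupying bits [8,24) matched
 * against a sample register covering dword [0,32) yields cov_start = 8
 * and cov_end = 24, i.e. a translation entry with shift = 8 and
 * width = 16 pointing at that register.
 */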

/* Allocate sample registers for the specified field type and interval array. */
static int
mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
		       struct mlx5_flex_parser_devx *parser,
		       struct mlx5_flex_item *item,
		       struct rte_flow_item_flex_field *field,
		       struct mlx5_hca_flex_attr *attr,
		       struct rte_flow_error *error)
{
	struct mlx5_devx_match_sample_attr node;
	uint32_t idx = 0;

	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
	while (idx < cover->num) {
		int32_t start, end;

		/*
		 * Sample base offsets are in bytes, should be aligned
		 * to 32-bit as required by firmware for samples.
		 */
		start = RTE_ALIGN_FLOOR(cover->start[idx],
					sizeof(uint32_t) * CHAR_BIT);
		node.flow_match_sample_field_base_offset =
						(start / CHAR_BIT) & 0xFF;
		/* Allocate sample register. */
		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
		    parser->num_samples >= attr->max_num_sample ||
		    parser->num_samples >= attr->max_num_prog_sample)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "no sample registers to handle all flex item fields");
		parser->devx_conf.sample[parser->num_samples] = node;
		parser->num_samples++;
		/* Remove or update covered intervals. */
		end = start + 32;
		while (idx < cover->num) {
			if (end >= cover->end[idx]) {
				idx++;
				continue;
			}
			if (end > cover->start[idx])
				cover->start[idx] = end;
			break;
		}
	}
	return 0;
}
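
/*
 * Allocation example (illustrative): a single interval [8,72) in bits
 * needs three 32-bit samples. The first is aligned down to bit 0 and
 * covers [0,32), trimming the interval start to 32; the next two cover
 * [32,64) and [64,96), after which the interval is fully consumed.
 */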
877b293e8e4SViacheslav Ovsiienko 
878b293e8e4SViacheslav Ovsiienko static int
879b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
880b293e8e4SViacheslav Ovsiienko 			   const struct rte_flow_item_flex_conf *conf,
881b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_parser_devx *parser,
882b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_item *item,
883b293e8e4SViacheslav Ovsiienko 			   struct rte_flow_error *error)
884b293e8e4SViacheslav Ovsiienko {
885b293e8e4SViacheslav Ovsiienko 	struct mlx5_flex_field_cover cover;
886b293e8e4SViacheslav Ovsiienko 	uint32_t i, j;
887b293e8e4SViacheslav Ovsiienko 	int ret;
888b293e8e4SViacheslav Ovsiienko 
889b293e8e4SViacheslav Ovsiienko 	switch (conf->tunnel) {
890b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_SINGLE:
891b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
892b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_OUTER:
893b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
894b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_INNER:
895b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
896b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_MULTI:
897b293e8e4SViacheslav Ovsiienko 		/* Fallthrough */
898b293e8e4SViacheslav Ovsiienko 	case FLEX_TUNNEL_MODE_TUNNEL:
899b293e8e4SViacheslav Ovsiienko 		break;
900b293e8e4SViacheslav Ovsiienko 	default:
901b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
902b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
903b293e8e4SViacheslav Ovsiienko 			 "unrecognized tunnel mode");
904b293e8e4SViacheslav Ovsiienko 	}
905b293e8e4SViacheslav Ovsiienko 	item->tunnel_mode = conf->tunnel;
906b293e8e4SViacheslav Ovsiienko 	if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
907b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
908b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
909b293e8e4SViacheslav Ovsiienko 			 "sample field number exceeds limit");
910b293e8e4SViacheslav Ovsiienko 	/*
911b293e8e4SViacheslav Ovsiienko 	 * The application can specify fields smaller or bigger than the
912b293e8e4SViacheslav Ovsiienko 	 * 32 bits covered by a single sample register, and it can specify
913b293e8e4SViacheslav Ovsiienko 	 * field offsets in any order.
914b293e8e4SViacheslav Ovsiienko 	 *
915b293e8e4SViacheslav Ovsiienko 	 * Gather all similar fields together, build an array of bit
91653820561SMichael Baum 	 * intervals in ascending order, and try to cover it with the
917b293e8e4SViacheslav Ovsiienko 	 * smallest set of sample registers.
918b293e8e4SViacheslav Ovsiienko 	 */
919b293e8e4SViacheslav Ovsiienko 	memset(&cover, 0, sizeof(cover));
920b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_samples; i++) {
921b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
922b293e8e4SViacheslav Ovsiienko 
923b293e8e4SViacheslav Ovsiienko 		/* Check whether the field was covered in a previous iteration. */
924b293e8e4SViacheslav Ovsiienko 		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
925b293e8e4SViacheslav Ovsiienko 			continue;
926b293e8e4SViacheslav Ovsiienko 		if (fl->field_mode == FIELD_MODE_DUMMY)
927b293e8e4SViacheslav Ovsiienko 			continue;
928b293e8e4SViacheslav Ovsiienko 		/* Build an interval array for the field and similar ones */
929b293e8e4SViacheslav Ovsiienko 		cover.num = 0;
930b293e8e4SViacheslav Ovsiienko 		/* Add the first field to array unconditionally. */
931b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
932b293e8e4SViacheslav Ovsiienko 		if (ret < 0)
933b293e8e4SViacheslav Ovsiienko 			return ret;
934b293e8e4SViacheslav Ovsiienko 		MLX5_ASSERT(ret > 0);
935b293e8e4SViacheslav Ovsiienko 		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
936b293e8e4SViacheslav Ovsiienko 		for (j = i + 1; j < conf->nb_samples; j++) {
937b293e8e4SViacheslav Ovsiienko 			struct rte_flow_item_flex_field *ft;
938b293e8e4SViacheslav Ovsiienko 
939b293e8e4SViacheslav Ovsiienko 			/* Add field to array if its type matches. */
940b293e8e4SViacheslav Ovsiienko 			ft = conf->sample_data + j;
941b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_cover_sample(&cover, ft, fl,
942b293e8e4SViacheslav Ovsiienko 						     attr, error);
943b293e8e4SViacheslav Ovsiienko 			if (ret < 0)
944b293e8e4SViacheslav Ovsiienko 				return ret;
945b293e8e4SViacheslav Ovsiienko 			if (!ret)
946b293e8e4SViacheslav Ovsiienko 				continue;
947b293e8e4SViacheslav Ovsiienko 			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
948b293e8e4SViacheslav Ovsiienko 		}
949b293e8e4SViacheslav Ovsiienko 		/* Allocate sample registers to cover array of intervals. */
950b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_alloc_sample(&cover, parser, item,
951b293e8e4SViacheslav Ovsiienko 					     fl, attr, error);
952b293e8e4SViacheslav Ovsiienko 		if (ret)
953b293e8e4SViacheslav Ovsiienko 			return ret;
954b293e8e4SViacheslav Ovsiienko 	}
955b293e8e4SViacheslav Ovsiienko 	/* Build the item pattern translation data used on flow creation. */
956b293e8e4SViacheslav Ovsiienko 	item->mapnum = 0;
957b293e8e4SViacheslav Ovsiienko 	memset(&item->map, 0, sizeof(item->map));
958b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_samples; i++) {
959b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
960b293e8e4SViacheslav Ovsiienko 
961b293e8e4SViacheslav Ovsiienko 		ret = mlx5_flex_map_sample(fl, parser, item, error);
962b293e8e4SViacheslav Ovsiienko 		if (ret) {
963b293e8e4SViacheslav Ovsiienko 			MLX5_ASSERT(false);
964b293e8e4SViacheslav Ovsiienko 			return ret;
965b293e8e4SViacheslav Ovsiienko 		}
966b293e8e4SViacheslav Ovsiienko 	}
967b293e8e4SViacheslav Ovsiienko 	if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
968b293e8e4SViacheslav Ovsiienko 		/*
969b293e8e4SViacheslav Ovsiienko 		 * In FLEX_TUNNEL_MODE_MULTI tunnel mode the PMD creates two
970b293e8e4SViacheslav Ovsiienko 		 * sets of samples: the first set is for the outer and the
971b293e8e4SViacheslav Ovsiienko 		 * second set for the inner flex flow item. Outer and inner
972b293e8e4SViacheslav Ovsiienko 		 * samples differ only in tunnel_mode.
973b293e8e4SViacheslav Ovsiienko 		 */
974b293e8e4SViacheslav Ovsiienko 		if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
975b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
976b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
977b293e8e4SViacheslav Ovsiienko 				 "no sample registers for inner");
978b293e8e4SViacheslav Ovsiienko 		rte_memcpy(parser->devx_conf.sample + parser->num_samples,
979b293e8e4SViacheslav Ovsiienko 			   parser->devx_conf.sample,
980b293e8e4SViacheslav Ovsiienko 			   parser->num_samples *
981b293e8e4SViacheslav Ovsiienko 					sizeof(parser->devx_conf.sample[0]));
982b293e8e4SViacheslav Ovsiienko 		for (i = 0; i < parser->num_samples; i++) {
983b293e8e4SViacheslav Ovsiienko 			struct mlx5_devx_match_sample_attr *sm =
984b293e8e4SViacheslav Ovsiienko 				parser->devx_conf.sample + parser->num_samples + i;
985b293e8e4SViacheslav Ovsiienko 
986b293e8e4SViacheslav Ovsiienko 			sm->flow_match_sample_tunnel_mode =
987b293e8e4SViacheslav Ovsiienko 						MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
988b293e8e4SViacheslav Ovsiienko 		}
989b293e8e4SViacheslav Ovsiienko 		parser->num_samples *= 2;
990b293e8e4SViacheslav Ovsiienko 	}
991b293e8e4SViacheslav Ovsiienko 	return 0;
992b293e8e4SViacheslav Ovsiienko }
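
/*
 * Usage sketch (application side, hypothetical values): two fixed sample
 * fields as they could appear in conf->sample_data. Adjacent bit ranges
 * like these are gathered by the routine above and may end up sharing a
 * single 32-bit sample register.
 */
static const struct rte_flow_item_flex_field flex_samples_sketch[] = {
	{
		.field_mode = FIELD_MODE_FIXED,
		.field_size = 16,	/* Bits to sample. */
		.field_base = 32,	/* Bit offset from header start. */
	},
	{
		.field_mode = FIELD_MODE_FIXED,
		.field_size = 8,
		.field_base = 48,	/* Adjacent to the field above. */
	},
};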
993b293e8e4SViacheslav Ovsiienko 
994b293e8e4SViacheslav Ovsiienko static int
995b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
996b293e8e4SViacheslav Ovsiienko {
997b293e8e4SViacheslav Ovsiienko 	switch (type) {
998b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_ETH:
999b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_MAC;
1000b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_IPV4:
1001b293e8e4SViacheslav Ovsiienko 		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
1002b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_IPV6:
1003b293e8e4SViacheslav Ovsiienko 		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
1004b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_UDP:
1005b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_UDP;
1006b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_TCP:
1007b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_TCP;
1008b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_MPLS:
1009b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_MPLS;
1010b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_GRE:
1011b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_GRE;
1012b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1013b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_GENEVE;
1014b293e8e4SViacheslav Ovsiienko 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1015b293e8e4SViacheslav Ovsiienko 		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
1016b293e8e4SViacheslav Ovsiienko 	default:
1017b293e8e4SViacheslav Ovsiienko 		return -EINVAL;
1018b293e8e4SViacheslav Ovsiienko 	}
1019b293e8e4SViacheslav Ovsiienko }
1020b293e8e4SViacheslav Ovsiienko 
1021b293e8e4SViacheslav Ovsiienko static int
1022b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
1023b293e8e4SViacheslav Ovsiienko 		     struct rte_flow_error *error)
1024b293e8e4SViacheslav Ovsiienko {
1025b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_eth *spec = item->spec;
1026b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_eth *mask = item->mask;
1027b293e8e4SViacheslav Ovsiienko 	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
1028b293e8e4SViacheslav Ovsiienko 
1029b293e8e4SViacheslav Ovsiienko 	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
1030b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1031b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1032b293e8e4SViacheslav Ovsiienko 			 "invalid eth item mask");
1033b293e8e4SViacheslav Ovsiienko 	}
1034b293e8e4SViacheslav Ovsiienko 	return rte_be_to_cpu_16(spec->hdr.ether_type);
1035b293e8e4SViacheslav Ovsiienko }
1036b293e8e4SViacheslav Ovsiienko 
1037b293e8e4SViacheslav Ovsiienko static int
1038b293e8e4SViacheslav Ovsiienko mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
1039b293e8e4SViacheslav Ovsiienko 		     struct rte_flow_error *error)
1040b293e8e4SViacheslav Ovsiienko {
1041b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_udp *spec = item->spec;
1042b293e8e4SViacheslav Ovsiienko 	const struct rte_flow_item_udp *mask = item->mask;
1043b293e8e4SViacheslav Ovsiienko 	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
1044b293e8e4SViacheslav Ovsiienko 
1045b293e8e4SViacheslav Ovsiienko 	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
1046b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1047b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1048b293e8e4SViacheslav Ovsiienko 			 "invalid udp item mask");
1049b293e8e4SViacheslav Ovsiienko 	}
1050b293e8e4SViacheslav Ovsiienko 	return rte_be_to_cpu_16(spec->hdr.dst_port);
1051b293e8e4SViacheslav Ovsiienko }
1052b293e8e4SViacheslav Ovsiienko 
1053b293e8e4SViacheslav Ovsiienko static int
1054*d451d1a1SRongwei Liu mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
1055*d451d1a1SRongwei Liu 		      struct rte_flow_error *error)
1056*d451d1a1SRongwei Liu {
1057*d451d1a1SRongwei Liu 	const struct rte_flow_item_ipv6 *spec = item->spec;
1058*d451d1a1SRongwei Liu 	const struct rte_flow_item_ipv6 *mask = item->mask;
1059*d451d1a1SRongwei Liu 	struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff };
1060*d451d1a1SRongwei Liu 
1061*d451d1a1SRongwei Liu 	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) {
1062*d451d1a1SRongwei Liu 		return rte_flow_error_set
1063*d451d1a1SRongwei Liu 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
1064*d451d1a1SRongwei Liu 			 "invalid ipv6 item mask, full mask is required");
1065*d451d1a1SRongwei Liu 	}
1066*d451d1a1SRongwei Liu 	return spec->hdr.proto;
1067*d451d1a1SRongwei Liu }
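
/*
 * Usage sketch (application side): a spec/mask pair that satisfies the
 * strict check in mlx5_flex_arc_in_udp() above - the mask must cover the
 * whole dst_port field and nothing else. Port 6635 is an arbitrary
 * example value.
 */
static const struct rte_flow_item_udp flex_in_udp_spec_sketch = {
	.hdr.dst_port = RTE_BE16(6635),
};
static const struct rte_flow_item_udp flex_in_udp_mask_sketch = {
	.hdr.dst_port = RTE_BE16(0xFFFF),
};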
1068*d451d1a1SRongwei Liu 
1069*d451d1a1SRongwei Liu static int
1070b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
1071b293e8e4SViacheslav Ovsiienko 			   const struct rte_flow_item_flex_conf *conf,
1072b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_parser_devx *devx,
1073b293e8e4SViacheslav Ovsiienko 			   struct mlx5_flex_item *item,
1074b293e8e4SViacheslav Ovsiienko 			   struct rte_flow_error *error)
1075b293e8e4SViacheslav Ovsiienko {
1076b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
1077b293e8e4SViacheslav Ovsiienko 	uint32_t i;
1078b293e8e4SViacheslav Ovsiienko 
1079b293e8e4SViacheslav Ovsiienko 	RTE_SET_USED(item);
1080b293e8e4SViacheslav Ovsiienko 	if (conf->nb_inputs > attr->max_num_arc_in)
1081b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1082b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1083b293e8e4SViacheslav Ovsiienko 			 "too many input links");
1084b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_inputs; i++) {
1085b293e8e4SViacheslav Ovsiienko 		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
1086b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_link *link = conf->input_link + i;
1087b293e8e4SViacheslav Ovsiienko 		const struct rte_flow_item *rte_item = &link->item;
1088b293e8e4SViacheslav Ovsiienko 		int arc_type;
1089b293e8e4SViacheslav Ovsiienko 		int ret;
1090b293e8e4SViacheslav Ovsiienko 
1091b293e8e4SViacheslav Ovsiienko 		if (!rte_item->spec || !rte_item->mask || rte_item->last)
1092b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1093b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1094b293e8e4SViacheslav Ovsiienko 				 "invalid flex item IN arc format");
1095b293e8e4SViacheslav Ovsiienko 		arc_type = mlx5_flex_arc_type(rte_item->type, true);
1096b293e8e4SViacheslav Ovsiienko 		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
1097b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1098b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1099b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item IN arc type");
1100b293e8e4SViacheslav Ovsiienko 		arc->arc_parse_graph_node = arc_type;
1101b293e8e4SViacheslav Ovsiienko 		arc->start_inner_tunnel = 0;
1102b293e8e4SViacheslav Ovsiienko 		/*
1103b293e8e4SViacheslav Ovsiienko 		 * Configure the arc IN condition value. The value location
1104b293e8e4SViacheslav Ovsiienko 		 * depends on the protocol. The switch below covers ETH, UDP
1105b293e8e4SViacheslav Ovsiienko 		 * and IPv6 IN arcs, matching what the current FW supports.
1106b293e8e4SViacheslav Ovsiienko 		 * Add more protocols when they become available.
1107b293e8e4SViacheslav Ovsiienko 		 */
1108b293e8e4SViacheslav Ovsiienko 		switch (rte_item->type) {
1109b293e8e4SViacheslav Ovsiienko 		case RTE_FLOW_ITEM_TYPE_ETH:
1110b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_arc_in_eth(rte_item, error);
1111b293e8e4SViacheslav Ovsiienko 			break;
1112b293e8e4SViacheslav Ovsiienko 		case RTE_FLOW_ITEM_TYPE_UDP:
1113b293e8e4SViacheslav Ovsiienko 			ret = mlx5_flex_arc_in_udp(rte_item, error);
1114b293e8e4SViacheslav Ovsiienko 			break;
1115*d451d1a1SRongwei Liu 		case RTE_FLOW_ITEM_TYPE_IPV6:
1116*d451d1a1SRongwei Liu 			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
1117*d451d1a1SRongwei Liu 			break;
1118b293e8e4SViacheslav Ovsiienko 		default:
1119b293e8e4SViacheslav Ovsiienko 			MLX5_ASSERT(false);
1120b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1121b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1122b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item IN arc type");
1123b293e8e4SViacheslav Ovsiienko 		}
1124b293e8e4SViacheslav Ovsiienko 		if (ret < 0)
1125b293e8e4SViacheslav Ovsiienko 			return ret;
1126b293e8e4SViacheslav Ovsiienko 		arc->compare_condition_value = (uint16_t)ret;
1127b293e8e4SViacheslav Ovsiienko 	}
1128b293e8e4SViacheslav Ovsiienko 	return 0;
1129b293e8e4SViacheslav Ovsiienko }
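
/*
 * Usage sketch: one IN link as it could appear in conf->input_link,
 * built from the UDP spec/mask sketched above - the flex header is
 * expected to follow UDP packets with the given destination port.
 */
static const struct rte_flow_item_flex_link flex_in_link_sketch = {
	.item = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = &flex_in_udp_spec_sketch,
		.mask = &flex_in_udp_mask_sketch,
	},
};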
1130b293e8e4SViacheslav Ovsiienko 
1131b293e8e4SViacheslav Ovsiienko static int
1132b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
1133b293e8e4SViacheslav Ovsiienko 			    const struct rte_flow_item_flex_conf *conf,
1134b293e8e4SViacheslav Ovsiienko 			    struct mlx5_flex_parser_devx *devx,
1135b293e8e4SViacheslav Ovsiienko 			    struct mlx5_flex_item *item,
1136b293e8e4SViacheslav Ovsiienko 			    struct rte_flow_error *error)
1137b293e8e4SViacheslav Ovsiienko {
1138b293e8e4SViacheslav Ovsiienko 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
1139b293e8e4SViacheslav Ovsiienko 	bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
1140b293e8e4SViacheslav Ovsiienko 	uint32_t i;
1141b293e8e4SViacheslav Ovsiienko 
1142b293e8e4SViacheslav Ovsiienko 	RTE_SET_USED(item);
1143b293e8e4SViacheslav Ovsiienko 	if (conf->nb_outputs > attr->max_num_arc_out)
1144b293e8e4SViacheslav Ovsiienko 		return rte_flow_error_set
1145b293e8e4SViacheslav Ovsiienko 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1146b293e8e4SViacheslav Ovsiienko 			 "too many output links");
1147b293e8e4SViacheslav Ovsiienko 	for (i = 0; i < conf->nb_outputs; i++) {
1148b293e8e4SViacheslav Ovsiienko 		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
1149b293e8e4SViacheslav Ovsiienko 		struct rte_flow_item_flex_link *link = conf->output_link + i;
1150b293e8e4SViacheslav Ovsiienko 		const struct rte_flow_item *rte_item = &link->item;
1151b293e8e4SViacheslav Ovsiienko 		int arc_type;
1152b293e8e4SViacheslav Ovsiienko 
1153b293e8e4SViacheslav Ovsiienko 		if (rte_item->spec || rte_item->mask || rte_item->last)
1154b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1155b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1156b293e8e4SViacheslav Ovsiienko 				 "flex node: invalid OUT arc format");
1157b293e8e4SViacheslav Ovsiienko 		arc_type = mlx5_flex_arc_type(rte_item->type, false);
1158b293e8e4SViacheslav Ovsiienko 		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
1159b293e8e4SViacheslav Ovsiienko 			return rte_flow_error_set
1160b293e8e4SViacheslav Ovsiienko 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1161b293e8e4SViacheslav Ovsiienko 				 "unsupported flex item OUT arc type");
1162b293e8e4SViacheslav Ovsiienko 		arc->arc_parse_graph_node = arc_type;
1163b293e8e4SViacheslav Ovsiienko 		arc->start_inner_tunnel = !!is_tunnel;
1164b293e8e4SViacheslav Ovsiienko 		arc->compare_condition_value = link->next;
1165b293e8e4SViacheslav Ovsiienko 	}
1166b293e8e4SViacheslav Ovsiienko 	return 0;
1167b293e8e4SViacheslav Ovsiienko }
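
/*
 * Usage sketch: one OUT link as it could appear in conf->output_link.
 * OUT arcs carry no spec/mask; "next" (17 here, an arbitrary example)
 * is the value of the flex next-protocol field that selects this arc.
 */
static const struct rte_flow_item_flex_link flex_out_link_sketch = {
	.item = { .type = RTE_FLOW_ITEM_TYPE_UDP },
	.next = 17,
};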
1168b293e8e4SViacheslav Ovsiienko 
1169b293e8e4SViacheslav Ovsiienko /* Translate RTE flex item API configuration into flex parser settings. */
1170b293e8e4SViacheslav Ovsiienko static int
1171b293e8e4SViacheslav Ovsiienko mlx5_flex_translate_conf(struct rte_eth_dev *dev,
1172b293e8e4SViacheslav Ovsiienko 			 const struct rte_flow_item_flex_conf *conf,
1173b293e8e4SViacheslav Ovsiienko 			 struct mlx5_flex_parser_devx *devx,
1174b293e8e4SViacheslav Ovsiienko 			 struct mlx5_flex_item *item,
1175b293e8e4SViacheslav Ovsiienko 			 struct rte_flow_error *error)
1176b293e8e4SViacheslav Ovsiienko {
1177b293e8e4SViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
117853820561SMichael Baum 	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
1179b293e8e4SViacheslav Ovsiienko 	int ret;
1180b293e8e4SViacheslav Ovsiienko 
1181b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_length(attr, conf, devx, error);
1182b293e8e4SViacheslav Ovsiienko 	if (ret)
1183b293e8e4SViacheslav Ovsiienko 		return ret;
1184b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_next(attr, conf, devx, error);
1185b293e8e4SViacheslav Ovsiienko 	if (ret)
1186b293e8e4SViacheslav Ovsiienko 		return ret;
1187b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
1188b293e8e4SViacheslav Ovsiienko 	if (ret)
1189b293e8e4SViacheslav Ovsiienko 		return ret;
1190b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
1191b293e8e4SViacheslav Ovsiienko 	if (ret)
1192b293e8e4SViacheslav Ovsiienko 		return ret;
1193b293e8e4SViacheslav Ovsiienko 	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
1194b293e8e4SViacheslav Ovsiienko 	if (ret)
1195b293e8e4SViacheslav Ovsiienko 		return ret;
1196b293e8e4SViacheslav Ovsiienko 	return 0;
1197b293e8e4SViacheslav Ovsiienko }
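
/*
 * Usage sketch: a minimal flex item configuration tying the sketches
 * above together. The next_header length description is consumed by
 * mlx5_flex_translate_length() earlier in this file; all values are
 * hypothetical.
 */
static const struct rte_flow_item_flex_conf flex_conf_sketch = {
	.tunnel = FLEX_TUNNEL_MODE_SINGLE,
	.next_header = {
		.field_mode = FIELD_MODE_FIXED,
		.field_size = 64,	/* Fixed header length, in bits. */
	},
	.sample_data = (struct rte_flow_item_flex_field *)flex_samples_sketch,
	.nb_samples = RTE_DIM(flex_samples_sketch),
	.input_link = (struct rte_flow_item_flex_link *)&flex_in_link_sketch,
	.nb_inputs = 1,
	.output_link = (struct rte_flow_item_flex_link *)&flex_out_link_sketch,
	.nb_outputs = 1,
};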
1198b293e8e4SViacheslav Ovsiienko 
1199db25cadcSViacheslav Ovsiienko /**
1200db25cadcSViacheslav Ovsiienko  * Create the flex item with specified configuration over the Ethernet device.
1201db25cadcSViacheslav Ovsiienko  *
1202db25cadcSViacheslav Ovsiienko  * @param dev
1203db25cadcSViacheslav Ovsiienko  *   Ethernet device to create flex item on.
1204db25cadcSViacheslav Ovsiienko  * @param[in] conf
1205db25cadcSViacheslav Ovsiienko  *   Flex item configuration.
1206db25cadcSViacheslav Ovsiienko  * @param[out] error
1207db25cadcSViacheslav Ovsiienko  *   Perform verbose error reporting if not NULL. PMDs initialize this
1208db25cadcSViacheslav Ovsiienko  *   structure in case of error only.
1209db25cadcSViacheslav Ovsiienko  *
1210db25cadcSViacheslav Ovsiienko  * @return
1211db25cadcSViacheslav Ovsiienko  *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
1212db25cadcSViacheslav Ovsiienko  */
1213db25cadcSViacheslav Ovsiienko struct rte_flow_item_flex_handle *
1214db25cadcSViacheslav Ovsiienko flow_dv_item_create(struct rte_eth_dev *dev,
1215db25cadcSViacheslav Ovsiienko 		    const struct rte_flow_item_flex_conf *conf,
1216db25cadcSViacheslav Ovsiienko 		    struct rte_flow_error *error)
1217db25cadcSViacheslav Ovsiienko {
1218db25cadcSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
12199086ac09SGregory Etelson 	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
1220db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_item *flex;
12219086ac09SGregory Etelson 	struct mlx5_list_entry *ent;
1222db25cadcSViacheslav Ovsiienko 
1223db25cadcSViacheslav Ovsiienko 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1224db25cadcSViacheslav Ovsiienko 	flex = mlx5_flex_alloc(priv);
1225db25cadcSViacheslav Ovsiienko 	if (!flex) {
1226db25cadcSViacheslav Ovsiienko 		rte_flow_error_set(error, ENOMEM,
1227db25cadcSViacheslav Ovsiienko 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1228db25cadcSViacheslav Ovsiienko 				   "too many flex items created on the port");
1229db25cadcSViacheslav Ovsiienko 		return NULL;
1230db25cadcSViacheslav Ovsiienko 	}
1231b293e8e4SViacheslav Ovsiienko 	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
1232b293e8e4SViacheslav Ovsiienko 		goto error;
12339086ac09SGregory Etelson 	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
12349086ac09SGregory Etelson 	if (!ent) {
12359086ac09SGregory Etelson 		rte_flow_error_set(error, ENOMEM,
12369086ac09SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12379086ac09SGregory Etelson 				   "flex item creation failure");
12389086ac09SGregory Etelson 		goto error;
12399086ac09SGregory Etelson 	}
12409086ac09SGregory Etelson 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
1241db25cadcSViacheslav Ovsiienko 	/* Mark initialized flex item valid. */
1242db25cadcSViacheslav Ovsiienko 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
1243db25cadcSViacheslav Ovsiienko 	return (struct rte_flow_item_flex_handle *)flex;
12449086ac09SGregory Etelson 
12459086ac09SGregory Etelson error:
12469086ac09SGregory Etelson 	mlx5_flex_free(priv, flex);
12479086ac09SGregory Etelson 	return NULL;
1248db25cadcSViacheslav Ovsiienko }
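
/*
 * Usage sketch (application side): flow_dv_item_create() is reached
 * through the generic rte_flow API, as below; details of a failure are
 * reported via the error structure.
 */
static __rte_unused struct rte_flow_item_flex_handle *
flex_item_create_sketch(uint16_t port_id)
{
	struct rte_flow_error err = { 0 };
	struct rte_flow_item_flex_handle *handle;

	handle = rte_flow_flex_item_create(port_id, &flex_conf_sketch, &err);
	if (handle == NULL)
		DRV_LOG(ERR, "flex item create failed: %s",
			err.message ? err.message : "(no message)");
	return handle;
}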
1249db25cadcSViacheslav Ovsiienko 
1250db25cadcSViacheslav Ovsiienko /**
1251db25cadcSViacheslav Ovsiienko  * Release the flex item on the specified Ethernet device.
1252db25cadcSViacheslav Ovsiienko  *
1253db25cadcSViacheslav Ovsiienko  * @param dev
1254db25cadcSViacheslav Ovsiienko  *   Ethernet device to destroy flex item on.
1255db25cadcSViacheslav Ovsiienko  * @param[in] handle
1256db25cadcSViacheslav Ovsiienko  *   Handle of the item existing on the specified device.
1257db25cadcSViacheslav Ovsiienko  * @param[out] error
1258db25cadcSViacheslav Ovsiienko  *   Perform verbose error reporting if not NULL. PMDs initialize this
1259db25cadcSViacheslav Ovsiienko  *   structure in case of error only.
1260db25cadcSViacheslav Ovsiienko  *
1261db25cadcSViacheslav Ovsiienko  * @return
1262db25cadcSViacheslav Ovsiienko  *   0 on success, a negative errno value otherwise and rte_errno is set.
1263db25cadcSViacheslav Ovsiienko  */
1264db25cadcSViacheslav Ovsiienko int
1265db25cadcSViacheslav Ovsiienko flow_dv_item_release(struct rte_eth_dev *dev,
1266db25cadcSViacheslav Ovsiienko 		     const struct rte_flow_item_flex_handle *handle,
1267db25cadcSViacheslav Ovsiienko 		     struct rte_flow_error *error)
1268db25cadcSViacheslav Ovsiienko {
1269db25cadcSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
1270db25cadcSViacheslav Ovsiienko 	struct mlx5_flex_item *flex =
1271db25cadcSViacheslav Ovsiienko 		(struct mlx5_flex_item *)(uintptr_t)handle;
1272db25cadcSViacheslav Ovsiienko 	uint32_t old_refcnt = 1;
12739086ac09SGregory Etelson 	int rc;
1274db25cadcSViacheslav Ovsiienko 
1275db25cadcSViacheslav Ovsiienko 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1276db25cadcSViacheslav Ovsiienko 	rte_spinlock_lock(&priv->flex_item_sl);
1277db25cadcSViacheslav Ovsiienko 	if (mlx5_flex_index(priv, flex) < 0) {
1278db25cadcSViacheslav Ovsiienko 		rte_spinlock_unlock(&priv->flex_item_sl);
1279db25cadcSViacheslav Ovsiienko 		return rte_flow_error_set(error, EINVAL,
1280db25cadcSViacheslav Ovsiienko 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1281db25cadcSViacheslav Ovsiienko 					  "invalid flex item handle value");
1282db25cadcSViacheslav Ovsiienko 	}
1283db25cadcSViacheslav Ovsiienko 	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
1284db25cadcSViacheslav Ovsiienko 					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
1285db25cadcSViacheslav Ovsiienko 		rte_spinlock_unlock(&priv->flex_item_sl);
1286db25cadcSViacheslav Ovsiienko 		return rte_flow_error_set(error, EBUSY,
1287db25cadcSViacheslav Ovsiienko 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1288db25cadcSViacheslav Ovsiienko 					  "flex item has flow references");
1289db25cadcSViacheslav Ovsiienko 	}
1290db25cadcSViacheslav Ovsiienko 	/* Flex item is marked as invalid, we can leave locked section. */
1291db25cadcSViacheslav Ovsiienko 	rte_spinlock_unlock(&priv->flex_item_sl);
12929086ac09SGregory Etelson 	MLX5_ASSERT(flex->devx_fp);
12939086ac09SGregory Etelson 	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
12949086ac09SGregory Etelson 				  &flex->devx_fp->entry);
12959086ac09SGregory Etelson 	flex->devx_fp = NULL;
1296db25cadcSViacheslav Ovsiienko 	mlx5_flex_free(priv, flex);
12979086ac09SGregory Etelson 	if (rc < 0)
12989086ac09SGregory Etelson 		return rte_flow_error_set(error, EBUSY,
12999086ac09SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
13009086ac09SGregory Etelson 					  "flex item release failure");
1301db25cadcSViacheslav Ovsiienko 	return 0;
1302db25cadcSViacheslav Ovsiienko }
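
/*
 * Usage sketch: the matching release path through the generic rte_flow
 * API. As implemented above, releasing an item that still has flow
 * references fails with EBUSY.
 */
static __rte_unused int
flex_item_release_sketch(uint16_t port_id,
			 struct rte_flow_item_flex_handle *handle)
{
	struct rte_flow_error err = { 0 };

	return rte_flow_flex_item_release(port_id, handle, &err);
}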
13039086ac09SGregory Etelson 
13049086ac09SGregory Etelson /* DevX flex parser list callbacks. */
13059086ac09SGregory Etelson struct mlx5_list_entry *
13069086ac09SGregory Etelson mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
13079086ac09SGregory Etelson {
13089086ac09SGregory Etelson 	struct mlx5_dev_ctx_shared *sh = list_ctx;
13099086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp, *conf = ctx;
13109086ac09SGregory Etelson 	int ret;
13119086ac09SGregory Etelson 
13129086ac09SGregory Etelson 	fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
13139086ac09SGregory Etelson 			 0, SOCKET_ID_ANY);
13149086ac09SGregory Etelson 	if (!fp)
13159086ac09SGregory Etelson 		return NULL;
13169086ac09SGregory Etelson 	/* Copy the requested configurations. */
13179086ac09SGregory Etelson 	fp->num_samples = conf->num_samples;
13189086ac09SGregory Etelson 	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
13199086ac09SGregory Etelson 	/* Create DevX flex parser. */
13209086ac09SGregory Etelson 	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
13219086ac09SGregory Etelson 							&fp->devx_conf);
13229086ac09SGregory Etelson 	if (!fp->devx_obj)
13239086ac09SGregory Etelson 		goto error;
13249086ac09SGregory Etelson 	/* Query the firmware assigned sample ids. */
13259086ac09SGregory Etelson 	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
13269086ac09SGregory Etelson 						fp->sample_ids,
1327f1324a17SRongwei Liu 						fp->num_samples,
1328f1324a17SRongwei Liu 						&fp->anchor_id);
13299086ac09SGregory Etelson 	if (ret)
13309086ac09SGregory Etelson 		goto error;
13319086ac09SGregory Etelson 	DRV_LOG(DEBUG, "DevX flex parser %p created, samples num: %u",
13329086ac09SGregory Etelson 		(const void *)fp, fp->num_samples);
13339086ac09SGregory Etelson 	return &fp->entry;
13349086ac09SGregory Etelson error:
13359086ac09SGregory Etelson 	if (fp->devx_obj)
13369086ac09SGregory Etelson 		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
13379086ac09SGregory Etelson 	/* The fp pointer is always valid here - goto only after alloc. */
13389086ac09SGregory Etelson 	mlx5_free(fp);
13399086ac09SGregory Etelson 	return NULL;
13409086ac09SGregory Etelson }
13419086ac09SGregory Etelson 
13429086ac09SGregory Etelson int
13439086ac09SGregory Etelson mlx5_flex_parser_match_cb(void *list_ctx,
13449086ac09SGregory Etelson 			  struct mlx5_list_entry *iter, void *ctx)
13459086ac09SGregory Etelson {
13469086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
13479086ac09SGregory Etelson 		container_of(iter, struct mlx5_flex_parser_devx, entry);
13489086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *org =
13499086ac09SGregory Etelson 		container_of(ctx, struct mlx5_flex_parser_devx, entry);
13509086ac09SGregory Etelson 
13519086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
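	/* A zero return means the existing entry matches the request. */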
13529086ac09SGregory Etelson 	return !iter || !ctx || memcmp(&fp->devx_conf,
13539086ac09SGregory Etelson 				       &org->devx_conf,
13549086ac09SGregory Etelson 				       sizeof(fp->devx_conf));
13559086ac09SGregory Etelson }
13569086ac09SGregory Etelson 
13579086ac09SGregory Etelson void
13589086ac09SGregory Etelson mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
13599086ac09SGregory Etelson {
13609086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
13619086ac09SGregory Etelson 		container_of(entry, struct mlx5_flex_parser_devx, entry);
13629086ac09SGregory Etelson 
13639086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
13649086ac09SGregory Etelson 	MLX5_ASSERT(fp->devx_obj);
13659086ac09SGregory Etelson 	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
13669086ac09SGregory Etelson 	DRV_LOG(DEBUG, "DevX flex parser %p destroyed", (const void *)fp);
13679086ac09SGregory Etelson 	mlx5_free(entry);
13689086ac09SGregory Etelson }
13699086ac09SGregory Etelson 
13709086ac09SGregory Etelson struct mlx5_list_entry *
13719086ac09SGregory Etelson mlx5_flex_parser_clone_cb(void *list_ctx,
13729086ac09SGregory Etelson 			  struct mlx5_list_entry *entry, void *ctx)
13739086ac09SGregory Etelson {
13749086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp;
13759086ac09SGregory Etelson 
13769086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
13779086ac09SGregory Etelson 	RTE_SET_USED(entry);
13789086ac09SGregory Etelson 	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
13799086ac09SGregory Etelson 			 0, SOCKET_ID_ANY);
13809086ac09SGregory Etelson 	if (!fp)
13819086ac09SGregory Etelson 		return NULL;
13829086ac09SGregory Etelson 	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
13839086ac09SGregory Etelson 	return &fp->entry;
13849086ac09SGregory Etelson }
13859086ac09SGregory Etelson 
13869086ac09SGregory Etelson void
13879086ac09SGregory Etelson mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
13889086ac09SGregory Etelson {
13899086ac09SGregory Etelson 	struct mlx5_flex_parser_devx *fp =
13909086ac09SGregory Etelson 		container_of(entry, struct mlx5_flex_parser_devx, entry);
13919086ac09SGregory Etelson 	RTE_SET_USED(list_ctx);
13929086ac09SGregory Etelson 	mlx5_free(fp);
13939086ac09SGregory Etelson }
1394