/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */
#include <rte_malloc.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"

static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
	      "Flex item maximal number exceeds uint32_t bit width");

/**
 * Routine called once on port initialization to initialize the flex item
 * related infrastructure.
 *
 * @param dev
 *   Ethernet device to perform flex item initialization on.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_item_port_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	rte_spinlock_init(&priv->flex_item_sl);
	MLX5_ASSERT(!priv->flex_item_map);
	return 0;
}

/**
 * Routine called once on port close to perform flex item
 * related infrastructure cleanup.
 *
 * @param dev
 *   Ethernet device to perform cleanup on.
 */
void
mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
		if (priv->flex_item_map & (1u << i)) {
			/* DevX object dereferencing should be provided here. */
			priv->flex_item_map &= ~(1u << i);
		}
	}
}

/*
 * Convert a flex item entry pointer back to its index in the per-port
 * flex_item[] array, checking that the pointer actually points into the
 * array, is aligned on an entry boundary, and that the corresponding
 * slot is currently allocated. Returns the index on success, -1 otherwise.
 */
static int
mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	uintptr_t start = (uintptr_t)&priv->flex_item[0];
	uintptr_t entry = (uintptr_t)item;
	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);

	if (entry < start ||
	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
	    (entry - start) % sizeof(struct mlx5_flex_item) ||
	    !(priv->flex_item_map & (1u << idx)))
		return -1;
	return (int)idx;
}

/*
 * Allocate a free flex item slot: under the spinlock, find the lowest
 * clear bit in the allocation bitmap and mark it busy. Returns NULL if
 * all MLX5_PORT_FLEX_ITEM_NUM slots are in use.
 */
static struct mlx5_flex_item *
mlx5_flex_alloc(struct mlx5_priv *priv)
{
	struct mlx5_flex_item *item = NULL;

	rte_spinlock_lock(&priv->flex_item_sl);
	if (~priv->flex_item_map) {
		uint32_t idx = rte_bsf32(~priv->flex_item_map);

		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
			item = &priv->flex_item[idx];
			MLX5_ASSERT(!item->refcnt);
			MLX5_ASSERT(!item->devx_fp);
			item->devx_fp = NULL;
			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
			priv->flex_item_map |= 1u << idx;
		}
	}
	rte_spinlock_unlock(&priv->flex_item_sl);
	return item;
}

/*
 * Return a flex item slot to the free pool: validate the entry pointer
 * and clear its bit in the allocation bitmap under the spinlock.
 */
static void
mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	int idx = mlx5_flex_index(priv, item);

	MLX5_ASSERT(idx >= 0 &&
		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
		    (priv->flex_item_map & (1u << idx)));
	if (idx >= 0) {
		rte_spinlock_lock(&priv->flex_item_sl);
		MLX5_ASSERT(!item->refcnt);
		MLX5_ASSERT(!item->devx_fp);
		item->devx_fp = NULL;
		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
		priv->flex_item_map &= ~(1u << idx);
		rte_spinlock_unlock(&priv->flex_item_sl);
	}
}

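/*
 * Illustrative sketch (not part of the driver): the alloc/free pair above
 * is a spinlock-protected bitmap allocator. The hypothetical helper below
 * shows the same lowest-free-bit technique in isolation on a caller-owned
 * 32-bit map, without the locking and per-item bookkeeping.
 */
static __rte_unused int
mlx5_flex_bitmap_alloc_sketch(uint32_t *map, uint32_t num_slots)
{
	uint32_t idx;

	if (!~*map)
		return -1; /* All 32 bits set - nothing is free. */
	idx = rte_bsf32(~*map); /* Lowest clear bit = first free slot. */
	if (idx >= num_slots)
		return -1;
	*map |= 1u << idx;
	return (int)idx;
}
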
/**
 * Create the flex item with specified configuration over the Ethernet device.
 *
 * @param dev
 *   Ethernet device to create flex item on.
 * @param[in] conf
 *   Flex item configuration.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
 */
struct rte_flow_item_flex_handle *
flow_dv_item_create(struct rte_eth_dev *dev,
		    const struct rte_flow_item_flex_conf *conf,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	flex = mlx5_flex_alloc(priv);
	if (!flex) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "too many flex items created on the port");
		return NULL;
	}
	RTE_SET_USED(conf);
	/* Mark initialized flex item valid. */
	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return (struct rte_flow_item_flex_handle *)flex;
}

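/*
 * Usage sketch (illustrative, not part of the driver): applications reach
 * flow_dv_item_create() through the generic rte_flow API entry point. The
 * port_id value and the empty configuration below are hypothetical
 * placeholders; a real caller fills 'conf' with the header layout and
 * sample field descriptions to be programmed.
 */
static __rte_unused struct rte_flow_item_flex_handle *
mlx5_flex_item_create_sketch(uint16_t port_id)
{
	struct rte_flow_item_flex_conf conf = { 0 };
	struct rte_flow_error error;

	return rte_flow_flex_item_create(port_id, &conf, &error);
}
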
/**
 * Release the flex item on the specified Ethernet device.
 *
 * @param dev
 *   Ethernet device to destroy flex item on.
 * @param[in] handle
 *   Handle of the item existing on the specified device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_dv_item_release(struct rte_eth_dev *dev,
		     const struct rte_flow_item_flex_handle *handle,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex =
		(struct mlx5_flex_item *)(uintptr_t)handle;
	uint32_t old_refcnt = 1;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	rte_spinlock_lock(&priv->flex_item_sl);
	if (mlx5_flex_index(priv, flex) < 0) {
		rte_spinlock_unlock(&priv->flex_item_sl);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "invalid flex item handle value");
	}
	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		rte_spinlock_unlock(&priv->flex_item_sl);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "flex item has flow references");
	}
	/* The flex item is marked as invalid; we can leave the locked section. */
	rte_spinlock_unlock(&priv->flex_item_sl);
	mlx5_flex_free(priv, flex);
	return 0;
}
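
/*
 * Usage sketch (illustrative, not part of the driver): releasing the item
 * through the generic rte_flow API once no flow rules reference it. The
 * compare-exchange above only lets the release proceed when the reference
 * count is exactly 1, i.e. only the creation reference remains. The handle
 * is assumed to come from a prior rte_flow_flex_item_create() call.
 */
static __rte_unused int
mlx5_flex_item_release_sketch(uint16_t port_id,
			      struct rte_flow_item_flex_handle *handle)
{
	struct rte_flow_error error;

	return rte_flow_flex_item_release(port_id, handle, &error);
}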