xref: /dpdk/drivers/regex/mlx5/mlx5_regex_control.c (revision dd25bd201d18729b883acc4d4120a5e751807f5f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  */
4 
5 #include <errno.h>
6 
7 #include <rte_log.h>
8 #include <rte_errno.h>
9 #include <rte_memory.h>
10 #include <rte_malloc.h>
11 #include <rte_regexdev.h>
12 #include <rte_regexdev_core.h>
13 #include <rte_regexdev_driver.h>
14 #include <rte_dev.h>
15 
16 #include <mlx5_common.h>
17 #include <mlx5_glue.h>
18 #include <mlx5_devx_cmds.h>
19 #include <mlx5_prm.h>
20 #include <mlx5_common_os.h>
21 #include <mlx5_common_devx.h>
22 
23 #include "mlx5_regex.h"
24 #include "mlx5_regex_utils.h"
25 #include "mlx5_rxp_csrs.h"
26 #include "mlx5_rxp.h"
27 
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)

/**
 * Compute how many SQ objects are needed to hold a given descriptor count.
 *
 * Each object holds MLX5_REGEX_NUM_WQE_PER_PAGE WQEs, so this is a
 * ceiling division of nb_desc by that capacity.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of obj to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	uint16_t nb_obj = nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE;

	/* A partial page still needs a whole object. */
	if (nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE)
		nb_obj++;
	return nb_obj;
}
45 
46 /**
47  * destroy CQ.
48  *
49  * @param cp
50  *   Pointer to the CQ to be destroyed.
51  *
52  * @return
53  *   0 on success, a negative errno value otherwise and rte_errno is set.
54  */
55 static int
56 regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
57 {
58 	mlx5_devx_cq_destroy(&cq->cq_obj);
59 	memset(cq, 0, sizeof(*cq));
60 	return 0;
61 }
62 
63 /**
64  * create the CQ object.
65  *
66  * @param priv
67  *   Pointer to the priv object.
68  * @param cp
69  *   Pointer to the CQ to be created.
70  *
71  * @return
72  *   0 on success, a negative errno value otherwise and rte_errno is set.
73  */
74 static int
75 regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
76 {
77 	struct mlx5_devx_cq_attr attr = {
78 		.uar_page_id = priv->uar->page_id,
79 	};
80 	int ret;
81 
82 	cq->ci = 0;
83 	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
84 				  &attr, SOCKET_ID_ANY);
85 	if (ret) {
86 		DRV_LOG(ERR, "Can't create CQ object.");
87 		memset(cq, 0, sizeof(*cq));
88 		rte_errno = ENOMEM;
89 		return -rte_errno;
90 	}
91 	return 0;
92 }
93 
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/**
 * Query the protection domain number of a verbs PD via mlx5dv.
 *
 * @param pd
 *   Pointer to the verbs protection domain object.
 * @param[out] pdn
 *   Where the PD number is stored on success; untouched on failure.
 *
 * @return
 *   0 on success, the non-zero value returned by dv_init_obj otherwise.
 */
static int
regex_get_pdn(void *pd, uint32_t *pdn)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	/* dv_init_obj fills pd_info from the verbs PD handle. */
	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to get PD object info");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif
113 
114 /**
115  * Destroy the SQ object.
116  *
117  * @param qp
118  *   Pointer to the QP element
119  * @param q_ind
120  *   The index of the queue.
121  *
122  * @return
123  *   0 on success, a negative errno value otherwise and rte_errno is set.
124  */
125 static int
126 regex_ctrl_destroy_sq(struct mlx5_regex_qp *qp, uint16_t q_ind)
127 {
128 	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
129 
130 	mlx5_devx_sq_destroy(&sq->sq_obj);
131 	memset(sq, 0, sizeof(*sq));
132 	return 0;
133 }
134 
/**
 * Create one SQ object of a queue pair and move it to the ready state.
 *
 * Resolves the PD number, creates the DevX SQ bound to the QP's CQ, and
 * issues a modify command to transition it to MLX5_SQC_STATE_RDY.
 * Requires DV support; without it the function only logs and fails.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		     uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_create_sq_attr attr = {
		.user_index = q_ind,
		.cqn = qp->cq.cq_obj.cq->id,	/* completions go to the QP's CQ */
		.wq_attr = (struct mlx5_devx_wq_attr){
			.uar_page = priv->uar->page_id,
		},
		.ts_format = mlx5_ts_format_conv(priv->sq_ts_format),
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
	uint32_t pd_num = 0;
	int ret;

	/* Reset software producer/consumer indexes for the new SQ. */
	sq->log_nb_desc = log_nb_desc;
	sq->ci = 0;
	sq->pi = 0;
	/* The WQ must be created on our protection domain. */
	ret = regex_get_pdn(priv->pd, &pd_num);
	if (ret)
		return ret;
	attr.wq_attr.pd = pd_num;
	ret = mlx5_devx_sq_create(priv->ctx, &sq->sq_obj, log_nb_desc, &attr,
				  SOCKET_ID_ANY);
	if (ret) {
		/* NOTE(review): real cause is collapsed into ENOMEM here. */
		DRV_LOG(ERR, "Can't create SQ object.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* SQs are created in RST state; transition to RDY before use. */
	ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		regex_ctrl_destroy_sq(qp, q_ind);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}
201 
202 /**
203  * Setup the qp.
204  *
205  * @param dev
206  *   Pointer to RegEx dev structure.
207  * @param qp_ind
208  *   The queue index to setup.
209  * @param cfg
210  *   The queue requested configuration.
211  *
212  * @return
213  *   0 on success, a negative errno value otherwise and rte_errno is set.
214  */
215 int
216 mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
217 		    const struct rte_regexdev_qp_conf *cfg)
218 {
219 	struct mlx5_regex_priv *priv = dev->data->dev_private;
220 	struct mlx5_regex_qp *qp;
221 	int i;
222 	int nb_sq_config = 0;
223 	int ret;
224 	uint16_t log_desc;
225 
226 	qp = &priv->qps[qp_ind];
227 	qp->flags = cfg->qp_conf_flags;
228 	qp->cq.log_nb_desc = rte_log2_u32(cfg->nb_desc);
229 	qp->nb_desc = 1 << qp->cq.log_nb_desc;
230 	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
231 		qp->nb_obj = regex_ctrl_get_nb_obj(qp->nb_desc);
232 	else
233 		qp->nb_obj = 1;
234 	qp->sqs = rte_malloc(NULL,
235 			     qp->nb_obj * sizeof(struct mlx5_regex_sq), 64);
236 	if (!qp->sqs) {
237 		DRV_LOG(ERR, "Can't allocate sq array memory.");
238 		rte_errno = ENOMEM;
239 		return -rte_errno;
240 	}
241 	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
242 	ret = regex_ctrl_create_cq(priv, &qp->cq);
243 	if (ret) {
244 		DRV_LOG(ERR, "Can't create cq.");
245 		goto err_cq;
246 	}
247 	for (i = 0; i < qp->nb_obj; i++) {
248 		ret = regex_ctrl_create_sq(priv, qp, i, log_desc);
249 		if (ret) {
250 			DRV_LOG(ERR, "Can't create sq.");
251 			goto err_btree;
252 		}
253 		nb_sq_config++;
254 	}
255 
256 	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
257 				 rte_socket_id());
258 	if (ret) {
259 		DRV_LOG(ERR, "Error setting up mr btree");
260 		goto err_btree;
261 	}
262 
263 	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
264 	if (ret) {
265 		DRV_LOG(ERR, "Error setting up fastpath");
266 		goto err_fp;
267 	}
268 	return 0;
269 
270 err_fp:
271 	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
272 err_btree:
273 	for (i = 0; i < nb_sq_config; i++)
274 		regex_ctrl_destroy_sq(qp, i);
275 	regex_ctrl_destroy_cq(&qp->cq);
276 err_cq:
277 	rte_free(qp->sqs);
278 	return ret;
279 }
280