xref: /dpdk/drivers/regex/mlx5/mlx5_regex_control.c (revision 56bb5841fd0608989101d933f091852a3126b4fe)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  */
4 
5 #include <errno.h>
6 
7 #include <rte_log.h>
8 #include <rte_errno.h>
9 #include <rte_malloc.h>
10 #include <rte_regexdev.h>
11 #include <rte_regexdev_core.h>
12 #include <rte_regexdev_driver.h>
13 #include <rte_dev.h>
14 
15 #include <mlx5_common.h>
16 #include <mlx5_glue.h>
17 #include <mlx5_devx_cmds.h>
18 #include <mlx5_prm.h>
19 #include <mlx5_common_os.h>
20 
21 #include "mlx5_regex.h"
22 #include "mlx5_regex_utils.h"
23 #include "mlx5_rxp_csrs.h"
24 #include "mlx5_rxp.h"
25 
26 #define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)
27 
28 /**
29  * Returns the number of qp obj to be created.
30  *
31  * @param nb_desc
32  *   The number of descriptors for the queue.
33  *
34  * @return
35  *   The number of obj to be created.
36  */
37 static uint16_t
38 regex_ctrl_get_nb_obj(uint16_t nb_desc)
39 {
40 	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
41 		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
42 }
43 
44 /**
 * Destroy the CQ.
46  *
47  * @param priv
48  *   Pointer to the priv object.
 * @param cq
50  *   Pointer to the CQ to be destroyed.
51  *
52  * @return
53  *   0 on success, a negative errno value otherwise and rte_errno is set.
54  */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	/* Tear down in reverse order of creation: umem registration, CQE
	 * ring buffer, door-bell record, then the DevX CQ object. Each
	 * handle is reset after release so a repeated call skips it.
	 */
	if (cq->cqe_umem) {
		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
		cq->cqe_umem = NULL;
	}
	if (cq->cqe) {
		/* Cast strips any qualifiers from the ring pointer so it
		 * can be handed to rte_free().
		 */
		rte_free((void *)(uintptr_t)cq->cqe);
		cq->cqe = NULL;
	}
	if (cq->dbr_offset) {
		/* NOTE(review): this truth test skips a valid offset of 0
		 * and passes for the -1 sentinel written below, so a second
		 * call would release again; presumably
		 * `cq->dbr_offset >= 0` was intended — confirm against the
		 * field's initial state.
		 */
		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
		cq->dbr_offset = -1;
	}
	if (cq->obj) {
		mlx5_devx_cmd_destroy(cq->obj);
		cq->obj = NULL;
	}
	return 0;
}
76 
77 /**
78  * create the CQ object.
79  *
80  * @param priv
81  *   Pointer to the priv object.
 * @param cq
83  *   Pointer to the CQ to be created.
84  *
85  * @return
86  *   0 on success, a negative errno value otherwise and rte_errno is set.
87  */
88 static int
89 regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
90 {
91 	struct mlx5_devx_cq_attr attr = {
92 		.q_umem_valid = 1,
93 		.db_umem_valid = 1,
94 		.eqn = priv->eqn,
95 	};
96 	struct mlx5_devx_dbr_page *dbr_page = NULL;
97 	void *buf = NULL;
98 	size_t pgsize = sysconf(_SC_PAGESIZE);
99 	uint32_t cq_size = 1 << cq->log_nb_desc;
100 	uint32_t i;
101 
102 	cq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
103 	if (cq->dbr_offset < 0) {
104 		DRV_LOG(ERR, "Can't allocate cq door bell record.");
105 		rte_errno  = ENOMEM;
106 		goto error;
107 	}
108 	cq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
109 	cq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
110 			       (uintptr_t)cq->dbr_offset);
111 
112 	buf = rte_calloc(NULL, 1, sizeof(struct mlx5_cqe) * cq_size, 4096);
113 	if (!buf) {
114 		DRV_LOG(ERR, "Can't allocate cqe buffer.");
115 		rte_errno  = ENOMEM;
116 		goto error;
117 	}
118 	cq->cqe = buf;
119 	for (i = 0; i < cq_size; i++)
120 		cq->cqe[i].op_own = 0xff;
121 	cq->cqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf,
122 						sizeof(struct mlx5_cqe) *
123 						cq_size, 7);
124 	cq->ci = 0;
125 	if (!cq->cqe_umem) {
126 		DRV_LOG(ERR, "Can't register cqe mem.");
127 		rte_errno  = ENOMEM;
128 		goto error;
129 	}
130 	attr.db_umem_offset = cq->dbr_offset;
131 	attr.db_umem_id = cq->dbr_umem;
132 	attr.q_umem_id = mlx5_os_get_umem_id(cq->cqe_umem);
133 	attr.log_cq_size = cq->log_nb_desc;
134 	attr.uar_page_id = priv->uar->page_id;
135 	attr.log_page_size = rte_log2_u32(pgsize);
136 	cq->obj = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
137 	if (!cq->obj) {
138 		DRV_LOG(ERR, "Can't create cq object.");
139 		rte_errno  = ENOMEM;
140 		goto error;
141 	}
142 	return 0;
143 error:
144 	if (cq->cqe_umem)
145 		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
146 	if (buf)
147 		rte_free(buf);
148 	if (cq->dbr_offset)
149 		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
150 	return -rte_errno;
151 }
152 
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/* Resolve the protection-domain number behind an ibv PD handle. */
static int
regex_get_pdn(void *pd, uint32_t *pdn)
{
	struct mlx5dv_pd pd_info;
	struct mlx5dv_obj dv_obj;
	int err;

	dv_obj.pd.in = pd;
	dv_obj.pd.out = &pd_info;
	err = mlx5_glue->dv_init_obj(&dv_obj, MLX5DV_OBJ_PD);
	if (err != 0) {
		DRV_LOG(DEBUG, "Fail to get PD object info");
		return err;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif
172 
173 /**
174  * create the SQ object.
175  *
176  * @param priv
177  *   Pointer to the priv object.
178  * @param qp
179  *   Pointer to the QP element
180  * @param q_ind
181  *   The index of the queue.
182  * @param log_nb_desc
183  *   Log 2 of the number of descriptors to be used.
184  *
185  * @return
186  *   0 on success, a negative errno value otherwise and rte_errno is set.
187  */
188 static int
189 regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
190 		     uint16_t q_ind, uint16_t log_nb_desc)
191 {
192 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
193 	struct mlx5_devx_create_sq_attr attr = { 0 };
194 	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
195 	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
196 	struct mlx5_devx_dbr_page *dbr_page = NULL;
197 	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
198 	void *buf = NULL;
199 	uint32_t sq_size;
200 	uint32_t pd_num = 0;
201 	int ret;
202 
203 	sq->log_nb_desc = log_nb_desc;
204 	sq_size = 1 << sq->log_nb_desc;
205 	sq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
206 	if (sq->dbr_offset < 0) {
207 		DRV_LOG(ERR, "Can't allocate sq door bell record.");
208 		rte_errno  = ENOMEM;
209 		goto error;
210 	}
211 	sq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
212 	sq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
213 			       (uintptr_t)sq->dbr_offset);
214 
215 	buf = rte_calloc(NULL, 1, 64 * sq_size, 4096);
216 	if (!buf) {
217 		DRV_LOG(ERR, "Can't allocate wqe buffer.");
218 		rte_errno  = ENOMEM;
219 		goto error;
220 	}
221 	sq->wqe = buf;
222 	sq->wqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf, 64 * sq_size,
223 						7);
224 	sq->ci = 0;
225 	sq->pi = 0;
226 	if (!sq->wqe_umem) {
227 		DRV_LOG(ERR, "Can't register wqe mem.");
228 		rte_errno  = ENOMEM;
229 		goto error;
230 	}
231 	attr.state = MLX5_SQC_STATE_RST;
232 	attr.tis_lst_sz = 0;
233 	attr.tis_num = 0;
234 	attr.user_index = q_ind;
235 	attr.cqn = qp->cq.obj->id;
236 	wq_attr->uar_page = priv->uar->page_id;
237 	regex_get_pdn(priv->pd, &pd_num);
238 	wq_attr->pd = pd_num;
239 	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
240 	wq_attr->dbr_umem_id = sq->dbr_umem;
241 	wq_attr->dbr_addr = sq->dbr_offset;
242 	wq_attr->dbr_umem_valid = 1;
243 	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
244 	wq_attr->wq_umem_offset = 0;
245 	wq_attr->wq_umem_valid = 1;
246 	wq_attr->log_wq_stride = 6;
247 	wq_attr->log_wq_sz = sq->log_nb_desc;
248 	sq->obj = mlx5_devx_cmd_create_sq(priv->ctx, &attr);
249 	if (!sq->obj) {
250 		DRV_LOG(ERR, "Can't create sq object.");
251 		rte_errno  = ENOMEM;
252 		goto error;
253 	}
254 	modify_attr.state = MLX5_SQC_STATE_RDY;
255 	ret = mlx5_devx_cmd_modify_sq(sq->obj, &modify_attr);
256 	if (ret) {
257 		DRV_LOG(ERR, "Can't change sq state to ready.");
258 		rte_errno  = ENOMEM;
259 		goto error;
260 	}
261 
262 	return 0;
263 error:
264 	if (sq->wqe_umem)
265 		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
266 	if (buf)
267 		rte_free(buf);
268 	if (sq->dbr_offset)
269 		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
270 	return -rte_errno;
271 #else
272 	(void)priv;
273 	(void)qp;
274 	(void)q_ind;
275 	(void)log_nb_desc;
276 	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
277 	return -ENOTSUP;
278 #endif
279 }
280 
281 /**
282  * Destroy the SQ object.
283  *
284  * @param priv
285  *   Pointer to the priv object.
286  * @param qp
287  *   Pointer to the QP element
288  * @param q_ind
289  *   The index of the queue.
290  *
291  * @return
292  *   0 on success, a negative errno value otherwise and rte_errno is set.
293  */
static int
regex_ctrl_destroy_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		      uint16_t q_ind)
{
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];

	/* Tear down in reverse order of creation: umem registration, WQE
	 * ring buffer, door-bell record, then the DevX SQ object. Each
	 * handle is reset after release so a repeated call skips it.
	 */
	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (sq->wqe) {
		/* Cast strips any qualifiers from the ring pointer so it
		 * can be handed to rte_free().
		 */
		rte_free((void *)(uintptr_t)sq->wqe);
		sq->wqe = NULL;
	}
	if (sq->dbr_offset) {
		/* NOTE(review): this truth test skips a valid offset of 0
		 * and passes for the -1 sentinel written below, so a second
		 * call would release again; presumably
		 * `sq->dbr_offset >= 0` was intended — confirm against the
		 * field's initial state.
		 */
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
		sq->dbr_offset = -1;
	}
	if (sq->obj) {
		mlx5_devx_cmd_destroy(sq->obj);
		sq->obj = NULL;
	}
	return 0;
}
318 
319 /**
320  * Setup the qp.
321  *
322  * @param dev
323  *   Pointer to RegEx dev structure.
324  * @param qp_ind
325  *   The queue index to setup.
326  * @param cfg
327  *   The queue requested configuration.
328  *
329  * @return
330  *   0 on success, a negative errno value otherwise and rte_errno is set.
331  */
332 int
333 mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
334 		    const struct rte_regexdev_qp_conf *cfg)
335 {
336 	struct mlx5_regex_priv *priv = dev->data->dev_private;
337 	struct mlx5_regex_qp *qp;
338 	int i;
339 	int ret;
340 	uint16_t log_desc;
341 
342 	qp = &priv->qps[qp_ind];
343 	qp->flags = cfg->qp_conf_flags;
344 	qp->cq.log_nb_desc = rte_log2_u32(cfg->nb_desc);
345 	qp->nb_desc = 1 << qp->cq.log_nb_desc;
346 	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
347 		qp->nb_obj = regex_ctrl_get_nb_obj(qp->nb_desc);
348 	else
349 		qp->nb_obj = 1;
350 	qp->sqs = rte_malloc(NULL,
351 			     qp->nb_obj * sizeof(struct mlx5_regex_sq), 64);
352 	if (!qp->sqs) {
353 		DRV_LOG(ERR, "Can't allocate sq array memory.");
354 		rte_errno = ENOMEM;
355 		return -rte_errno;
356 	}
357 	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
358 	ret = regex_ctrl_create_cq(priv, &qp->cq);
359 	if (ret) {
360 		DRV_LOG(ERR, "Can't create cq.");
361 		goto err_cq;
362 	}
363 	for (i = 0; i < qp->nb_obj; i++) {
364 		ret = regex_ctrl_create_sq(priv, qp, i, log_desc);
365 		if (ret) {
366 			DRV_LOG(ERR, "Can't create sq.");
367 			goto err_sq;
368 		}
369 	}
370 
371 	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
372 				 rte_socket_id());
373 	if (ret) {
374 		DRV_LOG(ERR, "Error setting up mr btree");
375 		goto err_btree;
376 	}
377 
378 	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
379 	if (ret) {
380 		DRV_LOG(ERR, "Error setting up fastpath");
381 		goto err_fp;
382 	}
383 	return 0;
384 
385 err_fp:
386 	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
387 err_btree:
388 	for (i = 0; i < qp->nb_obj; i++)
389 		regex_ctrl_destroy_sq(priv, qp, i);
390 err_sq:
391 	regex_ctrl_destroy_cq(priv, &qp->cq);
392 err_cq:
393 	rte_free(qp->sqs);
394 	return ret;
395 }
396