/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>

#include <rte_log.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>
#include <rte_dev.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"
#include "mlx5_rxp.h"
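
/* A WQE is 64 bytes, so a 4 KB page holds 64 WQEs. */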
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)

/**
 * Return the number of SQ objects to be created for the queue pair.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of SQ objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}
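
/*
 * Example (illustrative): nb_desc = 100 spans one full page of 64 WQEs plus
 * a remainder, so 100 / 64 + !!(100 % 64) = 2 SQ objects are created.
 */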

/**
 * Destroy the CQ object.
 *
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
	return 0;
}

/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = priv->uar->page_id,
	};
	int ret;

	cq->ci = 0;
	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
				  &attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ object.");
		memset(cq, 0, sizeof(*cq));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
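/* Extract the protection domain number (pdn) from a Verbs PD via Direct Verbs. */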
static int
regex_get_pdn(void *pd, uint32_t *pdn)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Failed to get PD object info.");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif

/**
 * Create the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		     uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
	void *buf = NULL;
	uint32_t sq_size;
	uint32_t pd_num = 0;
	int ret;

	sq->log_nb_desc = log_nb_desc;
	sq_size = 1 << sq->log_nb_desc;
	sq->wqe_umem = NULL;
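	/* Allocate a doorbell record slot for this SQ from the shared doorbell pages. */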
	sq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
	if (sq->dbr_offset < 0) {
		DRV_LOG(ERR, "Can't allocate SQ doorbell record.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
	sq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			       (uintptr_t)sq->dbr_offset);
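
	/* WQE buffer: 64 bytes per descriptor, 4 KB aligned for umem registration. */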
	buf = rte_calloc(NULL, 1, 64 * sq_size, 4096);
	if (!buf) {
		DRV_LOG(ERR, "Can't allocate WQE buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->wqe = buf;
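	/*
	 * Access flags 7 = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE |
	 * IBV_ACCESS_REMOTE_READ for the registered umem.
	 */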
	sq->wqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf, 64 * sq_size,
						7);
	sq->ci = 0;
	sq->pi = 0;
	if (!sq->wqe_umem) {
		DRV_LOG(ERR, "Can't register WQE memory.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.state = MLX5_SQC_STATE_RST;
	attr.tis_lst_sz = 0;
	attr.tis_num = 0;
	attr.user_index = q_ind;
	attr.cqn = qp->cq.cq_obj.cq->id;
	wq_attr->uar_page = priv->uar->page_id;
	ret = regex_get_pdn(priv->pd, &pd_num);
	if (ret) {
		DRV_LOG(ERR, "Can't get pdn.");
		rte_errno = ENOMEM;
		goto error;
	}
	wq_attr->pd = pd_num;
	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
	wq_attr->dbr_umem_id = sq->dbr_umem;
	wq_attr->dbr_addr = sq->dbr_offset;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
	wq_attr->wq_umem_offset = 0;
	wq_attr->wq_umem_valid = 1;
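	/* Each WQE is 2^6 = 64 bytes. */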
	wq_attr->log_wq_stride = 6;
	wq_attr->log_wq_sz = sq->log_nb_desc;
	sq->obj = mlx5_devx_cmd_create_sq(priv->ctx, &attr);
	if (!sq->obj) {
		DRV_LOG(ERR, "Can't create SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	modify_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(sq->obj, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}

	return 0;
error:
	if (sq->wqe_umem)
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
	if (buf)
		rte_free(buf);
	if (sq->dbr_offset >= 0)
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
	return -rte_errno;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot create SQ - no DV support.");
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif
}

/**
 * Destroy the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		      uint16_t q_ind)
{
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];

	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (sq->wqe) {
		rte_free((void *)(uintptr_t)sq->wqe);
		sq->wqe = NULL;
	}
	if (sq->dbr_offset >= 0) {
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
		sq->dbr_offset = -1;
	}
	if (sq->obj) {
		mlx5_devx_cmd_destroy(sq->obj);
		sq->obj = NULL;
	}
	return 0;
}

/**
 * Set up the QP.
 *
 * @param dev
 *   Pointer to the RegEx dev structure.
 * @param qp_ind
 *   The index of the queue to set up.
 * @param cfg
 *   The requested queue configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	int nb_sq_config = 0;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
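	/* Round the descriptor count to a power of two (rte_log2_u32() rounds up). */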
	qp->cq.log_nb_desc = rte_log2_u32(cfg->nb_desc);
	qp->nb_desc = 1 << qp->cq.log_nb_desc;
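	/*
	 * With out-of-order scan allowed, spread the descriptors over several
	 * SQ objects; otherwise a single SQ keeps the scans in order.
	 */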
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj(qp->nb_desc);
	else
		qp->nb_obj = 1;
	qp->sqs = rte_malloc(NULL,
			     qp->nb_obj * sizeof(struct mlx5_regex_sq), 64);
	if (!qp->sqs) {
		DRV_LOG(ERR, "Can't allocate SQ array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_sq(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create SQ.");
			goto err_btree;
		}
		nb_sq_config++;
	}

	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
				 rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "Error setting up MR btree.");
		goto err_btree;
	}

	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
	if (ret) {
		DRV_LOG(ERR, "Error setting up fastpath.");
		goto err_fp;
	}
	return 0;
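
/* Tear down in the reverse order of setup. */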
err_fp:
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
err_btree:
	for (i = 0; i < nb_sq_config; i++)
		regex_ctrl_destroy_sq(priv, qp, i);
	regex_ctrl_destroy_cq(&qp->cq);
err_cq:
	rte_free(qp->sqs);
	return ret;
}
338