/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>

#include <rte_log.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>
#include <dev_driver.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp.h"

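/*
 * Sizing notes (illustrative): MLX5_REGEX_NUM_WQE_PER_PAGE presumably
 * divides a 4096-byte page into 64-byte WQE basic blocks, i.e. 64 WQEBBs
 * per page. MLX5_REGEX_WQE_LOG_NUM widens the WQE count by a factor of 4
 * (log + 2) when UMR is in use, since each descriptor then needs a UMR
 * WQE plus a RegEx WQE spanning 4 WQEBBs in total (see the comment in
 * mlx5_regex_qp_setup() below).
 */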
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096 / 64)

#define MLX5_REGEX_WQE_LOG_NUM(has_umr, log_desc) \
	((has_umr) ? ((log_desc) + 2) : (log_desc))

/**
 * Return the number of QP objects to be created.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}
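
/*
 * Worked example (illustrative): with nb_desc = 100 and 64 WQEs per
 * page, 100 / 64 = 1 plus a remainder of 36, so two objects are needed;
 * with nb_desc = 128 the division is exact, the remainder term adds
 * nothing, and two objects are needed as well.
 */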

/**
 * Destroy the CQ.
 *
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
	return 0;
}

/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
	};
	int ret;

	cq->ci = 0;
	ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,
				  &attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ object.");
		memset(cq, 0, sizeof(*cq));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}
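
/*
 * Usage note (illustrative): the caller is expected to set
 * cq->log_nb_desc before calling regex_ctrl_create_cq(), as
 * mlx5_regex_qp_setup() does below:
 *
 *	qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);
 *	ret = regex_ctrl_create_cq(priv, &qp->cq);
 */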

/**
 * Destroy the HW QP object.
 *
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_hw_qp(struct mlx5_regex_qp *qp, uint16_t q_ind)
{
	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];

	mlx5_devx_qp_destroy(&qp_obj->qp_obj);
	/* Clear only this HW QP entry; zeroing the whole parent QP would
	 * wipe qp->qps and break the callers' destroy loops.
	 */
	memset(qp_obj, 0, sizeof(*qp_obj));
	return 0;
}

/**
 * Create the HW QP object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
			uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_qp_attr attr = {
		.cqn = qp->cq.cq_obj.cq->id,
		.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
		.pd = priv->cdev->pdn,
		.ts_format = mlx5_ts_format_conv
				(priv->cdev->config.hca_attr.qp_ts_format),
		.user_index = q_ind,
	};
	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
	int ret;

	qp_obj->log_nb_desc = log_nb_desc;
	qp_obj->qpn = q_ind;
	qp_obj->ci = 0;
	qp_obj->pi = 0;
	attr.num_of_receive_wqes = 0;
	attr.num_of_send_wqbbs = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
								  log_nb_desc));
	attr.mmo = priv->mmo_regex_qp_cap;
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
				  attr.num_of_send_wqbbs * MLX5_WQE_SIZE, &attr,
				  SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create QP object.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ret = mlx5_devx_qp2rts(&qp_obj->qp_obj, 0);
	if (ret) {
		DRV_LOG(ERR, "Can't change QP state to RTS.");
		regex_ctrl_destroy_hw_qp(qp, q_ind);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}
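
/*
 * Sizing example (illustrative): with log_nb_desc = 4 and UMR enabled,
 * MLX5_REGEX_WQE_LOG_NUM(1, 4) = 6, so num_of_send_wqbbs = RTE_BIT32(6)
 * = 64 and the QP is created with 64 * MLX5_WQE_SIZE bytes of send
 * queue space (4KB, assuming the usual 64B MLX5_WQE_SIZE).
 */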

/**
 * Set up the QP.
 *
 * @param dev
 *   Pointer to RegEx dev structure.
 * @param qp_ind
 *   The queue index to set up.
 * @param cfg
 *   The requested queue configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	int nb_sq_config = 0;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	if (qp->jobs) {
		DRV_LOG(ERR, "Attempting to set up QP a second time.");
		rte_errno = EINVAL;
		return -rte_errno;
	}

	qp->flags = cfg->qp_conf_flags;
	log_desc = rte_log2_u32(cfg->nb_desc);
	/*
	 * UMR mode requires two WQEs (a UMR WQE and a RegEx WQE) per
	 * descriptor. For the CQ, double the number of CQEs. For the SQ,
	 * the UMR and RegEx WQEs for one descriptor consume 4 WQEBBs, so
	 * quadruple the number of WQEs.
	 */
	qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);
	qp->nb_desc = 1 << log_desc;
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj
			(1 << MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_desc));
	else
		qp->nb_obj = 1;
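	/*
	 * Example (illustrative): with cfg->nb_desc = 1024 and UMR
	 * enabled, log_desc = 10 and the OOS path asks for
	 * 1 << (10 + 2) = 4096 WQEs, i.e. 4096 / 64 = 64 HW QP objects.
	 */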
	qp->qps = rte_malloc(NULL,
			qp->nb_obj * sizeof(struct mlx5_regex_hw_qp), 64);
	if (!qp->qps) {
		DRV_LOG(ERR, "Can't allocate qp array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
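	/*
	 * Continuing the example above: the descriptors are split evenly
	 * across the HW QP objects, so 1024 / 64 = 16 per object and the
	 * recomputed log_desc below is 4.
	 */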
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create cq.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_hw_qp(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create qp object.");
			goto err_btree;
		}
		nb_sq_config++;
	}

	ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
				rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "Error setting up mr btree");
		goto err_btree;
	}

	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
	if (ret) {
		DRV_LOG(ERR, "Error setting up fastpath");
		goto err_fp;
	}
	return 0;

err_fp:
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
err_btree:
	for (i = 0; i < nb_sq_config; i++)
		regex_ctrl_destroy_hw_qp(qp, i);
	regex_ctrl_destroy_cq(&qp->cq);
err_cq:
	rte_free(qp->qps);
	return ret;
}
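
/*
 * Usage sketch (illustrative, not part of the driver): applications
 * reach mlx5_regex_qp_setup() through the generic regexdev API, e.g.:
 *
 *	struct rte_regexdev_qp_conf conf = {
 *		.nb_desc = 1024,
 *		.qp_conf_flags = RTE_REGEX_QUEUE_PAIR_CFG_OOS_F,
 *	};
 *	ret = rte_regexdev_queue_pair_setup(dev_id, 0, &conf);
 */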

void
mlx5_regex_clean_ctrl(struct rte_regexdev *dev)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int qp_ind;
	int i;

	if (!priv->qps)
		return;
	for (qp_ind = 0; qp_ind < priv->nb_queues; qp_ind++) {
		qp = &priv->qps[qp_ind];
		/* Check if mlx5_regex_qp_setup() was called for this QP. */
		if (!qp->jobs)
			continue;
		mlx5_regexdev_teardown_fastpath(priv, qp_ind);
		mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
		for (i = 0; i < qp->nb_obj; i++)
			regex_ctrl_destroy_hw_qp(qp, i);
		regex_ctrl_destroy_cq(&qp->cq);
	}
}