/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <unistd.h>

#include <rte_log.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"
#include "mlx5_rxp.h"

/* A 4KB page holds 4096 / 64 = 64 WQEs of 64 bytes each. */
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096 / 64)

/**
 * Return the number of SQ objects to be created for the queue.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}
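/*
 * For example, with the 64 WQEs-per-page granularity above, nb_desc = 100
 * yields 100 / 64 + !!(100 % 64) = 1 + 1 = 2 objects, while an exact
 * multiple such as nb_desc = 128 yields 128 / 64 + 0 = 2.
 */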
/**
 * Destroy the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	if (cq->cqe_umem) {
		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
		cq->cqe_umem = NULL;
	}
	if (cq->cqe) {
		rte_free((void *)(uintptr_t)cq->cqe);
		cq->cqe = NULL;
	}
	if (cq->dbr_offset >= 0) {
		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
		cq->dbr_offset = -1;
	}
	if (cq->obj) {
		mlx5_devx_cmd_destroy(cq->obj);
		cq->obj = NULL;
	}
	return 0;
}

/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.q_umem_valid = 1,
		.db_umem_valid = 1,
		.eqn = priv->eqn,
	};
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	void *buf = NULL;
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t cq_size = 1 << cq->log_nb_desc;
	uint32_t i;

	cq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
	if (cq->dbr_offset < 0) {
		DRV_LOG(ERR, "Can't allocate cq door bell record.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
	cq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			       (uintptr_t)cq->dbr_offset);
	buf = rte_calloc(NULL, 1, sizeof(struct mlx5_cqe) * cq_size, 4096);
	if (!buf) {
		DRV_LOG(ERR, "Can't allocate cqe buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq->cqe = buf;
	/* Mark all CQEs as hardware-owned so they are not polled early. */
	for (i = 0; i < cq_size; i++)
		cq->cqe[i].op_own = 0xff;
	/* Access flags 7: local write, remote write and remote read. */
	cq->cqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf,
						sizeof(struct mlx5_cqe) *
						cq_size, 7);
	cq->ci = 0;
	if (!cq->cqe_umem) {
		DRV_LOG(ERR, "Can't register cqe mem.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.db_umem_offset = cq->dbr_offset;
	attr.db_umem_id = cq->dbr_umem;
	attr.q_umem_id = mlx5_os_get_umem_id(cq->cqe_umem);
	attr.log_cq_size = cq->log_nb_desc;
	attr.uar_page_id = priv->uar->page_id;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->obj = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->obj) {
		DRV_LOG(ERR, "Can't create cq object.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	if (cq->cqe_umem) {
		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
		cq->cqe_umem = NULL;
	}
	if (buf) {
		rte_free(buf);
		cq->cqe = NULL;
	}
	if (cq->dbr_offset >= 0) {
		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
		cq->dbr_offset = -1;
	}
	return -rte_errno;
}

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int
regex_get_pdn(void *pd, uint32_t *pdn)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to get PD object info.");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif
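/*
 * Usage sketch for regex_get_pdn() (illustrative only, with error
 * handling elided): this is how regex_ctrl_create_sq() below consumes
 * the PD number when filling the WQ attributes:
 *
 *	uint32_t pdn = 0;
 *
 *	if (regex_get_pdn(priv->pd, &pdn))
 *		goto error;
 *	wq_attr->pd = pdn;
 */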
/**
 * Create the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		     uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
	void *buf = NULL;
	uint32_t sq_size;
	uint32_t pd_num = 0;
	int ret;

	sq->log_nb_desc = log_nb_desc;
	sq_size = 1 << sq->log_nb_desc;
	sq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
	if (sq->dbr_offset < 0) {
		DRV_LOG(ERR, "Can't allocate sq door bell record.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
	sq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			       (uintptr_t)sq->dbr_offset);
	/* Allocate the WQ ring buffer: 64 bytes per WQE. */
	buf = rte_calloc(NULL, 1, 64 * sq_size, 4096);
	if (!buf) {
		DRV_LOG(ERR, "Can't allocate wqe buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->wqe = buf;
	/* Access flags 7: local write, remote write and remote read. */
	sq->wqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf, 64 * sq_size,
						7);
	sq->ci = 0;
	sq->pi = 0;
	if (!sq->wqe_umem) {
		DRV_LOG(ERR, "Can't register wqe mem.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.state = MLX5_SQC_STATE_RST;
	attr.tis_lst_sz = 0;
	attr.tis_num = 0;
	attr.user_index = q_ind;
	attr.cqn = qp->cq.obj->id;
	wq_attr->uar_page = priv->uar->page_id;
	ret = regex_get_pdn(priv->pd, &pd_num);
	if (ret) {
		DRV_LOG(ERR, "Can't get pdn.");
		rte_errno = EINVAL;
		goto error;
	}
	wq_attr->pd = pd_num;
	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
	wq_attr->dbr_umem_id = sq->dbr_umem;
	wq_attr->dbr_addr = sq->dbr_offset;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
	wq_attr->wq_umem_offset = 0;
	wq_attr->wq_umem_valid = 1;
	wq_attr->log_wq_stride = 6; /* 64-byte WQE stride. */
	wq_attr->log_wq_sz = sq->log_nb_desc;
	sq->obj = mlx5_devx_cmd_create_sq(priv->ctx, &attr);
	if (!sq->obj) {
		DRV_LOG(ERR, "Can't create sq object.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Move the SQ from RESET to READY state before use. */
	modify_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(sq->obj, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change sq state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	if (sq->obj) {
		mlx5_devx_cmd_destroy(sq->obj);
		sq->obj = NULL;
	}
	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (buf) {
		rte_free(buf);
		sq->wqe = NULL;
	}
	if (sq->dbr_offset >= 0) {
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
		sq->dbr_offset = -1;
	}
	return -rte_errno;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif
}
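/*
 * Sizing example (illustrative only): with log_nb_desc = 4 the SQ holds
 * 1 << 4 = 16 WQEs, and at the 64-byte stride set above (log_wq_stride
 * = 6) the ring buffer registered for the queue is 16 * 64 = 1024 bytes.
 */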
/**
 * Destroy the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		      uint16_t q_ind)
{
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];

	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (sq->wqe) {
		rte_free((void *)(uintptr_t)sq->wqe);
		sq->wqe = NULL;
	}
	if (sq->dbr_offset >= 0) {
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
		sq->dbr_offset = -1;
	}
	if (sq->obj) {
		mlx5_devx_cmd_destroy(sq->obj);
		sq->obj = NULL;
	}
	return 0;
}

/**
 * Set up a queue pair.
 *
 * @param dev
 *   Pointer to RegEx dev structure.
 * @param qp_ind
 *   The queue index to setup.
 * @param cfg
 *   The queue requested configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
	/* Round the descriptor count up to a power of two. */
	qp->cq.log_nb_desc = rte_log2_u32(cfg->nb_desc);
	qp->nb_desc = 1 << qp->cq.log_nb_desc;
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj(qp->nb_desc);
	else
		qp->nb_obj = 1;
	/* Zero the SQ array so cleanup of partially created SQs is safe. */
	qp->sqs = rte_zmalloc(NULL,
			      qp->nb_obj * sizeof(struct mlx5_regex_sq), 64);
	if (!qp->sqs) {
		DRV_LOG(ERR, "Can't allocate sq array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create cq.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_sq(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create sq.");
			goto err_sq;
		}
	}
	mlx5_regexdev_setup_fastpath(priv, qp_ind);
	return 0;
err_sq:
	/* Destroy only the SQs that were actually created. */
	while (i--)
		regex_ctrl_destroy_sq(priv, qp, i);
	regex_ctrl_destroy_cq(priv, &qp->cq);
err_cq:
	rte_free(qp->sqs);
	qp->sqs = NULL;
	return -rte_errno;
}
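/*
 * Application-level usage sketch (illustrative only, assuming a probed
 * RegEx device with identifier dev_id and a configured queue count):
 *
 *	struct rte_regexdev_qp_conf conf = {
 *		.qp_conf_flags = RTE_REGEX_QUEUE_PAIR_CFG_OOS_F,
 *		.nb_desc = 128,
 *	};
 *
 *	if (rte_regexdev_queue_pair_setup(dev_id, 0, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot setup qp\n");
 *
 * With the out-of-order flag set, the 128 descriptors are split into
 * 128 / 64 = 2 SQ objects; without it a single SQ holds the whole ring.
 */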