/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <unistd.h> /* sysconf() */

#include <rte_log.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"
#include "mlx5_rxp.h"

/* Number of 64B WQEs that fit in one 4KB page. */
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096 / 64)

/**
 * Returns the number of SQ objects to be created for the queue.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}
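
/*
 * For example, nb_desc = 100 gives 100 / 64 = 1 full page of WQEs plus
 * one extra object for the remaining 36 descriptors, so two SQ objects
 * in total.
 */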

/**
 * Destroy the CQ.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	if (cq->cqe_umem) {
		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
		cq->cqe_umem = NULL;
	}
	if (cq->cqe) {
		rte_free((void *)(uintptr_t)cq->cqe);
		cq->cqe = NULL;
	}
	if (cq->dbr_offset) {
		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
		cq->dbr_offset = -1;
	}
	if (cq->obj) {
		mlx5_devx_cmd_destroy(cq->obj);
		cq->obj = NULL;
	}
	return 0;
}

/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.q_umem_valid = 1,
		.db_umem_valid = 1,
		.eqn = priv->eqn,
	};
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	void *buf = NULL;
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t cq_size = 1 << cq->log_nb_desc;
	uint32_t i;

	cq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
	if (cq->dbr_offset < 0) {
		DRV_LOG(ERR, "Can't allocate cq door bell record.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
	cq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			       (uintptr_t)cq->dbr_offset);

	buf = rte_calloc(NULL, 1, sizeof(struct mlx5_cqe) * cq_size, 4096);
	if (!buf) {
		DRV_LOG(ERR, "Can't allocate cqe buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq->cqe = buf;
	/* Mark all CQEs as invalid (hardware-owned) before first use. */
	for (i = 0; i < cq_size; i++)
		cq->cqe[i].op_own = 0xff;
	/* Access flags 7 = LOCAL_WRITE | REMOTE_WRITE | REMOTE_READ. */
	cq->cqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf,
						sizeof(struct mlx5_cqe) *
						cq_size, 7);
	if (!cq->cqe_umem) {
		DRV_LOG(ERR, "Can't register cqe mem.");
		rte_errno = ENOMEM;
		goto error;
	}
	attr.db_umem_offset = cq->dbr_offset;
	attr.db_umem_id = cq->dbr_umem;
	attr.q_umem_id = mlx5_os_get_umem_id(cq->cqe_umem);
	attr.log_cq_size = cq->log_nb_desc;
	attr.uar_page_id = priv->uar->page_id;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->obj = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->obj) {
		DRV_LOG(ERR, "Can't create cq object.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	if (cq->cqe_umem) {
		mlx5_glue->devx_umem_dereg(cq->cqe_umem);
		cq->cqe_umem = NULL;
	}
	if (buf) {
		rte_free(buf);
		cq->cqe = NULL;
	}
	/* The doorbell offset is negative if allocation failed. */
	if (cq->dbr_offset >= 0) {
		mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset);
		cq->dbr_offset = -1;
	}
	return -rte_errno;
}

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int
regex_get_pdn(void *pd, uint32_t *pdn)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Failed to get PD object info.");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif
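
/*
 * Note: each RegEx WQE occupies a single 64B stride (log_wq_stride = 6
 * below), which is also what MLX5_REGEX_NUM_WQE_PER_PAGE assumes when a
 * queue is split across multiple SQ objects.
 */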

/**
 * Create the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		     uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
	struct mlx5_devx_dbr_page *dbr_page = NULL;
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
	void *buf = NULL;
	uint32_t sq_size;
	uint32_t pd_num = 0;
	int ret;

	sq->log_nb_desc = log_nb_desc;
	sq_size = 1 << sq->log_nb_desc;
	sq->dbr_offset = mlx5_get_dbr(priv->ctx, &priv->dbrpgs, &dbr_page);
	if (sq->dbr_offset < 0) {
		DRV_LOG(ERR, "Can't allocate sq door bell record.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->dbr_umem = mlx5_os_get_umem_id(dbr_page->umem);
	sq->dbr = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			       (uintptr_t)sq->dbr_offset);

	buf = rte_calloc(NULL, 1, 64 * sq_size, 4096);
	if (!buf) {
		DRV_LOG(ERR, "Can't allocate wqe buffer.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->wqe = buf;
	/* Access flags 7 = LOCAL_WRITE | REMOTE_WRITE | REMOTE_READ. */
	sq->wqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf, 64 * sq_size,
						7);
	if (!sq->wqe_umem) {
		DRV_LOG(ERR, "Can't register wqe mem.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq->ci = 0;
	sq->pi = 0;
	ret = regex_get_pdn(priv->pd, &pd_num);
	if (ret) {
		DRV_LOG(ERR, "Can't get pd number.");
		rte_errno = EINVAL;
		goto error;
	}
	attr.state = MLX5_SQC_STATE_RST;
	attr.tis_lst_sz = 0;
	attr.tis_num = 0;
	attr.user_index = q_ind;
	attr.cqn = qp->cq.obj->id;
	wq_attr->uar_page = priv->uar->page_id;
	wq_attr->pd = pd_num;
	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
	wq_attr->dbr_umem_id = sq->dbr_umem;
	wq_attr->dbr_addr = sq->dbr_offset;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(sq->wqe_umem);
	wq_attr->wq_umem_offset = 0;
	wq_attr->wq_umem_valid = 1;
	wq_attr->log_wq_stride = 6;
	wq_attr->log_wq_sz = sq->log_nb_desc;
	sq->obj = mlx5_devx_cmd_create_sq(priv->ctx, &attr);
	if (!sq->obj) {
		DRV_LOG(ERR, "Can't create sq object.");
		rte_errno = ENOMEM;
		goto error;
	}
	modify_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(sq->obj, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change sq state to ready.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	/* Destroy the SQ object before deregistering the umem it uses. */
	if (sq->obj) {
		mlx5_devx_cmd_destroy(sq->obj);
		sq->obj = NULL;
	}
	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (buf) {
		rte_free(buf);
		sq->wqe = NULL;
	}
	if (sq->dbr_offset >= 0) {
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
		sq->dbr_offset = -1;
	}
	return -rte_errno;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}
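
/*
 * DevX SQs are created in RST state and moved to RDY above before any
 * doorbell is rung; teardown below releases the objects in the reverse
 * order of creation.
 */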

/**
 * Destroy the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		      uint16_t q_ind)
{
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];

	if (sq->wqe_umem) {
		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
		sq->wqe_umem = NULL;
	}
	if (sq->wqe) {
		rte_free((void *)(uintptr_t)sq->wqe);
		sq->wqe = NULL;
	}
	if (sq->dbr_offset) {
		mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset);
		sq->dbr_offset = -1;
	}
	if (sq->obj) {
		mlx5_devx_cmd_destroy(sq->obj);
		sq->obj = NULL;
	}
	return 0;
}

/**
 * Set up the QP.
 *
 * @param dev
 *   Pointer to RegEx dev structure.
 * @param qp_ind
 *   The queue index to set up.
 * @param cfg
 *   The requested queue configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
	/* The queue is sized up to the next power of two descriptors. */
	qp->cq.log_nb_desc = rte_log2_u32(cfg->nb_desc);
	qp->nb_desc = 1 << qp->cq.log_nb_desc;
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj(qp->nb_desc);
	else
		qp->nb_obj = 1;
	qp->sqs = rte_malloc(NULL,
			     qp->nb_obj * sizeof(struct mlx5_regex_sq), 64);
	if (!qp->sqs) {
		DRV_LOG(ERR, "Can't allocate sq array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create cq.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_sq(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create sq.");
			goto err_sq;
		}
	}
	mlx5_regexdev_setup_fastpath(priv, qp_ind);
	return 0;
err_sq:
	/* Destroy only the SQs that were actually created. */
	while (i--)
		regex_ctrl_destroy_sq(priv, qp, i);
	regex_ctrl_destroy_cq(priv, &qp->cq);
err_cq:
	rte_free(qp->sqs);
	qp->sqs = NULL;
	return -rte_errno;
}
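
/*
 * Usage sketch (application side), for illustration only; dev_id and the
 * queue index are placeholders. Assuming a RegEx device configured via
 * rte_regexdev_configure(), a queue pair reaches mlx5_regex_qp_setup()
 * through the generic rte_regexdev API:
 *
 *	struct rte_regexdev_qp_conf qp_conf = {
 *		.qp_conf_flags = RTE_REGEX_QUEUE_PAIR_CFG_OOS_F,
 *		.nb_desc = 1024,
 *	};
 *	if (rte_regexdev_queue_pair_setup(dev_id, 0, &qp_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "QP setup failed");
 *
 * With the out-of-order flag set, the 1024 descriptors are spread over
 * 1024 / 64 = 16 SQ objects of 64 WQEs each.
 */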