/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>

#include <rte_log.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>
#include <rte_dev.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"
#include "mlx5_rxp.h"

#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096 / 64)

#define MLX5_REGEX_WQE_LOG_NUM(has_umr, log_desc) \
	((has_umr) ? ((log_desc) + 2) : (log_desc))

/**
 * Return the number of QP objects to be created.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}
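/*
 * Sizing example (illustrative only): MLX5_REGEX_NUM_WQE_PER_PAGE is
 * 4096 / 64 = 64, i.e. one 4 KB page holds 64 WQE basic blocks, so the
 * helper above is a ceiling division: nb_desc = 100 yields
 * 100 / 64 + 1 = 2 objects, while nb_desc = 128 yields exactly 2.
 */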
/**
 * Destroy the CQ object.
 *
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
	return 0;
}

/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = priv->uar->page_id,
	};
	int ret;

	cq->ci = 0;
	ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,
				  &attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ object.");
		memset(cq, 0, sizeof(*cq));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroy the hardware QP object.
 *
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_hw_qp(struct mlx5_regex_qp *qp, uint16_t q_ind)
{
	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];

	mlx5_devx_qp_destroy(&qp_obj->qp_obj);
	/* Clear only this HW QP slot; the parent queue may still own others. */
	memset(qp_obj, 0, sizeof(*qp_obj));
	return 0;
}

/**
 * Create the hardware QP object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
			uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_qp_attr attr = {
		.cqn = qp->cq.cq_obj.cq->id,
		.uar_index = priv->uar->page_id,
		.pd = priv->cdev->pdn,
		.ts_format = mlx5_ts_format_conv(priv->qp_ts_format),
		.user_index = q_ind,
	};
	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
	int ret;

	qp_obj->log_nb_desc = log_nb_desc;
	qp_obj->qpn = q_ind;
	qp_obj->ci = 0;
	qp_obj->pi = 0;
	attr.rq_size = 0;
	attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
			log_nb_desc));
	attr.mmo = priv->mmo_regex_qp_cap;
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
			MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
			&attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create QP object.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ret = mlx5_devx_qp2rts(&qp_obj->qp_obj, 0);
	if (ret) {
		DRV_LOG(ERR, "Can't change QP state to RTS.");
		regex_ctrl_destroy_hw_qp(qp, q_ind);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}

/**
 * Set up the QP.
 *
 * @param dev
 *   Pointer to the RegEx dev structure.
 * @param qp_ind
 *   The index of the queue to set up.
 * @param cfg
 *   The requested queue configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	int nb_sq_config = 0;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
	log_desc = rte_log2_u32(cfg->nb_desc);
	/*
	 * UMR mode requires two WQEs (a UMR WQE and a RegEx WQE) per
	 * descriptor, so the CQE count is doubled. On the SQ side, the
	 * UMR and RegEx WQEs for one descriptor consume 4 WQEBBs, so
	 * the WQE count is quadrupled.
	 */
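	/*
	 * Illustrative sizing example (values are arbitrary): with
	 * nb_desc = 256 (log_desc = 8) and UMR enabled, the CQ is sized
	 * for 2 * 256 = 512 CQEs (log 9), and the SQ for 4 * 256 = 1024
	 * WQEBBs, i.e. MLX5_REGEX_WQE_LOG_NUM(1, 8) = 10.
	 */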
	qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);
	qp->nb_desc = 1 << log_desc;
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj
			(1 << MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_desc));
	else
		qp->nb_obj = 1;
	qp->qps = rte_malloc(NULL,
			qp->nb_obj * sizeof(struct mlx5_regex_hw_qp), 64);
	if (!qp->qps) {
		DRV_LOG(ERR, "Can't allocate QP array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_hw_qp(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create QP object.");
			goto err_btree;
		}
		nb_sq_config++;
	}

	/* Save the pointer to the global generation number for memory event checks. */
	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
				 rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "Error setting up the MR btree.");
		goto err_btree;
	}

	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
	if (ret) {
		DRV_LOG(ERR, "Error setting up the fast path.");
		goto err_fp;
	}
	return 0;

err_fp:
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
err_btree:
	for (i = 0; i < nb_sq_config; i++)
		regex_ctrl_destroy_hw_qp(qp, i);
	regex_ctrl_destroy_cq(&qp->cq);
err_cq:
	rte_free(qp->qps);
	return ret;
}
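/*
 * Usage sketch (illustrative only, not driver code): an application
 * reaches mlx5_regex_qp_setup() through the generic regexdev API. The
 * device ID, queue index, and descriptor count below are arbitrary
 * example values.
 *
 *	struct rte_regexdev_qp_conf conf = {
 *		.qp_conf_flags = RTE_REGEX_QUEUE_PAIR_CFG_OOS_F,
 *		.nb_desc = 1024,
 *	};
 *	int ret = rte_regexdev_queue_pair_setup(dev_id, 0, &conf);
 */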