/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>

#include <rte_log.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>
#include <rte_dev.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"
#include "mlx5_rxp.h"

/* WQEBBs per page: 4096B page size / 64B WQEBB size. */
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)

/* With UMR, each descriptor consumes four WQEBBs, hence log + 2. */
#define MLX5_REGEX_WQE_LOG_NUM(has_umr, log_desc) \
	((has_umr) ? ((log_desc) + 2) : (log_desc))
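
/*
 * Illustrative compile-time checks (added for clarity, not driver logic):
 * with UMR enabled the log of the WQEBB number grows by 2, i.e. a ring of
 * 2^4 descriptors needs 2^6 WQEBBs; without UMR the size is unchanged.
 */
_Static_assert(MLX5_REGEX_WQE_LOG_NUM(1, 4) == 6,
	       "UMR mode quadruples the WQEBB number");
_Static_assert(MLX5_REGEX_WQE_LOG_NUM(0, 4) == 4,
	       "without UMR the WQEBB number is unchanged");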

/**
 * Returns the number of QP objects to be created.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}

/**
 * Destroy the CQ.
 *
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
	return 0;
}

/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = priv->uar->page_id,
	};
	int ret;

	cq->ci = 0;
	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
				  &attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ object.");
		memset(cq, 0, sizeof(*cq));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroy the HW QP object.
 *
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_hw_qp(struct mlx5_regex_qp *qp, uint16_t q_ind)
{
	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];

	mlx5_devx_qp_destroy(&qp_obj->qp_obj);
	/* Zero only the destroyed HW QP, not the whole SW QP element. */
	memset(qp_obj, 0, sizeof(*qp_obj));
	return 0;
}

/**
 * Create the HW QP object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
			uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_qp_attr attr = {
		.cqn = qp->cq.cq_obj.cq->id,
		.uar_index = priv->uar->page_id,
		.ts_format = mlx5_ts_format_conv(priv->qp_ts_format),
		.user_index = q_ind,
	};
	struct mlx5_regex_hw_qp *qp_obj = &qp->qps[q_ind];
	uint32_t pd_num = 0;
	int ret;

	qp_obj->log_nb_desc = log_nb_desc;
	qp_obj->qpn = q_ind;
	qp_obj->ci = 0;
	qp_obj->pi = 0;
	ret = regex_get_pdn(priv->pd, &pd_num);
	if (ret)
		return ret;
	attr.pd = pd_num;
	attr.rq_size = 0;
	attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
							log_nb_desc));
	attr.mmo = priv->mmo_regex_qp_cap;
	ret = mlx5_devx_qp_create(priv->ctx, &qp_obj->qp_obj,
			MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
			&attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create QP object.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ret = mlx5_devx_qp2rts(&qp_obj->qp_obj, 0);
	if (ret) {
		DRV_LOG(ERR, "Can't change QP state to RTS.");
		regex_ctrl_destroy_hw_qp(qp, q_ind);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}
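
/*
 * Worked example (illustrative numbers, not a configuration requirement)
 * for the sizing done in mlx5_regex_qp_setup() below: with OOS enabled,
 * UMR support and nb_desc = 256, log_desc = 8, so the WQ needs
 * 2^(8 + 2) = 1024 WQEBBs and is split into 1024 / 64 = 16 HW QP objects,
 * each holding exactly one page of WQEBBs. Each object is then created
 * with log_nb_desc = log2(256 / 16) = 4, and the CQ is sized for
 * 2^(8 + 1) = 512 CQEs.
 */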

/**
 * Set up the QP.
 *
 * @param dev
 *   Pointer to the RegEx dev structure.
 * @param qp_ind
 *   The queue index to set up.
 * @param cfg
 *   The requested queue configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	int nb_sq_config = 0;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
	log_desc = rte_log2_u32(cfg->nb_desc);
	/*
	 * UMR mode requires two WQEs (a UMR WQE and a RegEx WQE) per
	 * descriptor. For the CQ, double the number of CQEs. For the SQ,
	 * the UMR and RegEx WQEs of one descriptor consume 4 WQEBBs, so
	 * quadruple the number of WQEBBs.
	 */
	qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);
	qp->nb_desc = 1 << log_desc;
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj
			(1 << MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_desc));
	else
		qp->nb_obj = 1;
	qp->qps = rte_malloc(NULL,
			     qp->nb_obj * sizeof(struct mlx5_regex_hw_qp), 64);
	if (!qp->qps) {
		DRV_LOG(ERR, "Can't allocate qp array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_hw_qp(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create QP object.");
			goto err_btree;
		}
		nb_sq_config++;
	}

	/* Save the pointer to the global generation number for memory event checks. */
	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
				 rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "Error setting up MR btree.");
		goto err_btree;
	}

	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
	if (ret) {
		DRV_LOG(ERR, "Error setting up fastpath.");
		goto err_fp;
	}
	return 0;

err_fp:
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
err_btree:
	for (i = 0; i < nb_sq_config; i++)
		regex_ctrl_destroy_hw_qp(qp, i);
	regex_ctrl_destroy_cq(&qp->cq);
err_cq:
	rte_free(qp->qps);
	return ret;
}
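
/*
 * Usage sketch (illustrative only, kept as a comment so it is not built
 * into the driver): an application reaches mlx5_regex_qp_setup() through
 * the generic RegEx API. The device ID, queue index and descriptor count
 * below are arbitrary example values.
 *
 *	struct rte_regexdev_qp_conf conf = {
 *		.qp_conf_flags = RTE_REGEX_QUEUE_PAIR_CFG_OOS_F,
 *		.nb_desc = 256,
 *	};
 *
 *	if (rte_regexdev_queue_pair_setup(0, 0, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "RegEx QP setup failed.\n");
 */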