/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>

#include <rte_log.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>
#include <rte_dev.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"
#include "mlx5_rxp.h"

#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)

#define MLX5_REGEX_WQE_LOG_NUM(has_umr, log_desc) \
	((has_umr) ? ((log_desc) + 2) : (log_desc))

/**
 * Return the number of SQ objects to be created for the queue.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}

/**
 * Destroy the CQ object.
 *
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	memset(cq, 0, sizeof(*cq));
	return 0;
}

/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = priv->uar->page_id,
	};
	int ret;

	cq->ci = 0;
	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
				  &attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ object.");
		memset(cq, 0, sizeof(*cq));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroy the SQ object.
 *
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_sq(struct mlx5_regex_qp *qp, uint16_t q_ind)
{
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];

	mlx5_devx_sq_destroy(&sq->sq_obj);
	memset(sq, 0, sizeof(*sq));
	return 0;
}

/**
 * Create the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		     uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_create_sq_attr attr = {
		.user_index = q_ind,
		.cqn = qp->cq.cq_obj.cq->id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.uar_page = priv->uar->page_id,
		},
		.ts_format = mlx5_ts_format_conv(priv->sq_ts_format),
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
	uint32_t pd_num = 0;
	int ret;

	sq->log_nb_desc = log_nb_desc;
	sq->sqn = q_ind;
	sq->ci = 0;
	sq->pi = 0;
	ret = regex_get_pdn(priv->pd, &pd_num);
	if (ret)
		return ret;
	attr.wq_attr.pd = pd_num;
	ret = mlx5_devx_sq_create(priv->ctx, &sq->sq_obj,
			MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
			&attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create SQ object.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		regex_ctrl_destroy_sq(qp, q_ind);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
#else
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}

/**
 * Set up the QP.
 *
 * @param dev
 *   Pointer to RegEx dev structure.
 * @param qp_ind
 *   The queue index to set up.
 * @param cfg
 *   The queue requested configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
		    const struct rte_regexdev_qp_conf *cfg)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *qp;
	int i;
	int nb_sq_config = 0;
	int ret;
	uint16_t log_desc;

	qp = &priv->qps[qp_ind];
	qp->flags = cfg->qp_conf_flags;
	log_desc = rte_log2_u32(cfg->nb_desc);
	/*
	 * UMR mode requires two WQEs (a UMR WQE plus a RegEx WQE) per
	 * descriptor. For the CQ, double the number of CQEs. For the SQ,
	 * the UMR and RegEx WQEs of one descriptor consume 4 WQEBBs, so
	 * expand the WQE number by a factor of 4.
	 */
	qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);
	qp->nb_desc = 1 << log_desc;
	if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F)
		qp->nb_obj = regex_ctrl_get_nb_obj
			(1 << MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_desc));
	else
		qp->nb_obj = 1;
	qp->sqs = rte_malloc(NULL,
			     qp->nb_obj * sizeof(struct mlx5_regex_sq), 64);
	if (!qp->sqs) {
		DRV_LOG(ERR, "Can't allocate sq array memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj);
	ret = regex_ctrl_create_cq(priv, &qp->cq);
	if (ret) {
		DRV_LOG(ERR, "Can't create cq.");
		goto err_cq;
	}
	for (i = 0; i < qp->nb_obj; i++) {
		ret = regex_ctrl_create_sq(priv, qp, i, log_desc);
		if (ret) {
			DRV_LOG(ERR, "Can't create sq.");
			goto err_btree;
		}
		nb_sq_config++;
	}

	/* Save pointer of global generation number to check memory event. */
	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
	ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
				 rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "Error setting up mr btree");
		goto err_btree;
	}

	ret = mlx5_regexdev_setup_fastpath(priv, qp_ind);
	if (ret) {
		DRV_LOG(ERR, "Error setting up fastpath");
		goto err_fp;
	}
	return 0;

err_fp:
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
err_btree:
	for (i = 0; i < nb_sq_config; i++)
		regex_ctrl_destroy_sq(qp, i);
	regex_ctrl_destroy_cq(&qp->cq);
err_cq:
	rte_free(qp->sqs);
	return ret;
}
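For context, a minimal sketch of how an application reaches mlx5_regex_qp_setup(): the generic regexdev call rte_regexdev_queue_pair_setup() dispatches to the PMD's queue-pair setup callback, which this driver implements with the function above. The device ID, queue index, descriptor count, and helper name below are illustrative assumptions, not values taken from this file.

#include <rte_regexdev.h>

/* Hypothetical usage sketch (not part of the driver): configure queue
 * pair 0 of RegEx device dev_id with 256 descriptors and out-of-order
 * completion enabled, the path that exercises the OOS branch above.
 */
static int
example_setup_regex_qp(uint8_t dev_id)
{
	struct rte_regexdev_qp_conf qp_conf = {
		.qp_conf_flags = RTE_REGEX_QUEUE_PAIR_CFG_OOS_F,
		.nb_desc = 256,
	};

	return rte_regexdev_queue_pair_setup(dev_id, 0, &qp_conf);
}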