/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>

#include <rte_log.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>
#include <rte_dev.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"
#include "mlx5_rxp.h"

/* WQEs per page: 4096B page / 64B WQE = 64. */
#define MLX5_REGEX_NUM_WQE_PER_PAGE (4096/64)

/**
 * Returns the number of SQ objects to be created for a queue.
 *
 * Descriptors are spread over SQ objects holding one page worth of WQEs
 * each, so the division is rounded up to give any remainder its own object.
 *
 * @param nb_desc
 *   The number of descriptors for the queue.
 *
 * @return
 *   The number of objects to be created.
 */
static uint16_t
regex_ctrl_get_nb_obj(uint16_t nb_desc)
{
	/* Ceiling division: !!(remainder) adds one object for a partial page. */
	return ((nb_desc / MLX5_REGEX_NUM_WQE_PER_PAGE) +
		!!(nb_desc % MLX5_REGEX_NUM_WQE_PER_PAGE));
}

/**
 * Destroy the CQ.
 *
 * @param cq
 *   Pointer to the CQ to be destroyed.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)
{
	mlx5_devx_cq_destroy(&cq->cq_obj);
	/* Zero the whole CQ state so stale handles cannot be reused. */
	memset(cq, 0, sizeof(*cq));
	return 0;
}

/**
 * Create the CQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param cq
 *   Pointer to the CQ to be created.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {
		.uar_page_id = priv->uar->page_id,
	};
	int ret;

	cq->ci = 0;
	/* cq->log_nb_desc is expected to be set by the caller beforehand. */
	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
				  &attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create CQ object.");
		memset(cq, 0, sizeof(*cq));
		/*
		 * NOTE(review): ENOMEM overrides whatever more specific error
		 * mlx5_devx_cq_create() may have reported - confirm intended.
		 */
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
}

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/**
 * Query the protection domain number (PDN) of a verbs PD.
 *
 * @param pd
 *   Pointer to the verbs protection domain object.
 * @param[out] pdn
 *   Where to store the retrieved PD number.
 *
 * @return
 *   0 on success, a non-zero error code from the glue call otherwise.
 */
static int
regex_get_pdn(void *pd, uint32_t *pdn)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Fail to get PD object info");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif

/**
 * Destroy the SQ object.
 *
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_destroy_sq(struct mlx5_regex_qp *qp, uint16_t q_ind)
{
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];

	mlx5_devx_sq_destroy(&sq->sq_obj);
	/* Zero the whole SQ state so stale handles cannot be reused. */
	memset(sq, 0, sizeof(*sq));
	return 0;
}

/**
 * Create the SQ object.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param qp
 *   Pointer to the QP element.
 * @param q_ind
 *   The index of the queue.
 * @param log_nb_desc
 *   Log 2 of the number of descriptors to be used.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
regex_ctrl_create_sq(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
		     uint16_t q_ind, uint16_t log_nb_desc)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_devx_create_sq_attr attr = {
		.user_index = q_ind,
		.cqn = qp->cq.cq_obj.cq->id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.uar_page = priv->uar->page_id,
		},
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	struct mlx5_regex_sq *sq = &qp->sqs[q_ind];
	uint32_t pd_num = 0;
	int ret;

	sq->log_nb_desc = log_nb_desc;
	sq->ci = 0;
	sq->pi = 0;
	/* The WQ attributes take the PD number, not the verbs PD pointer. */
	ret = regex_get_pdn(priv->pd, &pd_num);
	if (ret)
		return ret;
	attr.wq_attr.pd = pd_num;
	ret = mlx5_devx_sq_create(priv->ctx, &sq->sq_obj, log_nb_desc, &attr,
				  SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Can't create SQ object.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Move the freshly created SQ to the ready state before use. */
	ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
	if (ret) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		/* Tear down the half-initialized SQ before bailing out. */
		regex_ctrl_destroy_sq(qp, q_ind);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	return 0;
#else
	/* Without DV support the PD number cannot be queried. */
	(void)priv;
	(void)qp;
	(void)q_ind;
	(void)log_nb_desc;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif
}

/**
 * Setup the qp.
 *
 * @param dev
 *   Pointer to RegEx dev structure.
 * @param qp_ind
 *   The queue index to setup.
 * @param cfg
 *   The queue requested configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
213 */ 214 int 215 mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind, 216 const struct rte_regexdev_qp_conf *cfg) 217 { 218 struct mlx5_regex_priv *priv = dev->data->dev_private; 219 struct mlx5_regex_qp *qp; 220 int i; 221 int nb_sq_config = 0; 222 int ret; 223 uint16_t log_desc; 224 225 qp = &priv->qps[qp_ind]; 226 qp->flags = cfg->qp_conf_flags; 227 qp->cq.log_nb_desc = rte_log2_u32(cfg->nb_desc); 228 qp->nb_desc = 1 << qp->cq.log_nb_desc; 229 if (qp->flags & RTE_REGEX_QUEUE_PAIR_CFG_OOS_F) 230 qp->nb_obj = regex_ctrl_get_nb_obj(qp->nb_desc); 231 else 232 qp->nb_obj = 1; 233 qp->sqs = rte_malloc(NULL, 234 qp->nb_obj * sizeof(struct mlx5_regex_sq), 64); 235 if (!qp->sqs) { 236 DRV_LOG(ERR, "Can't allocate sq array memory."); 237 rte_errno = ENOMEM; 238 return -rte_errno; 239 } 240 log_desc = rte_log2_u32(qp->nb_desc / qp->nb_obj); 241 ret = regex_ctrl_create_cq(priv, &qp->cq); 242 if (ret) { 243 DRV_LOG(ERR, "Can't create cq."); 244 goto err_cq; 245 } 246 for (i = 0; i < qp->nb_obj; i++) { 247 ret = regex_ctrl_create_sq(priv, qp, i, log_desc); 248 if (ret) { 249 DRV_LOG(ERR, "Can't create sq."); 250 goto err_btree; 251 } 252 nb_sq_config++; 253 } 254 255 ret = mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N, 256 rte_socket_id()); 257 if (ret) { 258 DRV_LOG(ERR, "Error setting up mr btree"); 259 goto err_btree; 260 } 261 262 ret = mlx5_regexdev_setup_fastpath(priv, qp_ind); 263 if (ret) { 264 DRV_LOG(ERR, "Error setting up fastpath"); 265 goto err_fp; 266 } 267 return 0; 268 269 err_fp: 270 mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh); 271 err_btree: 272 for (i = 0; i < nb_sq_config; i++) 273 regex_ctrl_destroy_sq(qp, i); 274 regex_ctrl_destroy_cq(&qp->cq); 275 err_cq: 276 rte_free(qp->sqs); 277 return ret; 278 } 279