/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_malloc.h>

#include "ipsec_mb_private.h"

#define IMB_MP_REQ_VER_STR "1.1.0"

/** Configure device */
int
ipsec_mb_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
int
ipsec_mb_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
void
ipsec_mb_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
int
ipsec_mb_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Get device statistics */
void
ipsec_mb_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
		if (qp == NULL) {
			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
			continue;
		}

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

/** Reset device statistics */
void
ipsec_mb_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];

		/* Skip queue pairs that were never set up */
		if (qp == NULL)
			continue;

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

/** Get device info */
void
ipsec_mb_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_info =
		&ipsec_mb_pmds[internals->pmd_type];

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = pmd_info->caps;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		/* No limit of number of sessions */
		dev_info->sym.max_nb_sessions = 0;
	}
}

/** Forward a queue pair set/free request from a secondary process to the
 *  primary over the EAL multi-process channel and return its result.
 */
static int
ipsec_mb_secondary_qp_op(int dev_id, int qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, enum ipsec_mb_mp_req_type op_type)
{
	int ret;
	struct rte_mp_msg qp_req_msg;
	struct rte_mp_msg *qp_resp_msg;
	struct rte_mp_reply qp_resp;
	struct ipsec_mb_mp_param *req_param;
	struct ipsec_mb_mp_param *resp_param;
	struct timespec ts = {.tv_sec = 1, .tv_nsec = 0};

	/* Zero the whole request message, not just the name string */
	memset(&qp_req_msg, 0, sizeof(qp_req_msg));
	memcpy(qp_req_msg.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));
	req_param = (struct ipsec_mb_mp_param *)&qp_req_msg.param;

	qp_req_msg.len_param = sizeof(struct ipsec_mb_mp_param);
	req_param->type = op_type;
	req_param->dev_id = dev_id;
	req_param->qp_id = qp_id;
	req_param->socket_id = socket_id;
	req_param->process_id = getpid();
	if (qp_conf) {
		req_param->nb_descriptors = qp_conf->nb_descriptors;
		req_param->mp_session = (void *)qp_conf->mp_session;
	}

	qp_req_msg.num_fds = 0;
	ret = rte_mp_request_sync(&qp_req_msg, &qp_resp, &ts);
	if (ret) {
		RTE_LOG(ERR, USER1, "Create MP request to primary process failed.\n");
		return -1;
	}
	qp_resp_msg = &qp_resp.msgs[0];
	resp_param = (struct ipsec_mb_mp_param *)qp_resp_msg->param;
	ret = resp_param->result;
	/* The reply buffer is allocated by rte_mp_request_sync() */
	free(qp_resp.msgs);

	return ret;
}
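
/*
 * Hedged usage sketch (illustrative, not code from this file): the request
 * path above only works if the primary process has registered a handler
 * for IPSEC_MB_MP_MSG; this driver does the equivalent at device creation
 * time. A minimal registration could look like:
 *
 *	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *		rte_mp_action_register(IPSEC_MB_MP_MSG, ipsec_mb_ipc_request);
 *
 * rte_mp_request_sync() then blocks in the secondary, up to the one-second
 * timeout set above, until ipsec_mb_ipc_request() replies from the primary.
 */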

/** Release queue pair */
int
ipsec_mb_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct rte_ring *r = NULL;

	if (!qp)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		r = rte_ring_lookup(qp->name);
		if (r)
			rte_ring_free(r);

#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
		if (qp->mb_mgr)
			free_mb_mgr(qp->mb_mgr);
#else
		if (qp->mb_mgr_mz) {
			rte_memzone_free(qp->mb_mgr_mz);
			qp->mb_mgr = NULL;
		}
#endif
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	} else { /* secondary process */
		return ipsec_mb_secondary_qp_op(dev->data->dev_id, qp_id,
					NULL, 0, RTE_IPSEC_MB_MP_REQ_QP_FREE);
	}
	return 0;
}

/** Set a unique name for the queue pair */
int
ipsec_mb_qp_set_unique_name(struct rte_cryptodev *dev,
					   struct ipsec_mb_qp *qp)
{
	uint32_t n =
	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
		     dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring to place processed operations on */
static struct rte_ring
*ipsec_mb_qp_create_processed_ops_ring(
	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));

	if (n >= sizeof(ring_name))
		return NULL;

	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			IPSEC_MB_LOG(
			    INFO, "Reusing existing ring %s for processed ops",
			    ring_name);
			return r;
		}
		IPSEC_MB_LOG(
		    ERR, "Unable to reuse existing ring %s for processed ops",
		    ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
static IMB_MGR *
ipsec_mb_alloc_mgr_from_memzone(const struct rte_memzone **mb_mgr_mz,
		const char *mb_mgr_mz_name)
{
	IMB_MGR *mb_mgr;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
		if (*mb_mgr_mz == NULL) {
			*mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
					imb_get_mb_mgr_size(),
					rte_socket_id(), 0);
		}
		if (*mb_mgr_mz == NULL) {
			IPSEC_MB_LOG(DEBUG, "Error allocating memzone for %s",
					mb_mgr_mz_name);
			return NULL;
		}
		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
		init_mb_mgr_auto(mb_mgr, NULL);
	} else {
		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
		if (*mb_mgr_mz == NULL) {
			IPSEC_MB_LOG(ERR,
				"Secondary can't find %s mz, did primary create it?",
				mb_mgr_mz_name);
			return NULL;
		}
		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
	}
	return mb_mgr;
}
#endif
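
/*
 * Hedged sketch of the sharing contract above (assumes intel-ipsec-mb
 * >= 1.1.0): both processes must derive the same memzone name so the
 * secondary's rte_memzone_lookup() finds the zone the primary reserved.
 * Only the primary passes a non-zero final argument to
 * imb_set_pointers_mb_mgr() and runs init_mb_mgr_auto(), so a secondary
 * re-derives its per-process function pointers without resetting shared
 * state. A hypothetical caller:
 *
 *	const struct rte_memzone *mz;
 *	IMB_MGR *mgr =
 *		ipsec_mb_alloc_mgr_from_memzone(&mz, "IMB_MGR_DEV_0_QP_0");
 *	if (mgr == NULL)
 *		return -ENOMEM;
 */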

/** Setup a queue pair */
int
ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				const struct rte_cryptodev_qp_conf *qp_conf,
				int socket_id)
{
	struct ipsec_mb_qp *qp = NULL;
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];
	uint32_t qp_size;
	int ret;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
		IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does not support multiprocess, "
				"the minimum version required for this feature is %s.",
				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
		return -EINVAL;
#endif
		qp = dev->data->queue_pairs[qp_id];
		if (qp == NULL) {
			IPSEC_MB_LOG(DEBUG, "Secondary process setting up device qp.");
			return ipsec_mb_secondary_qp_op(dev->data->dev_id,
					qp_id, qp_conf, socket_id,
					RTE_IPSEC_MB_MP_REQ_QP_SET);
		}
	} else {
		/* Free memory prior to re-allocation if needed. */
		if (dev->data->queue_pairs[qp_id] != NULL)
			ipsec_mb_qp_release(dev, qp_id);

		qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
		/* Allocate the queue pair data structure. */
		qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
					RTE_CACHE_LINE_SIZE, socket_id);
		if (qp == NULL)
			return -ENOMEM;
	}

#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
	qp->mb_mgr = alloc_init_mb_mgr();
#else
	char mz_name[IPSEC_MB_MAX_MZ_NAME];
	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
			dev->data->dev_id, qp_id);
	qp->mb_mgr = ipsec_mb_alloc_mgr_from_memzone(&(qp->mb_mgr_mz),
			mz_name);
#endif
	if (qp->mb_mgr == NULL) {
		ret = -ENOMEM;
		goto qp_setup_cleanup;
	}

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;
	if (ipsec_mb_qp_set_unique_name(dev, qp)) {
		ret = -EINVAL;
		goto qp_setup_cleanup;
	}

	qp->pmd_type = internals->pmd_type;
	qp->sess_mp = qp_conf->mp_session;

	qp->ingress_queue = ipsec_mb_qp_create_processed_ops_ring(qp,
		qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL) {
		ret = -EINVAL;
		goto qp_setup_cleanup;
	}

	memset(&qp->stats, 0, sizeof(qp->stats));

	if (pmd_data->queue_pair_configure) {
		ret = pmd_data->queue_pair_configure(qp);
		if (ret < 0)
			goto qp_setup_cleanup;
	}

	return 0;

qp_setup_cleanup:
#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
	if (qp->mb_mgr)
		free_mb_mgr(qp->mb_mgr);
#else
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return ret;
	if (qp->mb_mgr_mz)
		rte_memzone_free(qp->mb_mgr_mz);
#endif
	rte_free(qp);
	return ret;
}
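
/*
 * Caller-side sketch (standard rte_cryptodev API, not specific to this
 * PMD; dev_id and session_pool are hypothetical): the only qp_conf fields
 * ipsec_mb_qp_setup() consumes are nb_descriptors (the size of the ingress
 * ring) and mp_session:
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *	};
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		return -1;
 */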

int
ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_mp_msg ipc_resp;
	struct ipsec_mb_mp_param *resp_param =
		(struct ipsec_mb_mp_param *)ipc_resp.param;
	const struct ipsec_mb_mp_param *req_param =
		(const struct ipsec_mb_mp_param *)mp_msg->param;

	int ret;
	struct rte_cryptodev *dev;
	struct ipsec_mb_qp *qp;
	struct rte_cryptodev_qp_conf queue_conf;
	int dev_id = req_param->dev_id;
	int qp_id = req_param->qp_id;

	queue_conf.nb_descriptors = req_param->nb_descriptors;
	queue_conf.mp_session = (struct rte_mempool *)req_param->mp_session;

	/* Zero the whole reply message and set its payload length */
	memset(&ipc_resp, 0, sizeof(ipc_resp));
	ipc_resp.len_param = sizeof(struct ipsec_mb_mp_param);
	memcpy(ipc_resp.name, IPSEC_MB_MP_MSG, sizeof(IPSEC_MB_MP_MSG));

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		goto out;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	switch (req_param->type) {
	case RTE_IPSEC_MB_MP_REQ_QP_SET:
		qp = dev->data->queue_pairs[qp_id];
		if (qp) {
			CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
					qp_id, dev_id);
			goto out;
		}

		ret = ipsec_mb_qp_setup(dev, qp_id, &queue_conf,
				req_param->socket_id);
		if (!ret) {
			qp = dev->data->queue_pairs[qp_id];
			if (!qp) {
				CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
						qp_id, dev_id);
				goto out;
			}
			qp->qp_used_by_pid = req_param->process_id;
		}
		resp_param->result = ret;
		break;
	case RTE_IPSEC_MB_MP_REQ_QP_FREE:
		qp = dev->data->queue_pairs[qp_id];
		if (!qp) {
			CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
					qp_id, dev_id);
			goto out;
		}

		/* Only the process that set the qp up may free it */
		if (qp->qp_used_by_pid != req_param->process_id) {
			CDEV_LOG_ERR("Unable to release qp_id=%d", qp_id);
			goto out;
		}

		qp->qp_used_by_pid = 0;
		resp_param->result = ipsec_mb_qp_release(dev, qp_id);
		break;
	default:
		CDEV_LOG_ERR("Invalid mp request type");
	}

out:
	ret = rte_mp_reply(&ipc_resp, peer);
	return ret;
}

/** Return the size of the specific pmd session structure */
unsigned
ipsec_mb_sym_session_get_size(struct rte_cryptodev *dev)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];

	return pmd_data->session_priv_size;
}

/** Configure pmd specific multi-buffer session from a crypto xform chain */
int
ipsec_mb_sym_session_configure(
	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
	struct rte_cryptodev_sym_session *sess)
{
	struct ipsec_mb_dev_private *internals = dev->data->dev_private;
	struct ipsec_mb_internals *pmd_data =
		&ipsec_mb_pmds[internals->pmd_type];
	IMB_MGR *mb_mgr = alloc_init_mb_mgr();
	int ret = 0;

	if (!mb_mgr)
		return -ENOMEM;

	if (unlikely(sess == NULL)) {
		IPSEC_MB_LOG(ERR, "invalid session struct");
		free_mb_mgr(mb_mgr);
		return -EINVAL;
	}

	ret = (*pmd_data->session_configure)(mb_mgr,
			CRYPTODEV_GET_SYM_SESS_PRIV(sess), xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "failed to configure session parameters");

		free_mb_mgr(mb_mgr);
		return ret;
	}

	free_mb_mgr(mb_mgr);
	return 0;
}

/** Clear the session memory */
void
ipsec_mb_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
		struct rte_cryptodev_sym_session *sess __rte_unused)
{}
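
/*
 * Caller-side sketch for the stats hooks above (standard rte_cryptodev
 * API; dev_id is hypothetical): rte_cryptodev_stats_get() lands in
 * ipsec_mb_stats_get(), which sums the per-queue-pair counters, and
 * rte_cryptodev_stats_reset() zeroes them via ipsec_mb_stats_reset():
 *
 *	struct rte_cryptodev_stats stats;
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq=%" PRIu64 " deq=%" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count);
 *	rte_cryptodev_stats_reset(dev_id);
 */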