/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>

#include "nitrox_sym.h"
#include "nitrox_device.h"
#include "nitrox_sym_capabilities.h"
#include "nitrox_qp.h"
#include "nitrox_sym_reqmgr.h"
#include "nitrox_sym_ctx.h"
#include "nitrox_logs.h"

#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
#define MC_MAC_MISMATCH_ERR_CODE 0x4c
#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
#define MAX_IV_LEN 16

struct nitrox_sym_device {
	struct rte_cryptodev *cdev;
	struct nitrox_device *ndev;
};

/* Cipher opcodes */
enum flexi_cipher {
	CIPHER_NULL = 0,
	CIPHER_3DES_CBC,
	CIPHER_3DES_ECB,
	CIPHER_AES_CBC,
	CIPHER_AES_ECB,
	CIPHER_AES_CFB,
	CIPHER_AES_CTR,
	CIPHER_AES_GCM,
	CIPHER_AES_XTS,
	CIPHER_AES_CCM,
	CIPHER_AES_CBC_CTS,
	CIPHER_AES_ECB_CTS,
	CIPHER_INVALID
};

/* Auth opcodes */
enum flexi_auth {
	AUTH_NULL = 0,
	AUTH_MD5,
	AUTH_SHA1,
	AUTH_SHA2_SHA224,
	AUTH_SHA2_SHA256,
	AUTH_SHA2_SHA384,
	AUTH_SHA2_SHA512,
	AUTH_GMAC,
	AUTH_INVALID
};

uint8_t nitrox_sym_drv_id;
static const char nitrox_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_NITROX_PMD);
static const struct rte_driver nitrox_rte_sym_drv = {
	.name = nitrox_sym_drv_name,
	.alias = nitrox_sym_drv_name
};

static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
				     uint16_t qp_id);

static int
nitrox_sym_dev_config(struct rte_cryptodev *cdev,
		      struct rte_cryptodev_config *config)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (config->nb_queue_pairs > ndev->nr_queues) {
		NITROX_LOG(ERR, "Invalid queue pairs, max supported %d\n",
			   ndev->nr_queues);
		return -EINVAL;
	}

	return 0;
}

static int
nitrox_sym_dev_start(struct rte_cryptodev *cdev)
{
	/* SE cores initialization is done in PF */
	RTE_SET_USED(cdev);
	return 0;
}

static void
nitrox_sym_dev_stop(struct rte_cryptodev *cdev)
{
	/* SE cores cleanup is done in PF */
	RTE_SET_USED(cdev);
}

static int
nitrox_sym_dev_close(struct rte_cryptodev *cdev)
{
	int i, ret;

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = nitrox_sym_dev_qp_release(cdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static void
nitrox_sym_dev_info_get(struct rte_cryptodev *cdev,
			struct rte_cryptodev_info *info)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (!info)
		return;

	info->max_nb_queue_pairs = ndev->nr_queues;
	info->feature_flags = cdev->feature_flags;
	info->capabilities = nitrox_get_sym_capabilities();
	info->driver_id = nitrox_sym_drv_id;
	info->sym.max_nb_sessions = 0;
}

static void
nitrox_sym_dev_stats_get(struct rte_cryptodev *cdev,
			 struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
nitrox_sym_dev_stats_reset(struct rte_cryptodev *cdev)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static int
nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
			const struct rte_cryptodev_qp_conf *qp_conf,
			int socket_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp = NULL;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	if (cdev->data->queue_pairs[qp_id]) {
		err = nitrox_sym_dev_qp_release(cdev, qp_id);
		if (err)
			return err;
	}

	qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp),
				RTE_CACHE_LINE_SIZE,
				socket_id);
	if (!qp) {
		NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
		return -ENOMEM;
	}

	qp->qno = qp_id;
	err = nitrox_qp_setup(qp, ndev->bar_addr, cdev->data->name,
			      qp_conf->nb_descriptors, NPS_PKT_IN_INSTR_SIZE,
			      socket_id);
	if (unlikely(err))
		goto qp_setup_err;

	qp->sr_mp = nitrox_sym_req_pool_create(cdev, qp->count, qp_id,
					       socket_id);
	if (unlikely(!qp->sr_mp))
		goto req_pool_err;

	cdev->data->queue_pairs[qp_id] = qp;
	NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
	return 0;

req_pool_err:
	nitrox_qp_release(qp, ndev->bar_addr);
qp_setup_err:
	rte_free(qp);
	return err;
}

static int
nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	qp = cdev->data->queue_pairs[qp_id];
	if (!qp) {
		NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
		return 0;
	}

	if (!nitrox_qp_is_empty(qp)) {
		NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
		return -EAGAIN;
	}

	cdev->data->queue_pairs[qp_id] = NULL;
	err = nitrox_qp_release(qp, ndev->bar_addr);
	nitrox_sym_req_pool_free(qp->sr_mp);
	rte_free(qp);
	NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
	return err;
}

static unsigned int
nitrox_sym_dev_sess_get_size(__rte_unused struct rte_cryptodev *cdev)
{
	return sizeof(struct nitrox_crypto_ctx);
}

static enum nitrox_chain
get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
{
	enum nitrox_chain res = NITROX_CHAIN_NOT_SUPPORTED;

	if (unlikely(xform == NULL))
		return res;

	switch (xform->type) {
	case RTE_CRYPTO_SYM_XFORM_AUTH:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_NOT_SUPPORTED;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
			    xform->next->cipher.op ==
			    RTE_CRYPTO_CIPHER_OP_DECRYPT) {
				res = NITROX_CHAIN_AUTH_CIPHER;
			} else {
				NITROX_LOG(ERR, "auth op %d, cipher op %d\n",
					   xform->auth.op,
					   xform->next->cipher.op);
			}
		}
		break;
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_CIPHER_ONLY;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
			    xform->next->auth.op ==
			    RTE_CRYPTO_AUTH_OP_GENERATE) {
				res = NITROX_CHAIN_CIPHER_AUTH;
			} else {
				NITROX_LOG(ERR, "cipher op %d, auth op %d\n",
					   xform->cipher.op,
					   xform->next->auth.op);
			}
		}
		break;
	default:
		break;
	}

	return res;
}

static enum flexi_cipher
get_flexi_cipher_type(enum rte_crypto_cipher_algorithm algo, bool *is_aes)
{
	enum flexi_cipher type;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		type = CIPHER_AES_CBC;
		*is_aes = true;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		type = CIPHER_3DES_CBC;
		*is_aes = false;
		break;
	default:
		type = CIPHER_INVALID;
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		break;
	}

	return type;
}

static int
flexi_aes_keylen(size_t keylen, bool is_aes)
{
	int aes_keylen;

	if (!is_aes)
		return 0;

	switch (keylen) {
	case AES_KEYSIZE_128:
		aes_keylen = 1;
		break;
	case AES_KEYSIZE_192:
		aes_keylen = 2;
		break;
	case AES_KEYSIZE_256:
		aes_keylen = 3;
		break;
	default:
		NITROX_LOG(ERR, "Invalid keylen %zu\n", keylen);
		aes_keylen = -EINVAL;
		break;
	}

	return aes_keylen;
}

static bool
crypto_key_is_valid(struct rte_crypto_cipher_xform *xform,
		    struct flexi_crypto_context *fctx)
{
	if (unlikely(xform->key.length > sizeof(fctx->crypto.key))) {
		NITROX_LOG(ERR, "Invalid crypto key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}

static int
configure_cipher_ctx(struct rte_crypto_cipher_xform *xform,
		     struct nitrox_crypto_ctx *ctx)
{
	enum flexi_cipher type;
	bool cipher_is_aes = false;
	int aes_keylen;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_cipher_type(xform->algo, &cipher_is_aes);
	if (unlikely(type == CIPHER_INVALID))
		return -ENOTSUP;

	aes_keylen = flexi_aes_keylen(xform->key.length, cipher_is_aes);
	if (unlikely(aes_keylen < 0))
		return -EINVAL;

	if (unlikely(!cipher_is_aes && !crypto_key_is_valid(xform, fctx)))
		return -EINVAL;

	if (unlikely(xform->iv.length > MAX_IV_LEN))
		return -EINVAL;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.cipher_type = type;
	fctx->w0.aes_keylen = aes_keylen;
	fctx->w0.iv_source = IV_FROM_DPTR;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
	memcpy(fctx->crypto.key, xform->key.data, xform->key.length);

	ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	ctx->req_op = (xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
	ctx->iv.offset = xform->iv.offset;
	ctx->iv.length = xform->iv.length;
	return 0;
}

static enum flexi_auth
get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
{
	enum flexi_auth type;

	switch (algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		type = AUTH_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		type = AUTH_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		type = AUTH_SHA2_SHA256;
		break;
	default:
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		type = AUTH_INVALID;
		break;
	}

	return type;
}

static bool
auth_key_digest_is_valid(struct rte_crypto_auth_xform *xform,
			 struct flexi_crypto_context *fctx)
{
	if (unlikely(!xform->key.data && xform->key.length)) {
		NITROX_LOG(ERR, "Invalid auth key\n");
		return false;
	}

	if (unlikely(xform->key.length > sizeof(fctx->auth.opad))) {
		NITROX_LOG(ERR, "Invalid auth key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}

static int
configure_auth_ctx(struct rte_crypto_auth_xform *xform,
		   struct nitrox_crypto_ctx *ctx)
{
	enum flexi_auth type;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_auth_type(xform->algo);
	if (unlikely(type == AUTH_INVALID))
		return -ENOTSUP;

	if (unlikely(!auth_key_digest_is_valid(xform, fctx)))
		return -EINVAL;

	ctx->auth_op = xform->op;
	ctx->auth_algo = xform->algo;
	ctx->digest_length = xform->digest_length;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.hash_type = type;
	fctx->w0.auth_input_type = 1;
	fctx->w0.mac_len = xform->digest_length;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(&fctx->auth, 0, sizeof(fctx->auth));
	memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
	return 0;
}

static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
			      struct rte_crypto_sym_xform *xform,
			      struct rte_cryptodev_sym_session *sess,
			      struct rte_mempool *mempool)
{
	void *mp_obj;
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_auth_xform *auth_xform = NULL;

	if (rte_mempool_get(mempool, &mp_obj)) {
		NITROX_LOG(ERR, "Couldn't allocate context\n");
		return -ENOMEM;
	}

	ctx = mp_obj;
	ctx->nitrox_chain = get_crypto_chain_order(xform);
	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_AUTH:
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		break;
	case NITROX_CHAIN_AUTH_CIPHER:
		auth_xform = &xform->auth;
		cipher_xform = &xform->next->cipher;
		break;
	default:
		NITROX_LOG(ERR, "Crypto chain not supported\n");
		goto err;
	}

	if (cipher_xform && unlikely(configure_cipher_ctx(cipher_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure cipher ctx\n");
		goto err;
	}

	if (auth_xform && unlikely(configure_auth_ctx(auth_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure auth ctx\n");
		goto err;
	}

	ctx->iova = rte_mempool_virt2iova(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, ctx);
	return 0;
err:
	rte_mempool_put(mempool, mp_obj);
	return -EINVAL;
}

static void
nitrox_sym_dev_sess_clear(struct rte_cryptodev *cdev,
			  struct rte_cryptodev_sym_session *sess)
{
	struct nitrox_crypto_ctx *ctx =
		get_sym_session_private_data(sess, cdev->driver_id);
	struct rte_mempool *sess_mp;

	if (!ctx)
		return;

	memset(ctx, 0, sizeof(*ctx));
	sess_mp = rte_mempool_from_obj(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, NULL);
	rte_mempool_put(sess_mp, ctx);
}

static struct nitrox_crypto_ctx *
get_crypto_ctx(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session))
			return get_sym_session_private_data(op->sym->session,
							    nitrox_sym_drv_id);
	}

	return NULL;
}

static int
nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
{
	struct nitrox_crypto_ctx *ctx;
	struct nitrox_softreq *sr;
	int err;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	ctx = get_crypto_ctx(op);
	if (unlikely(!ctx)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
		return -ENOMEM;

	err = nitrox_process_se_req(qp->qno, op, ctx, sr);
	if (unlikely(err)) {
		rte_mempool_put(qp->sr_mp, sr);
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return err;
	}

	nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
	return 0;
}

static uint16_t
nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t free_slots = 0;
	uint16_t cnt = 0;
	bool err = false;

	free_slots = nitrox_qp_free_count(qp);
	if (nb_ops > free_slots)
		nb_ops = free_slots;

	for (cnt = 0; cnt < nb_ops; cnt++) {
		if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
			err = true;
			break;
		}
	}

	nitrox_ring_dbell(qp, cnt);
	qp->stats.enqueued_count += cnt;
	if (unlikely(err))
		qp->stats.enqueue_err_count++;

	return cnt;
}

static int
nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
{
	struct nitrox_softreq *sr;
	int ret;
	struct rte_crypto_op *op;

	sr = nitrox_qp_get_softreq(qp);
	ret = nitrox_check_se_req(sr, op_ptr);
	if (ret < 0)
		return -EAGAIN;

	op = *op_ptr;
	nitrox_qp_dequeue(qp);
	rte_mempool_put(qp->sr_mp, sr);
	if (!ret) {
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		qp->stats.dequeued_count++;

		return 0;
	}

	if (ret == MC_MAC_MISMATCH_ERR_CODE)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	qp->stats.dequeue_err_count++;
	return 0;
}

static uint16_t
nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t filled_slots = nitrox_qp_used_count(qp);
	int cnt = 0;

	if (nb_ops > filled_slots)
		nb_ops = filled_slots;

	for (cnt = 0; cnt < nb_ops; cnt++)
		if (nitrox_deq_single_op(qp, &ops[cnt]))
			break;

	return cnt;
}

static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
	.dev_configure = nitrox_sym_dev_config,
	.dev_start = nitrox_sym_dev_start,
	.dev_stop = nitrox_sym_dev_stop,
	.dev_close = nitrox_sym_dev_close,
	.dev_infos_get = nitrox_sym_dev_info_get,
	.stats_get = nitrox_sym_dev_stats_get,
	.stats_reset = nitrox_sym_dev_stats_reset,
	.queue_pair_setup = nitrox_sym_dev_qp_setup,
	.queue_pair_release = nitrox_sym_dev_qp_release,
	.sym_session_get_size = nitrox_sym_dev_sess_get_size,
	.sym_session_configure = nitrox_sym_dev_sess_configure,
	.sym_session_clear = nitrox_sym_dev_sess_clear
};

int
nitrox_sym_pmd_create(struct nitrox_device *ndev)
{
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = ndev->pdev->device.numa_node,
		.private_data_size = sizeof(struct nitrox_sym_device)
	};
	struct rte_cryptodev *cdev;

	rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name));
	snprintf(name + strlen(name),
		 RTE_CRYPTODEV_NAME_MAX_LEN - strlen(name), "_n5sym");
	ndev->rte_sym_dev.driver = &nitrox_rte_sym_drv;
	ndev->rte_sym_dev.numa_node = ndev->pdev->device.numa_node;
	ndev->rte_sym_dev.devargs = NULL;
	cdev = rte_cryptodev_pmd_create(name, &ndev->rte_sym_dev,
					&init_params);
	if (!cdev) {
		NITROX_LOG(ERR, "Cryptodev '%s' creation failed\n", name);
		return -ENODEV;
	}

	ndev->rte_sym_dev.name = cdev->data->name;
	cdev->driver_id = nitrox_sym_drv_id;
	cdev->dev_ops = &nitrox_cryptodev_ops;
	cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
	cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
	cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_HW_ACCELERATED |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	ndev->sym_dev = cdev->data->dev_private;
	ndev->sym_dev->cdev = cdev;
	ndev->sym_dev->ndev = ndev;
	NITROX_LOG(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d\n",
		   cdev->data->name, cdev->data->dev_id, nitrox_sym_drv_id);
	return 0;
}

int
nitrox_sym_pmd_destroy(struct nitrox_device *ndev)
{
	return rte_cryptodev_pmd_destroy(ndev->sym_dev->cdev);
}

static struct cryptodev_driver nitrox_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(nitrox_crypto_drv,
			       nitrox_rte_sym_drv,
			       nitrox_sym_drv_id);