/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>

#include "nitrox_sym.h"
#include "nitrox_device.h"
#include "nitrox_sym_capabilities.h"
#include "nitrox_qp.h"
#include "nitrox_sym_reqmgr.h"
#include "nitrox_sym_ctx.h"
#include "nitrox_logs.h"

#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
#define MC_MAC_MISMATCH_ERR_CODE 0x4c
#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
#define MAX_IV_LEN 16

struct nitrox_sym_device {
	struct rte_cryptodev *cdev;
	struct nitrox_device *ndev;
};

/* Cipher opcodes */
enum flexi_cipher {
	CIPHER_NULL = 0,
	CIPHER_3DES_CBC,
	CIPHER_3DES_ECB,
	CIPHER_AES_CBC,
	CIPHER_AES_ECB,
	CIPHER_AES_CFB,
	CIPHER_AES_CTR,
	CIPHER_AES_GCM,
	CIPHER_AES_XTS,
	CIPHER_AES_CCM,
	CIPHER_AES_CBC_CTS,
	CIPHER_AES_ECB_CTS,
	CIPHER_INVALID
};

/* Auth opcodes */
enum flexi_auth {
	AUTH_NULL = 0,
	AUTH_MD5,
	AUTH_SHA1,
	AUTH_SHA2_SHA224,
	AUTH_SHA2_SHA256,
	AUTH_SHA2_SHA384,
	AUTH_SHA2_SHA512,
	AUTH_GMAC,
	AUTH_INVALID
};

uint8_t nitrox_sym_drv_id;
static const char nitrox_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_NITROX_PMD);
static const struct rte_driver nitrox_rte_sym_drv = {
	.name = nitrox_sym_drv_name,
	.alias = nitrox_sym_drv_name
};

static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
				     uint16_t qp_id);

static int
nitrox_sym_dev_config(struct rte_cryptodev *cdev,
		      struct rte_cryptodev_config *config)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (config->nb_queue_pairs > ndev->nr_queues) {
		NITROX_LOG(ERR, "Invalid queue pairs, max supported %d\n",
			   ndev->nr_queues);
		return -EINVAL;
	}

	return 0;
}

static int
nitrox_sym_dev_start(struct rte_cryptodev *cdev)
{
	/* SE cores initialization is done in PF */
	RTE_SET_USED(cdev);
	return 0;
}

static void
nitrox_sym_dev_stop(struct rte_cryptodev *cdev)
{
	/* SE cores cleanup is done in PF */
	RTE_SET_USED(cdev);
}

static int
nitrox_sym_dev_close(struct rte_cryptodev *cdev)
{
	int i, ret;

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = nitrox_sym_dev_qp_release(cdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static void
nitrox_sym_dev_info_get(struct rte_cryptodev *cdev,
			struct rte_cryptodev_info *info)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (!info)
		return;

	info->max_nb_queue_pairs = ndev->nr_queues;
	info->feature_flags = cdev->feature_flags;
	info->capabilities = nitrox_get_sym_capabilities();
	info->driver_id = nitrox_sym_drv_id;
	info->sym.max_nb_sessions = 0;
}

static void
nitrox_sym_dev_stats_get(struct rte_cryptodev *cdev,
			 struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
nitrox_sym_dev_stats_reset(struct rte_cryptodev *cdev)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}
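
/*
 * Queue pair management. Each queue pair owns a command ring programmed
 * through the device BAR (nitrox_qp_setup()) and a pool of software
 * requests (sr_mp) consumed by the enqueue/dequeue datapath below.
 */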
static int
nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
			const struct rte_cryptodev_qp_conf *qp_conf,
			int socket_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp = NULL;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	if (cdev->data->queue_pairs[qp_id]) {
		err = nitrox_sym_dev_qp_release(cdev, qp_id);
		if (err)
			return err;
	}

	qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp),
				RTE_CACHE_LINE_SIZE,
				socket_id);
	if (!qp) {
		NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
		return -ENOMEM;
	}

	qp->qno = qp_id;
	err = nitrox_qp_setup(qp, ndev->bar_addr, cdev->data->name,
			      qp_conf->nb_descriptors, NPS_PKT_IN_INSTR_SIZE,
			      socket_id);
	if (unlikely(err))
		goto qp_setup_err;

	qp->sr_mp = nitrox_sym_req_pool_create(cdev, qp->count, qp_id,
					       socket_id);
	if (unlikely(!qp->sr_mp)) {
		err = -ENOMEM;
		goto req_pool_err;
	}

	cdev->data->queue_pairs[qp_id] = qp;
	NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
	return 0;

req_pool_err:
	nitrox_qp_release(qp, ndev->bar_addr);
qp_setup_err:
	rte_free(qp);
	return err;
}

static int
nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	qp = cdev->data->queue_pairs[qp_id];
	if (!qp) {
		NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
		return 0;
	}

	if (!nitrox_qp_is_empty(qp)) {
		NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
		return -EAGAIN;
	}

	cdev->data->queue_pairs[qp_id] = NULL;
	err = nitrox_qp_release(qp, ndev->bar_addr);
	nitrox_sym_req_pool_free(qp->sr_mp);
	rte_free(qp);
	NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
	return err;
}

static unsigned int
nitrox_sym_dev_sess_get_size(__rte_unused struct rte_cryptodev *cdev)
{
	return sizeof(struct nitrox_crypto_ctx);
}
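
/*
 * Transform chain classification. Cipher followed by auth generate and
 * auth verify followed by cipher decrypt map to the chained modes used by
 * session configure; a lone cipher transform is classified as cipher-only
 * but is rejected later in nitrox_sym_dev_sess_configure().
 */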
static enum nitrox_chain
get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
{
	enum nitrox_chain res = NITROX_CHAIN_NOT_SUPPORTED;

	if (unlikely(xform == NULL))
		return res;

	switch (xform->type) {
	case RTE_CRYPTO_SYM_XFORM_AUTH:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_NOT_SUPPORTED;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
			    xform->next->cipher.op ==
			    RTE_CRYPTO_CIPHER_OP_DECRYPT) {
				res = NITROX_CHAIN_AUTH_CIPHER;
			} else {
				NITROX_LOG(ERR, "auth op %d, cipher op %d\n",
					   xform->auth.op,
					   xform->next->cipher.op);
			}
		}
		break;
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_CIPHER_ONLY;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
			    xform->next->auth.op ==
			    RTE_CRYPTO_AUTH_OP_GENERATE) {
				res = NITROX_CHAIN_CIPHER_AUTH;
			} else {
				NITROX_LOG(ERR, "cipher op %d, auth op %d\n",
					   xform->cipher.op,
					   xform->next->auth.op);
			}
		}
		break;
	default:
		break;
	}

	return res;
}

static enum flexi_cipher
get_flexi_cipher_type(enum rte_crypto_cipher_algorithm algo, bool *is_aes)
{
	enum flexi_cipher type;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		type = CIPHER_AES_CBC;
		*is_aes = true;
		break;
	default:
		type = CIPHER_INVALID;
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		break;
	}

	return type;
}

static int
flexi_aes_keylen(size_t keylen, bool is_aes)
{
	int aes_keylen;

	if (!is_aes)
		return 0;

	switch (keylen) {
	case AES_KEYSIZE_128:
		aes_keylen = 1;
		break;
	case AES_KEYSIZE_192:
		aes_keylen = 2;
		break;
	case AES_KEYSIZE_256:
		aes_keylen = 3;
		break;
	default:
		NITROX_LOG(ERR, "Invalid keylen %zu\n", keylen);
		aes_keylen = -EINVAL;
		break;
	}

	return aes_keylen;
}

static bool
crypto_key_is_valid(struct rte_crypto_cipher_xform *xform,
		    struct flexi_crypto_context *fctx)
{
	if (unlikely(xform->key.length > sizeof(fctx->crypto.key))) {
		NITROX_LOG(ERR, "Invalid crypto key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}

static int
configure_cipher_ctx(struct rte_crypto_cipher_xform *xform,
		     struct nitrox_crypto_ctx *ctx)
{
	enum flexi_cipher type;
	bool cipher_is_aes = false;
	int aes_keylen;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_cipher_type(xform->algo, &cipher_is_aes);
	if (unlikely(type == CIPHER_INVALID))
		return -ENOTSUP;

	aes_keylen = flexi_aes_keylen(xform->key.length, cipher_is_aes);
	if (unlikely(aes_keylen < 0))
		return -EINVAL;

	if (unlikely(!cipher_is_aes && !crypto_key_is_valid(xform, fctx)))
		return -EINVAL;

	if (unlikely(xform->iv.length > MAX_IV_LEN))
		return -EINVAL;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.cipher_type = type;
	fctx->w0.aes_keylen = aes_keylen;
	fctx->w0.iv_source = IV_FROM_DPTR;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
	memcpy(fctx->crypto.key, xform->key.data, xform->key.length);

	ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	ctx->req_op = (xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
	ctx->iv.offset = xform->iv.offset;
	ctx->iv.length = xform->iv.length;
	return 0;
}
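
/*
 * Auth transform handling. HMAC with SHA1, SHA224 or SHA256 is mapped to
 * the flexi-crypto hash type; the HMAC key is copied into the opad area
 * of the context and the requested digest length is programmed as mac_len.
 */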
static enum flexi_auth
get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
{
	enum flexi_auth type;

	switch (algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		type = AUTH_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		type = AUTH_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		type = AUTH_SHA2_SHA256;
		break;
	default:
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		type = AUTH_INVALID;
		break;
	}

	return type;
}

static bool
auth_key_digest_is_valid(struct rte_crypto_auth_xform *xform,
			 struct flexi_crypto_context *fctx)
{
	if (unlikely(!xform->key.data && xform->key.length)) {
		NITROX_LOG(ERR, "Invalid auth key\n");
		return false;
	}

	if (unlikely(xform->key.length > sizeof(fctx->auth.opad))) {
		NITROX_LOG(ERR, "Invalid auth key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}

static int
configure_auth_ctx(struct rte_crypto_auth_xform *xform,
		   struct nitrox_crypto_ctx *ctx)
{
	enum flexi_auth type;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_auth_type(xform->algo);
	if (unlikely(type == AUTH_INVALID))
		return -ENOTSUP;

	if (unlikely(!auth_key_digest_is_valid(xform, fctx)))
		return -EINVAL;

	ctx->auth_op = xform->op;
	ctx->auth_algo = xform->algo;
	ctx->digest_length = xform->digest_length;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.hash_type = type;
	fctx->w0.auth_input_type = 1;
	fctx->w0.mac_len = xform->digest_length;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(&fctx->auth, 0, sizeof(fctx->auth));
	memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
	return 0;
}

static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
			      struct rte_crypto_sym_xform *xform,
			      struct rte_cryptodev_sym_session *sess,
			      struct rte_mempool *mempool)
{
	void *mp_obj;
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_auth_xform *auth_xform = NULL;

	if (rte_mempool_get(mempool, &mp_obj)) {
		NITROX_LOG(ERR, "Couldn't allocate context\n");
		return -ENOMEM;
	}

	ctx = mp_obj;
	ctx->nitrox_chain = get_crypto_chain_order(xform);
	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_AUTH:
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		break;
	case NITROX_CHAIN_AUTH_CIPHER:
		auth_xform = &xform->auth;
		cipher_xform = &xform->next->cipher;
		break;
	default:
		NITROX_LOG(ERR, "Crypto chain not supported\n");
		goto err;
	}

	if (cipher_xform && unlikely(configure_cipher_ctx(cipher_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure cipher ctx\n");
		goto err;
	}

	if (auth_xform && unlikely(configure_auth_ctx(auth_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure auth ctx\n");
		goto err;
	}

	ctx->iova = rte_mempool_virt2iova(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, ctx);
	return 0;
err:
	rte_mempool_put(mempool, mp_obj);
	return -EINVAL;
}

static void
nitrox_sym_dev_sess_clear(struct rte_cryptodev *cdev,
			  struct rte_cryptodev_sym_session *sess)
{
	struct nitrox_crypto_ctx *ctx = get_sym_session_private_data(sess,
							cdev->driver_id);
	struct rte_mempool *sess_mp;

	if (!ctx)
		return;

	memset(ctx, 0, sizeof(*ctx));
	sess_mp = rte_mempool_from_obj(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, NULL);
	rte_mempool_put(sess_mp, ctx);
}
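
/*
 * Datapath. Enqueue pulls a software request from the queue pair's request
 * pool, builds the SE instruction from the session context and rings the
 * doorbell once per burst; dequeue polls for completions and maps the
 * firmware MAC mismatch code (0x4c) to RTE_CRYPTO_OP_STATUS_AUTH_FAILED.
 */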
static struct nitrox_crypto_ctx *
get_crypto_ctx(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session))
			return get_sym_session_private_data(op->sym->session,
							    nitrox_sym_drv_id);
	}

	return NULL;
}

static int
nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
{
	struct nitrox_crypto_ctx *ctx;
	struct nitrox_softreq *sr;
	int err;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	ctx = get_crypto_ctx(op);
	if (unlikely(!ctx)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
		return -ENOMEM;

	err = nitrox_process_se_req(qp->qno, op, ctx, sr);
	if (unlikely(err)) {
		rte_mempool_put(qp->sr_mp, sr);
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return err;
	}

	nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
	return 0;
}

static uint16_t
nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t free_slots = 0;
	uint16_t cnt = 0;
	bool err = false;

	free_slots = nitrox_qp_free_count(qp);
	if (nb_ops > free_slots)
		nb_ops = free_slots;

	for (cnt = 0; cnt < nb_ops; cnt++) {
		if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
			err = true;
			break;
		}
	}

	nitrox_ring_dbell(qp, cnt);
	qp->stats.enqueued_count += cnt;
	if (unlikely(err))
		qp->stats.enqueue_err_count++;

	return cnt;
}

static int
nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
{
	struct nitrox_softreq *sr;
	int ret;
	struct rte_crypto_op *op;

	sr = nitrox_qp_get_softreq(qp);
	ret = nitrox_check_se_req(sr, op_ptr);
	if (ret < 0)
		return -EAGAIN;

	op = *op_ptr;
	nitrox_qp_dequeue(qp);
	rte_mempool_put(qp->sr_mp, sr);
	if (!ret) {
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		qp->stats.dequeued_count++;

		return 0;
	}

	if (ret == MC_MAC_MISMATCH_ERR_CODE)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	qp->stats.dequeue_err_count++;
	return 0;
}

static uint16_t
nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t filled_slots = nitrox_qp_used_count(qp);
	int cnt = 0;

	if (nb_ops > filled_slots)
		nb_ops = filled_slots;

	for (cnt = 0; cnt < nb_ops; cnt++)
		if (nitrox_deq_single_op(qp, &ops[cnt]))
			break;

	return cnt;
}

static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
	.dev_configure = nitrox_sym_dev_config,
	.dev_start = nitrox_sym_dev_start,
	.dev_stop = nitrox_sym_dev_stop,
	.dev_close = nitrox_sym_dev_close,
	.dev_infos_get = nitrox_sym_dev_info_get,
	.stats_get = nitrox_sym_dev_stats_get,
	.stats_reset = nitrox_sym_dev_stats_reset,
	.queue_pair_setup = nitrox_sym_dev_qp_setup,
	.queue_pair_release = nitrox_sym_dev_qp_release,
	.sym_session_get_size = nitrox_sym_dev_sess_get_size,
	.sym_session_configure = nitrox_sym_dev_sess_configure,
	.sym_session_clear = nitrox_sym_dev_sess_clear
};
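
/*
 * Device plumbing. One cryptodev named "<PCI address>_n5sym" is created
 * per nitrox device, wired to the ops table and burst handlers above, and
 * destroyed again in nitrox_sym_pmd_destroy().
 */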
int
nitrox_sym_pmd_create(struct nitrox_device *ndev)
{
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev_pmd_init_params init_params = {
			.name = "",
			.socket_id = ndev->pdev->device.numa_node,
			.private_data_size = sizeof(struct nitrox_sym_device)
	};
	struct rte_cryptodev *cdev;

	rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name));
	snprintf(name + strlen(name), sizeof(name) - strlen(name), "_n5sym");
	ndev->rte_sym_dev.driver = &nitrox_rte_sym_drv;
	ndev->rte_sym_dev.numa_node = ndev->pdev->device.numa_node;
	ndev->rte_sym_dev.devargs = NULL;
	cdev = rte_cryptodev_pmd_create(name, &ndev->rte_sym_dev,
					&init_params);
	if (!cdev) {
		NITROX_LOG(ERR, "Cryptodev '%s' creation failed\n", name);
		return -ENODEV;
	}

	ndev->rte_sym_dev.name = cdev->data->name;
	cdev->driver_id = nitrox_sym_drv_id;
	cdev->dev_ops = &nitrox_cryptodev_ops;
	cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
	cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
	cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_HW_ACCELERATED |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	ndev->sym_dev = cdev->data->dev_private;
	ndev->sym_dev->cdev = cdev;
	ndev->sym_dev->ndev = ndev;
	NITROX_LOG(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d\n",
		   cdev->data->name, cdev->data->dev_id, nitrox_sym_drv_id);
	return 0;
}

int
nitrox_sym_pmd_destroy(struct nitrox_device *ndev)
{
	return rte_cryptodev_pmd_destroy(ndev->sym_dev->cdev);
}

static struct cryptodev_driver nitrox_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(nitrox_crypto_drv,
			       nitrox_rte_sym_drv,
			       nitrox_sym_drv_id);