1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2015-2020 Intel Corporation 3 */ 4 5 #include <sys/queue.h> 6 #include <ctype.h> 7 #include <stdio.h> 8 #include <stdlib.h> 9 #include <string.h> 10 #include <errno.h> 11 #include <stdint.h> 12 #include <inttypes.h> 13 14 #include <rte_log.h> 15 #include <rte_debug.h> 16 #include <dev_driver.h> 17 #include <rte_memory.h> 18 #include <rte_memcpy.h> 19 #include <rte_memzone.h> 20 #include <rte_eal.h> 21 #include <rte_common.h> 22 #include <rte_mempool.h> 23 #include <rte_malloc.h> 24 #include <rte_errno.h> 25 #include <rte_spinlock.h> 26 #include <rte_string_fns.h> 27 #include <rte_telemetry.h> 28 29 #include "rte_crypto.h" 30 #include "rte_cryptodev.h" 31 #include "cryptodev_pmd.h" 32 #include "rte_cryptodev_trace.h" 33 34 static uint8_t nb_drivers; 35 36 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS]; 37 38 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices; 39 40 static struct rte_cryptodev_global cryptodev_globals = { 41 .devs = rte_crypto_devices, 42 .data = { NULL }, 43 .nb_devs = 0 44 }; 45 46 /* Public fastpath APIs. */ 47 struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS]; 48 49 /* spinlock for crypto device callbacks */ 50 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER; 51 52 /** 53 * The user application callback description. 54 * 55 * It contains callback address to be registered by user application, 56 * the pointer to the parameters for callback, and the event type. 57 */ 58 struct rte_cryptodev_callback { 59 TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */ 60 rte_cryptodev_cb_fn cb_fn; /**< Callback address */ 61 void *cb_arg; /**< Parameter for callback */ 62 enum rte_cryptodev_event_type event; /**< Interrupt event type */ 63 uint32_t active; /**< Callback is executing */ 64 }; 65 66 /** 67 * The crypto cipher algorithm strings identifiers. 68 * It could be used in application command line. 69 */ 70 const char * 71 rte_crypto_cipher_algorithm_strings[] = { 72 [RTE_CRYPTO_CIPHER_3DES_CBC] = "3des-cbc", 73 [RTE_CRYPTO_CIPHER_3DES_ECB] = "3des-ecb", 74 [RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr", 75 76 [RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc", 77 [RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr", 78 [RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi", 79 [RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb", 80 [RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8", 81 [RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts", 82 83 [RTE_CRYPTO_CIPHER_ARC4] = "arc4", 84 85 [RTE_CRYPTO_CIPHER_DES_CBC] = "des-cbc", 86 [RTE_CRYPTO_CIPHER_DES_DOCSISBPI] = "des-docsisbpi", 87 88 [RTE_CRYPTO_CIPHER_NULL] = "null", 89 90 [RTE_CRYPTO_CIPHER_KASUMI_F8] = "kasumi-f8", 91 [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2", 92 [RTE_CRYPTO_CIPHER_ZUC_EEA3] = "zuc-eea3", 93 [RTE_CRYPTO_CIPHER_SM4_ECB] = "sm4-ecb", 94 [RTE_CRYPTO_CIPHER_SM4_CBC] = "sm4-cbc", 95 [RTE_CRYPTO_CIPHER_SM4_CTR] = "sm4-ctr" 96 }; 97 98 /** 99 * The crypto cipher operation strings identifiers. 100 * It could be used in application command line. 101 */ 102 const char * 103 rte_crypto_cipher_operation_strings[] = { 104 [RTE_CRYPTO_CIPHER_OP_ENCRYPT] = "encrypt", 105 [RTE_CRYPTO_CIPHER_OP_DECRYPT] = "decrypt" 106 }; 107 108 /** 109 * The crypto auth algorithm strings identifiers. 110 * It could be used in application command line. 111 */ 112 const char * 113 rte_crypto_auth_algorithm_strings[] = { 114 [RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac", 115 [RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac", 116 [RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac", 117 [RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac", 118 119 [RTE_CRYPTO_AUTH_MD5] = "md5", 120 [RTE_CRYPTO_AUTH_MD5_HMAC] = "md5-hmac", 121 122 [RTE_CRYPTO_AUTH_NULL] = "null", 123 124 [RTE_CRYPTO_AUTH_SHA1] = "sha1", 125 [RTE_CRYPTO_AUTH_SHA1_HMAC] = "sha1-hmac", 126 127 [RTE_CRYPTO_AUTH_SHA224] = "sha2-224", 128 [RTE_CRYPTO_AUTH_SHA224_HMAC] = "sha2-224-hmac", 129 [RTE_CRYPTO_AUTH_SHA256] = "sha2-256", 130 [RTE_CRYPTO_AUTH_SHA256_HMAC] = "sha2-256-hmac", 131 [RTE_CRYPTO_AUTH_SHA384] = "sha2-384", 132 [RTE_CRYPTO_AUTH_SHA384_HMAC] = "sha2-384-hmac", 133 [RTE_CRYPTO_AUTH_SHA512] = "sha2-512", 134 [RTE_CRYPTO_AUTH_SHA512_HMAC] = "sha2-512-hmac", 135 136 [RTE_CRYPTO_AUTH_SHA3_224] = "sha3-224", 137 [RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac", 138 [RTE_CRYPTO_AUTH_SHA3_256] = "sha3-256", 139 [RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac", 140 [RTE_CRYPTO_AUTH_SHA3_384] = "sha3-384", 141 [RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac", 142 [RTE_CRYPTO_AUTH_SHA3_512] = "sha3-512", 143 [RTE_CRYPTO_AUTH_SHA3_512_HMAC] = "sha3-512-hmac", 144 145 [RTE_CRYPTO_AUTH_KASUMI_F9] = "kasumi-f9", 146 [RTE_CRYPTO_AUTH_SNOW3G_UIA2] = "snow3g-uia2", 147 [RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3", 148 [RTE_CRYPTO_AUTH_SM3] = "sm3" 149 }; 150 151 /** 152 * The crypto AEAD algorithm strings identifiers. 153 * It could be used in application command line. 154 */ 155 const char * 156 rte_crypto_aead_algorithm_strings[] = { 157 [RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm", 158 [RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm", 159 [RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305" 160 }; 161 162 /** 163 * The crypto AEAD operation strings identifiers. 164 * It could be used in application command line. 165 */ 166 const char * 167 rte_crypto_aead_operation_strings[] = { 168 [RTE_CRYPTO_AEAD_OP_ENCRYPT] = "encrypt", 169 [RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt" 170 }; 171 172 /** 173 * Asymmetric crypto transform operation strings identifiers. 174 */ 175 const char *rte_crypto_asym_xform_strings[] = { 176 [RTE_CRYPTO_ASYM_XFORM_NONE] = "none", 177 [RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa", 178 [RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp", 179 [RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv", 180 [RTE_CRYPTO_ASYM_XFORM_DH] = "dh", 181 [RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa", 182 [RTE_CRYPTO_ASYM_XFORM_ECDSA] = "ecdsa", 183 [RTE_CRYPTO_ASYM_XFORM_ECPM] = "ecpm", 184 }; 185 186 /** 187 * Asymmetric crypto operation strings identifiers. 188 */ 189 const char *rte_crypto_asym_op_strings[] = { 190 [RTE_CRYPTO_ASYM_OP_ENCRYPT] = "encrypt", 191 [RTE_CRYPTO_ASYM_OP_DECRYPT] = "decrypt", 192 [RTE_CRYPTO_ASYM_OP_SIGN] = "sign", 193 [RTE_CRYPTO_ASYM_OP_VERIFY] = "verify" 194 }; 195 196 /** 197 * Asymmetric crypto key exchange operation strings identifiers. 198 */ 199 const char *rte_crypto_asym_ke_strings[] = { 200 [RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate", 201 [RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate", 202 [RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute", 203 [RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify" 204 }; 205 206 struct rte_cryptodev_sym_session_pool_private_data { 207 uint16_t sess_data_sz; 208 /**< driver session data size */ 209 uint16_t user_data_sz; 210 /**< session user data will be placed after sess_data */ 211 }; 212 213 /** 214 * The private data structure stored in the asym session mempool private data. 215 */ 216 struct rte_cryptodev_asym_session_pool_private_data { 217 uint16_t max_priv_session_sz; 218 /**< Size of private session data used when creating mempool */ 219 uint16_t user_data_sz; 220 /**< Session user data will be placed after sess_private_data */ 221 }; 222 223 int 224 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum, 225 const char *algo_string) 226 { 227 unsigned int i; 228 int ret = -1; /* Invalid string */ 229 230 for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) { 231 if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) { 232 *algo_enum = (enum rte_crypto_cipher_algorithm) i; 233 ret = 0; 234 break; 235 } 236 } 237 238 rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret); 239 240 return ret; 241 } 242 243 int 244 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum, 245 const char *algo_string) 246 { 247 unsigned int i; 248 int ret = -1; /* Invalid string */ 249 250 for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) { 251 if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) { 252 *algo_enum = (enum rte_crypto_auth_algorithm) i; 253 ret = 0; 254 break; 255 } 256 } 257 258 rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret); 259 260 return ret; 261 } 262 263 int 264 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, 265 const char *algo_string) 266 { 267 unsigned int i; 268 int ret = -1; /* Invalid string */ 269 270 for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) { 271 if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) { 272 *algo_enum = (enum rte_crypto_aead_algorithm) i; 273 ret = 0; 274 break; 275 } 276 } 277 278 rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret); 279 280 return ret; 281 } 282 283 int 284 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, 285 const char *xform_string) 286 { 287 unsigned int i; 288 int ret = -1; /* Invalid string */ 289 290 for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) { 291 if (strcmp(xform_string, 292 rte_crypto_asym_xform_strings[i]) == 0) { 293 *xform_enum = (enum rte_crypto_asym_xform_type) i; 294 ret = 0; 295 break; 296 } 297 } 298 299 rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret); 300 301 return ret; 302 } 303 304 /** 305 * The crypto auth operation strings identifiers. 306 * It could be used in application command line. 307 */ 308 const char * 309 rte_crypto_auth_operation_strings[] = { 310 [RTE_CRYPTO_AUTH_OP_VERIFY] = "verify", 311 [RTE_CRYPTO_AUTH_OP_GENERATE] = "generate" 312 }; 313 314 const struct rte_cryptodev_symmetric_capability * 315 rte_cryptodev_sym_capability_get(uint8_t dev_id, 316 const struct rte_cryptodev_sym_capability_idx *idx) 317 { 318 const struct rte_cryptodev_capabilities *capability; 319 const struct rte_cryptodev_symmetric_capability *sym_capability = NULL; 320 struct rte_cryptodev_info dev_info; 321 int i = 0; 322 323 rte_cryptodev_info_get(dev_id, &dev_info); 324 325 while ((capability = &dev_info.capabilities[i++])->op != 326 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 327 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) 328 continue; 329 330 if (capability->sym.xform_type != idx->type) 331 continue; 332 333 if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH && 334 capability->sym.auth.algo == idx->algo.auth) { 335 sym_capability = &capability->sym; 336 break; 337 } 338 339 if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER && 340 capability->sym.cipher.algo == idx->algo.cipher) { 341 sym_capability = &capability->sym; 342 break; 343 } 344 345 if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD && 346 capability->sym.aead.algo == idx->algo.aead) { 347 sym_capability = &capability->sym; 348 break; 349 } 350 } 351 352 rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name, 353 dev_info.driver_id, idx->type, sym_capability); 354 355 return sym_capability; 356 } 357 358 static int 359 param_range_check(uint16_t size, const struct rte_crypto_param_range *range) 360 { 361 unsigned int next_size; 362 363 /* Check lower/upper bounds */ 364 if (size < range->min) 365 return -1; 366 367 if (size > range->max) 368 return -1; 369 370 /* If range is actually only one value, size is correct */ 371 if (range->increment == 0) 372 return 0; 373 374 /* Check if value is one of the supported sizes */ 375 for (next_size = range->min; next_size <= range->max; 376 next_size += range->increment) 377 if (size == next_size) 378 return 0; 379 380 return -1; 381 } 382 383 const struct rte_cryptodev_asymmetric_xform_capability * 384 rte_cryptodev_asym_capability_get(uint8_t dev_id, 385 const struct rte_cryptodev_asym_capability_idx *idx) 386 { 387 const struct rte_cryptodev_capabilities *capability; 388 const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL; 389 struct rte_cryptodev_info dev_info; 390 unsigned int i = 0; 391 392 memset(&dev_info, 0, sizeof(struct rte_cryptodev_info)); 393 rte_cryptodev_info_get(dev_id, &dev_info); 394 395 while ((capability = &dev_info.capabilities[i++])->op != 396 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 397 if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC) 398 continue; 399 400 if (capability->asym.xform_capa.xform_type == idx->type) { 401 asym_cap = &capability->asym.xform_capa; 402 break; 403 } 404 } 405 406 rte_cryptodev_trace_asym_capability_get(dev_info.driver_name, 407 dev_info.driver_id, idx->type, asym_cap); 408 409 return asym_cap; 410 }; 411 412 int 413 rte_cryptodev_sym_capability_check_cipher( 414 const struct rte_cryptodev_symmetric_capability *capability, 415 uint16_t key_size, uint16_t iv_size) 416 { 417 int ret = 0; /* success */ 418 419 if (param_range_check(key_size, &capability->cipher.key_size) != 0) { 420 ret = -1; 421 goto done; 422 } 423 424 if (param_range_check(iv_size, &capability->cipher.iv_size) != 0) 425 ret = -1; 426 427 done: 428 rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size, 429 iv_size, ret); 430 431 return ret; 432 } 433 434 int 435 rte_cryptodev_sym_capability_check_auth( 436 const struct rte_cryptodev_symmetric_capability *capability, 437 uint16_t key_size, uint16_t digest_size, uint16_t iv_size) 438 { 439 int ret = 0; /* success */ 440 441 if (param_range_check(key_size, &capability->auth.key_size) != 0) { 442 ret = -1; 443 goto done; 444 } 445 446 if (param_range_check(digest_size, 447 &capability->auth.digest_size) != 0) { 448 ret = -1; 449 goto done; 450 } 451 452 if (param_range_check(iv_size, &capability->auth.iv_size) != 0) 453 ret = -1; 454 455 done: 456 rte_cryptodev_trace_sym_capability_check_auth(capability, key_size, 457 digest_size, iv_size, ret); 458 459 return ret; 460 } 461 462 int 463 rte_cryptodev_sym_capability_check_aead( 464 const struct rte_cryptodev_symmetric_capability *capability, 465 uint16_t key_size, uint16_t digest_size, uint16_t aad_size, 466 uint16_t iv_size) 467 { 468 int ret = 0; /* success */ 469 470 if (param_range_check(key_size, &capability->aead.key_size) != 0) { 471 ret = -1; 472 goto done; 473 } 474 475 if (param_range_check(digest_size, 476 &capability->aead.digest_size) != 0) { 477 ret = -1; 478 goto done; 479 } 480 481 if (param_range_check(aad_size, &capability->aead.aad_size) != 0) { 482 ret = -1; 483 goto done; 484 } 485 486 if (param_range_check(iv_size, &capability->aead.iv_size) != 0) 487 ret = -1; 488 489 done: 490 rte_cryptodev_trace_sym_capability_check_aead(capability, key_size, 491 digest_size, aad_size, iv_size, ret); 492 493 return ret; 494 } 495 496 int 497 rte_cryptodev_asym_xform_capability_check_optype( 498 const struct rte_cryptodev_asymmetric_xform_capability *capability, 499 enum rte_crypto_asym_op_type op_type) 500 { 501 int ret = 0; 502 503 if (capability->op_types & (1 << op_type)) 504 ret = 1; 505 506 rte_cryptodev_trace_asym_xform_capability_check_optype( 507 capability->op_types, op_type, ret); 508 509 return ret; 510 } 511 512 int 513 rte_cryptodev_asym_xform_capability_check_modlen( 514 const struct rte_cryptodev_asymmetric_xform_capability *capability, 515 uint16_t modlen) 516 { 517 int ret = 0; /* success */ 518 519 /* no need to check for limits, if min or max = 0 */ 520 if (capability->modlen.min != 0) { 521 if (modlen < capability->modlen.min) { 522 ret = -1; 523 goto done; 524 } 525 } 526 527 if (capability->modlen.max != 0) { 528 if (modlen > capability->modlen.max) { 529 ret = -1; 530 goto done; 531 } 532 } 533 534 /* in any case, check if given modlen is module increment */ 535 if (capability->modlen.increment != 0) { 536 if (modlen % (capability->modlen.increment)) 537 ret = -1; 538 } 539 540 done: 541 rte_cryptodev_trace_asym_xform_capability_check_modlen(capability, 542 modlen, ret); 543 544 return ret; 545 } 546 547 /* spinlock for crypto device enq callbacks */ 548 static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER; 549 550 static void 551 cryptodev_cb_cleanup(struct rte_cryptodev *dev) 552 { 553 struct rte_cryptodev_cb_rcu *list; 554 struct rte_cryptodev_cb *cb, *next; 555 uint16_t qp_id; 556 557 if (dev->enq_cbs == NULL && dev->deq_cbs == NULL) 558 return; 559 560 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { 561 list = &dev->enq_cbs[qp_id]; 562 cb = list->next; 563 while (cb != NULL) { 564 next = cb->next; 565 rte_free(cb); 566 cb = next; 567 } 568 569 rte_free(list->qsbr); 570 } 571 572 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { 573 list = &dev->deq_cbs[qp_id]; 574 cb = list->next; 575 while (cb != NULL) { 576 next = cb->next; 577 rte_free(cb); 578 cb = next; 579 } 580 581 rte_free(list->qsbr); 582 } 583 584 rte_free(dev->enq_cbs); 585 dev->enq_cbs = NULL; 586 rte_free(dev->deq_cbs); 587 dev->deq_cbs = NULL; 588 } 589 590 static int 591 cryptodev_cb_init(struct rte_cryptodev *dev) 592 { 593 struct rte_cryptodev_cb_rcu *list; 594 struct rte_rcu_qsbr *qsbr; 595 uint16_t qp_id; 596 size_t size; 597 598 /* Max thread set to 1, as one DP thread accessing a queue-pair */ 599 const uint32_t max_threads = 1; 600 601 dev->enq_cbs = rte_zmalloc(NULL, 602 sizeof(struct rte_cryptodev_cb_rcu) * 603 dev->data->nb_queue_pairs, 0); 604 if (dev->enq_cbs == NULL) { 605 CDEV_LOG_ERR("Failed to allocate memory for enq callbacks"); 606 return -ENOMEM; 607 } 608 609 dev->deq_cbs = rte_zmalloc(NULL, 610 sizeof(struct rte_cryptodev_cb_rcu) * 611 dev->data->nb_queue_pairs, 0); 612 if (dev->deq_cbs == NULL) { 613 CDEV_LOG_ERR("Failed to allocate memory for deq callbacks"); 614 rte_free(dev->enq_cbs); 615 return -ENOMEM; 616 } 617 618 /* Create RCU QSBR variable */ 619 size = rte_rcu_qsbr_get_memsize(max_threads); 620 621 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { 622 list = &dev->enq_cbs[qp_id]; 623 qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE); 624 if (qsbr == NULL) { 625 CDEV_LOG_ERR("Failed to allocate memory for RCU on " 626 "queue_pair_id=%d", qp_id); 627 goto cb_init_err; 628 } 629 630 if (rte_rcu_qsbr_init(qsbr, max_threads)) { 631 CDEV_LOG_ERR("Failed to initialize for RCU on " 632 "queue_pair_id=%d", qp_id); 633 goto cb_init_err; 634 } 635 636 list->qsbr = qsbr; 637 } 638 639 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { 640 list = &dev->deq_cbs[qp_id]; 641 qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE); 642 if (qsbr == NULL) { 643 CDEV_LOG_ERR("Failed to allocate memory for RCU on " 644 "queue_pair_id=%d", qp_id); 645 goto cb_init_err; 646 } 647 648 if (rte_rcu_qsbr_init(qsbr, max_threads)) { 649 CDEV_LOG_ERR("Failed to initialize for RCU on " 650 "queue_pair_id=%d", qp_id); 651 goto cb_init_err; 652 } 653 654 list->qsbr = qsbr; 655 } 656 657 return 0; 658 659 cb_init_err: 660 cryptodev_cb_cleanup(dev); 661 return -ENOMEM; 662 } 663 664 const char * 665 rte_cryptodev_get_feature_name(uint64_t flag) 666 { 667 rte_cryptodev_trace_get_feature_name(flag); 668 669 switch (flag) { 670 case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO: 671 return "SYMMETRIC_CRYPTO"; 672 case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO: 673 return "ASYMMETRIC_CRYPTO"; 674 case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING: 675 return "SYM_OPERATION_CHAINING"; 676 case RTE_CRYPTODEV_FF_CPU_SSE: 677 return "CPU_SSE"; 678 case RTE_CRYPTODEV_FF_CPU_AVX: 679 return "CPU_AVX"; 680 case RTE_CRYPTODEV_FF_CPU_AVX2: 681 return "CPU_AVX2"; 682 case RTE_CRYPTODEV_FF_CPU_AVX512: 683 return "CPU_AVX512"; 684 case RTE_CRYPTODEV_FF_CPU_AESNI: 685 return "CPU_AESNI"; 686 case RTE_CRYPTODEV_FF_HW_ACCELERATED: 687 return "HW_ACCELERATED"; 688 case RTE_CRYPTODEV_FF_IN_PLACE_SGL: 689 return "IN_PLACE_SGL"; 690 case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT: 691 return "OOP_SGL_IN_SGL_OUT"; 692 case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT: 693 return "OOP_SGL_IN_LB_OUT"; 694 case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT: 695 return "OOP_LB_IN_SGL_OUT"; 696 case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT: 697 return "OOP_LB_IN_LB_OUT"; 698 case RTE_CRYPTODEV_FF_CPU_NEON: 699 return "CPU_NEON"; 700 case RTE_CRYPTODEV_FF_CPU_ARM_CE: 701 return "CPU_ARM_CE"; 702 case RTE_CRYPTODEV_FF_SECURITY: 703 return "SECURITY_PROTOCOL"; 704 case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP: 705 return "RSA_PRIV_OP_KEY_EXP"; 706 case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT: 707 return "RSA_PRIV_OP_KEY_QT"; 708 case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED: 709 return "DIGEST_ENCRYPTED"; 710 case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO: 711 return "SYM_CPU_CRYPTO"; 712 case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS: 713 return "ASYM_SESSIONLESS"; 714 case RTE_CRYPTODEV_FF_SYM_SESSIONLESS: 715 return "SYM_SESSIONLESS"; 716 case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA: 717 return "NON_BYTE_ALIGNED_DATA"; 718 case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS: 719 return "CIPHER_MULTIPLE_DATA_UNITS"; 720 case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY: 721 return "CIPHER_WRAPPED_KEY"; 722 default: 723 return NULL; 724 } 725 } 726 727 struct rte_cryptodev * 728 rte_cryptodev_pmd_get_dev(uint8_t dev_id) 729 { 730 return &cryptodev_globals.devs[dev_id]; 731 } 732 733 struct rte_cryptodev * 734 rte_cryptodev_pmd_get_named_dev(const char *name) 735 { 736 struct rte_cryptodev *dev; 737 unsigned int i; 738 739 if (name == NULL) 740 return NULL; 741 742 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { 743 dev = &cryptodev_globals.devs[i]; 744 745 if ((dev->attached == RTE_CRYPTODEV_ATTACHED) && 746 (strcmp(dev->data->name, name) == 0)) 747 return dev; 748 } 749 750 return NULL; 751 } 752 753 static inline uint8_t 754 rte_cryptodev_is_valid_device_data(uint8_t dev_id) 755 { 756 if (dev_id >= RTE_CRYPTO_MAX_DEVS || 757 rte_crypto_devices[dev_id].data == NULL) 758 return 0; 759 760 return 1; 761 } 762 763 unsigned int 764 rte_cryptodev_is_valid_dev(uint8_t dev_id) 765 { 766 struct rte_cryptodev *dev = NULL; 767 unsigned int ret = 1; 768 769 if (!rte_cryptodev_is_valid_device_data(dev_id)) { 770 ret = 0; 771 goto done; 772 } 773 774 dev = rte_cryptodev_pmd_get_dev(dev_id); 775 if (dev->attached != RTE_CRYPTODEV_ATTACHED) 776 ret = 0; 777 778 done: 779 rte_cryptodev_trace_is_valid_dev(dev_id, ret); 780 781 return ret; 782 } 783 784 int 785 rte_cryptodev_get_dev_id(const char *name) 786 { 787 unsigned i; 788 int ret = -1; 789 790 if (name == NULL) 791 return -1; 792 793 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { 794 if (!rte_cryptodev_is_valid_device_data(i)) 795 continue; 796 if ((strcmp(cryptodev_globals.devs[i].data->name, name) 797 == 0) && 798 (cryptodev_globals.devs[i].attached == 799 RTE_CRYPTODEV_ATTACHED)) { 800 ret = (int)i; 801 break; 802 } 803 } 804 805 rte_cryptodev_trace_get_dev_id(name, ret); 806 807 return ret; 808 } 809 810 uint8_t 811 rte_cryptodev_count(void) 812 { 813 rte_cryptodev_trace_count(cryptodev_globals.nb_devs); 814 815 return cryptodev_globals.nb_devs; 816 } 817 818 uint8_t 819 rte_cryptodev_device_count_by_driver(uint8_t driver_id) 820 { 821 uint8_t i, dev_count = 0; 822 823 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) 824 if (cryptodev_globals.devs[i].driver_id == driver_id && 825 cryptodev_globals.devs[i].attached == 826 RTE_CRYPTODEV_ATTACHED) 827 dev_count++; 828 829 rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count); 830 831 return dev_count; 832 } 833 834 uint8_t 835 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, 836 uint8_t nb_devices) 837 { 838 uint8_t i, count = 0; 839 struct rte_cryptodev *devs = cryptodev_globals.devs; 840 841 for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) { 842 if (!rte_cryptodev_is_valid_device_data(i)) 843 continue; 844 845 if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) { 846 int cmp; 847 848 cmp = strncmp(devs[i].device->driver->name, 849 driver_name, 850 strlen(driver_name) + 1); 851 852 if (cmp == 0) 853 devices[count++] = devs[i].data->dev_id; 854 } 855 } 856 857 rte_cryptodev_trace_devices_get(driver_name, count); 858 859 return count; 860 } 861 862 void * 863 rte_cryptodev_get_sec_ctx(uint8_t dev_id) 864 { 865 void *sec_ctx = NULL; 866 867 if (dev_id < RTE_CRYPTO_MAX_DEVS && 868 (rte_crypto_devices[dev_id].feature_flags & 869 RTE_CRYPTODEV_FF_SECURITY)) 870 sec_ctx = rte_crypto_devices[dev_id].security_ctx; 871 872 rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx); 873 874 return sec_ctx; 875 } 876 877 int 878 rte_cryptodev_socket_id(uint8_t dev_id) 879 { 880 struct rte_cryptodev *dev; 881 882 if (!rte_cryptodev_is_valid_dev(dev_id)) 883 return -1; 884 885 dev = rte_cryptodev_pmd_get_dev(dev_id); 886 887 rte_cryptodev_trace_socket_id(dev_id, dev->data->name, 888 dev->data->socket_id); 889 return dev->data->socket_id; 890 } 891 892 static inline int 893 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, 894 int socket_id) 895 { 896 char mz_name[RTE_MEMZONE_NAMESIZE]; 897 const struct rte_memzone *mz; 898 int n; 899 900 /* generate memzone name */ 901 n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id); 902 if (n >= (int)sizeof(mz_name)) 903 return -EINVAL; 904 905 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 906 mz = rte_memzone_reserve(mz_name, 907 sizeof(struct rte_cryptodev_data), 908 socket_id, 0); 909 CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)", 910 mz_name, mz); 911 } else { 912 mz = rte_memzone_lookup(mz_name); 913 CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)", 914 mz_name, mz); 915 } 916 917 if (mz == NULL) 918 return -ENOMEM; 919 920 *data = mz->addr; 921 if (rte_eal_process_type() == RTE_PROC_PRIMARY) 922 memset(*data, 0, sizeof(struct rte_cryptodev_data)); 923 924 return 0; 925 } 926 927 static inline int 928 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data) 929 { 930 char mz_name[RTE_MEMZONE_NAMESIZE]; 931 const struct rte_memzone *mz; 932 int n; 933 934 /* generate memzone name */ 935 n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id); 936 if (n >= (int)sizeof(mz_name)) 937 return -EINVAL; 938 939 mz = rte_memzone_lookup(mz_name); 940 if (mz == NULL) 941 return -ENOMEM; 942 943 RTE_ASSERT(*data == mz->addr); 944 *data = NULL; 945 946 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 947 CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)", 948 mz_name, mz); 949 return rte_memzone_free(mz); 950 } else { 951 CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)", 952 mz_name, mz); 953 } 954 955 return 0; 956 } 957 958 static uint8_t 959 rte_cryptodev_find_free_device_index(void) 960 { 961 uint8_t dev_id; 962 963 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) { 964 if (rte_crypto_devices[dev_id].attached == 965 RTE_CRYPTODEV_DETACHED) 966 return dev_id; 967 } 968 return RTE_CRYPTO_MAX_DEVS; 969 } 970 971 struct rte_cryptodev * 972 rte_cryptodev_pmd_allocate(const char *name, int socket_id) 973 { 974 struct rte_cryptodev *cryptodev; 975 uint8_t dev_id; 976 977 if (rte_cryptodev_pmd_get_named_dev(name) != NULL) { 978 CDEV_LOG_ERR("Crypto device with name %s already " 979 "allocated!", name); 980 return NULL; 981 } 982 983 dev_id = rte_cryptodev_find_free_device_index(); 984 if (dev_id == RTE_CRYPTO_MAX_DEVS) { 985 CDEV_LOG_ERR("Reached maximum number of crypto devices"); 986 return NULL; 987 } 988 989 cryptodev = rte_cryptodev_pmd_get_dev(dev_id); 990 991 if (cryptodev->data == NULL) { 992 struct rte_cryptodev_data **cryptodev_data = 993 &cryptodev_globals.data[dev_id]; 994 995 int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data, 996 socket_id); 997 998 if (retval < 0 || *cryptodev_data == NULL) 999 return NULL; 1000 1001 cryptodev->data = *cryptodev_data; 1002 1003 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 1004 strlcpy(cryptodev->data->name, name, 1005 RTE_CRYPTODEV_NAME_MAX_LEN); 1006 1007 cryptodev->data->dev_id = dev_id; 1008 cryptodev->data->socket_id = socket_id; 1009 cryptodev->data->dev_started = 0; 1010 CDEV_LOG_DEBUG("PRIMARY:init data"); 1011 } 1012 1013 CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d", 1014 cryptodev->data->name, 1015 cryptodev->data->dev_id, 1016 cryptodev->data->socket_id, 1017 cryptodev->data->dev_started); 1018 1019 /* init user callbacks */ 1020 TAILQ_INIT(&(cryptodev->link_intr_cbs)); 1021 1022 cryptodev->attached = RTE_CRYPTODEV_ATTACHED; 1023 1024 cryptodev_globals.nb_devs++; 1025 } 1026 1027 return cryptodev; 1028 } 1029 1030 int 1031 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev) 1032 { 1033 int ret; 1034 uint8_t dev_id; 1035 1036 if (cryptodev == NULL) 1037 return -EINVAL; 1038 1039 dev_id = cryptodev->data->dev_id; 1040 1041 cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id); 1042 1043 /* Close device only if device operations have been set */ 1044 if (cryptodev->dev_ops) { 1045 ret = rte_cryptodev_close(dev_id); 1046 if (ret < 0) 1047 return ret; 1048 } 1049 1050 ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]); 1051 if (ret < 0) 1052 return ret; 1053 1054 cryptodev->attached = RTE_CRYPTODEV_DETACHED; 1055 cryptodev_globals.nb_devs--; 1056 return 0; 1057 } 1058 1059 uint16_t 1060 rte_cryptodev_queue_pair_count(uint8_t dev_id) 1061 { 1062 struct rte_cryptodev *dev; 1063 1064 if (!rte_cryptodev_is_valid_device_data(dev_id)) { 1065 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1066 return 0; 1067 } 1068 1069 dev = &rte_crypto_devices[dev_id]; 1070 rte_cryptodev_trace_queue_pair_count(dev, dev->data->name, 1071 dev->data->socket_id, dev->data->dev_id, 1072 dev->data->nb_queue_pairs); 1073 1074 return dev->data->nb_queue_pairs; 1075 } 1076 1077 static int 1078 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs, 1079 int socket_id) 1080 { 1081 struct rte_cryptodev_info dev_info; 1082 void **qp; 1083 unsigned i; 1084 1085 if ((dev == NULL) || (nb_qpairs < 1)) { 1086 CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u", 1087 dev, nb_qpairs); 1088 return -EINVAL; 1089 } 1090 1091 CDEV_LOG_DEBUG("Setup %d queues pairs on device %u", 1092 nb_qpairs, dev->data->dev_id); 1093 1094 memset(&dev_info, 0, sizeof(struct rte_cryptodev_info)); 1095 1096 if (*dev->dev_ops->dev_infos_get == NULL) 1097 return -ENOTSUP; 1098 (*dev->dev_ops->dev_infos_get)(dev, &dev_info); 1099 1100 if (nb_qpairs > (dev_info.max_nb_queue_pairs)) { 1101 CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u", 1102 nb_qpairs, dev->data->dev_id); 1103 return -EINVAL; 1104 } 1105 1106 if (dev->data->queue_pairs == NULL) { /* first time configuration */ 1107 dev->data->queue_pairs = rte_zmalloc_socket( 1108 "cryptodev->queue_pairs", 1109 sizeof(dev->data->queue_pairs[0]) * 1110 dev_info.max_nb_queue_pairs, 1111 RTE_CACHE_LINE_SIZE, socket_id); 1112 1113 if (dev->data->queue_pairs == NULL) { 1114 dev->data->nb_queue_pairs = 0; 1115 CDEV_LOG_ERR("failed to get memory for qp meta data, " 1116 "nb_queues %u", 1117 nb_qpairs); 1118 return -(ENOMEM); 1119 } 1120 } else { /* re-configure */ 1121 int ret; 1122 uint16_t old_nb_queues = dev->data->nb_queue_pairs; 1123 1124 qp = dev->data->queue_pairs; 1125 1126 if (*dev->dev_ops->queue_pair_release == NULL) 1127 return -ENOTSUP; 1128 1129 for (i = nb_qpairs; i < old_nb_queues; i++) { 1130 ret = (*dev->dev_ops->queue_pair_release)(dev, i); 1131 if (ret < 0) 1132 return ret; 1133 qp[i] = NULL; 1134 } 1135 1136 } 1137 dev->data->nb_queue_pairs = nb_qpairs; 1138 return 0; 1139 } 1140 1141 int 1142 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config) 1143 { 1144 struct rte_cryptodev *dev; 1145 int diag; 1146 1147 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1148 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1149 return -EINVAL; 1150 } 1151 1152 dev = &rte_crypto_devices[dev_id]; 1153 1154 if (dev->data->dev_started) { 1155 CDEV_LOG_ERR( 1156 "device %d must be stopped to allow configuration", dev_id); 1157 return -EBUSY; 1158 } 1159 1160 if (*dev->dev_ops->dev_configure == NULL) 1161 return -ENOTSUP; 1162 1163 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1164 cryptodev_cb_cleanup(dev); 1165 rte_spinlock_unlock(&rte_cryptodev_callback_lock); 1166 1167 /* Setup new number of queue pairs and reconfigure device. */ 1168 diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs, 1169 config->socket_id); 1170 if (diag != 0) { 1171 CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d", 1172 dev_id, diag); 1173 return diag; 1174 } 1175 1176 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1177 diag = cryptodev_cb_init(dev); 1178 rte_spinlock_unlock(&rte_cryptodev_callback_lock); 1179 if (diag) { 1180 CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id); 1181 return diag; 1182 } 1183 1184 rte_cryptodev_trace_configure(dev_id, config); 1185 return (*dev->dev_ops->dev_configure)(dev, config); 1186 } 1187 1188 int 1189 rte_cryptodev_start(uint8_t dev_id) 1190 { 1191 struct rte_cryptodev *dev; 1192 int diag; 1193 1194 CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id); 1195 1196 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1197 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1198 return -EINVAL; 1199 } 1200 1201 dev = &rte_crypto_devices[dev_id]; 1202 1203 if (*dev->dev_ops->dev_start == NULL) 1204 return -ENOTSUP; 1205 1206 if (dev->data->dev_started != 0) { 1207 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started", 1208 dev_id); 1209 return 0; 1210 } 1211 1212 diag = (*dev->dev_ops->dev_start)(dev); 1213 /* expose selection of PMD fast-path functions */ 1214 cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev); 1215 1216 rte_cryptodev_trace_start(dev_id, diag); 1217 if (diag == 0) 1218 dev->data->dev_started = 1; 1219 else 1220 return diag; 1221 1222 return 0; 1223 } 1224 1225 void 1226 rte_cryptodev_stop(uint8_t dev_id) 1227 { 1228 struct rte_cryptodev *dev; 1229 1230 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1231 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1232 return; 1233 } 1234 1235 dev = &rte_crypto_devices[dev_id]; 1236 1237 if (*dev->dev_ops->dev_stop == NULL) 1238 return; 1239 1240 if (dev->data->dev_started == 0) { 1241 CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped", 1242 dev_id); 1243 return; 1244 } 1245 1246 /* point fast-path functions to dummy ones */ 1247 cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id); 1248 1249 (*dev->dev_ops->dev_stop)(dev); 1250 rte_cryptodev_trace_stop(dev_id); 1251 dev->data->dev_started = 0; 1252 } 1253 1254 int 1255 rte_cryptodev_close(uint8_t dev_id) 1256 { 1257 struct rte_cryptodev *dev; 1258 int retval; 1259 1260 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1261 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1262 return -1; 1263 } 1264 1265 dev = &rte_crypto_devices[dev_id]; 1266 1267 /* Device must be stopped before it can be closed */ 1268 if (dev->data->dev_started == 1) { 1269 CDEV_LOG_ERR("Device %u must be stopped before closing", 1270 dev_id); 1271 return -EBUSY; 1272 } 1273 1274 /* We can't close the device if there are outstanding sessions in use */ 1275 if (dev->data->session_pool != NULL) { 1276 if (!rte_mempool_full(dev->data->session_pool)) { 1277 CDEV_LOG_ERR("dev_id=%u close failed, session mempool " 1278 "has sessions still in use, free " 1279 "all sessions before calling close", 1280 (unsigned)dev_id); 1281 return -EBUSY; 1282 } 1283 } 1284 1285 if (*dev->dev_ops->dev_close == NULL) 1286 return -ENOTSUP; 1287 retval = (*dev->dev_ops->dev_close)(dev); 1288 rte_cryptodev_trace_close(dev_id, retval); 1289 1290 if (retval < 0) 1291 return retval; 1292 1293 return 0; 1294 } 1295 1296 int 1297 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id) 1298 { 1299 struct rte_cryptodev *dev; 1300 int ret = 0; 1301 1302 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1303 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1304 ret = -EINVAL; 1305 goto done; 1306 } 1307 1308 dev = &rte_crypto_devices[dev_id]; 1309 if (queue_pair_id >= dev->data->nb_queue_pairs) { 1310 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id); 1311 ret = -EINVAL; 1312 goto done; 1313 } 1314 void **qps = dev->data->queue_pairs; 1315 1316 if (qps[queue_pair_id]) { 1317 CDEV_LOG_DEBUG("qp %d on dev %d is initialised", 1318 queue_pair_id, dev_id); 1319 ret = 1; 1320 goto done; 1321 } 1322 1323 CDEV_LOG_DEBUG("qp %d on dev %d is not initialised", 1324 queue_pair_id, dev_id); 1325 1326 done: 1327 rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret); 1328 1329 return ret; 1330 } 1331 1332 static uint8_t 1333 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp, 1334 uint32_t sess_priv_size) 1335 { 1336 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 1337 1338 if (!mp) 1339 return 0; 1340 1341 pool_priv = rte_mempool_get_priv(mp); 1342 1343 if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) || 1344 pool_priv->sess_data_sz < sess_priv_size) 1345 return 0; 1346 1347 return 1; 1348 } 1349 1350 int 1351 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, 1352 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id) 1353 1354 { 1355 struct rte_cryptodev *dev; 1356 1357 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1358 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1359 return -EINVAL; 1360 } 1361 1362 dev = &rte_crypto_devices[dev_id]; 1363 if (queue_pair_id >= dev->data->nb_queue_pairs) { 1364 CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id); 1365 return -EINVAL; 1366 } 1367 1368 if (!qp_conf) { 1369 CDEV_LOG_ERR("qp_conf cannot be NULL"); 1370 return -EINVAL; 1371 } 1372 1373 if (qp_conf->mp_session) { 1374 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 1375 1376 pool_priv = rte_mempool_get_priv(qp_conf->mp_session); 1377 if (!pool_priv || qp_conf->mp_session->private_data_size < 1378 sizeof(*pool_priv)) { 1379 CDEV_LOG_ERR("Invalid mempool"); 1380 return -EINVAL; 1381 } 1382 1383 if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session, 1384 rte_cryptodev_sym_get_private_session_size(dev_id))) { 1385 CDEV_LOG_ERR("Invalid mempool"); 1386 return -EINVAL; 1387 } 1388 } 1389 1390 if (dev->data->dev_started) { 1391 CDEV_LOG_ERR( 1392 "device %d must be stopped to allow configuration", dev_id); 1393 return -EBUSY; 1394 } 1395 1396 if (*dev->dev_ops->queue_pair_setup == NULL) 1397 return -ENOTSUP; 1398 1399 rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf); 1400 return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf, 1401 socket_id); 1402 } 1403 1404 struct rte_cryptodev_cb * 1405 rte_cryptodev_add_enq_callback(uint8_t dev_id, 1406 uint16_t qp_id, 1407 rte_cryptodev_callback_fn cb_fn, 1408 void *cb_arg) 1409 { 1410 struct rte_cryptodev *dev; 1411 struct rte_cryptodev_cb_rcu *list; 1412 struct rte_cryptodev_cb *cb, *tail; 1413 1414 if (!cb_fn) { 1415 CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id); 1416 rte_errno = EINVAL; 1417 return NULL; 1418 } 1419 1420 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1421 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); 1422 rte_errno = ENODEV; 1423 return NULL; 1424 } 1425 1426 dev = &rte_crypto_devices[dev_id]; 1427 if (qp_id >= dev->data->nb_queue_pairs) { 1428 CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id); 1429 rte_errno = ENODEV; 1430 return NULL; 1431 } 1432 1433 cb = rte_zmalloc(NULL, sizeof(*cb), 0); 1434 if (cb == NULL) { 1435 CDEV_LOG_ERR("Failed to allocate memory for callback on " 1436 "dev=%d, queue_pair_id=%d", dev_id, qp_id); 1437 rte_errno = ENOMEM; 1438 return NULL; 1439 } 1440 1441 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1442 1443 cb->fn = cb_fn; 1444 cb->arg = cb_arg; 1445 1446 /* Add the callbacks in fifo order. */ 1447 list = &dev->enq_cbs[qp_id]; 1448 tail = list->next; 1449 1450 if (tail) { 1451 while (tail->next) 1452 tail = tail->next; 1453 /* Stores to cb->fn and cb->param should complete before 1454 * cb is visible to data plane. 1455 */ 1456 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 1457 } else { 1458 /* Stores to cb->fn and cb->param should complete before 1459 * cb is visible to data plane. 1460 */ 1461 __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE); 1462 } 1463 1464 rte_spinlock_unlock(&rte_cryptodev_callback_lock); 1465 1466 rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn); 1467 return cb; 1468 } 1469 1470 int 1471 rte_cryptodev_remove_enq_callback(uint8_t dev_id, 1472 uint16_t qp_id, 1473 struct rte_cryptodev_cb *cb) 1474 { 1475 struct rte_cryptodev *dev; 1476 struct rte_cryptodev_cb **prev_cb, *curr_cb; 1477 struct rte_cryptodev_cb_rcu *list; 1478 int ret; 1479 1480 ret = -EINVAL; 1481 1482 if (!cb) { 1483 CDEV_LOG_ERR("Callback is NULL"); 1484 return -EINVAL; 1485 } 1486 1487 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1488 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); 1489 return -ENODEV; 1490 } 1491 1492 rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn); 1493 1494 dev = &rte_crypto_devices[dev_id]; 1495 if (qp_id >= dev->data->nb_queue_pairs) { 1496 CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id); 1497 return -ENODEV; 1498 } 1499 1500 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1501 if (dev->enq_cbs == NULL) { 1502 CDEV_LOG_ERR("Callback not initialized"); 1503 goto cb_err; 1504 } 1505 1506 list = &dev->enq_cbs[qp_id]; 1507 if (list == NULL) { 1508 CDEV_LOG_ERR("Callback list is NULL"); 1509 goto cb_err; 1510 } 1511 1512 if (list->qsbr == NULL) { 1513 CDEV_LOG_ERR("Rcu qsbr is NULL"); 1514 goto cb_err; 1515 } 1516 1517 prev_cb = &list->next; 1518 for (; *prev_cb != NULL; prev_cb = &curr_cb->next) { 1519 curr_cb = *prev_cb; 1520 if (curr_cb == cb) { 1521 /* Remove the user cb from the callback list. */ 1522 __atomic_store_n(prev_cb, curr_cb->next, 1523 __ATOMIC_RELAXED); 1524 ret = 0; 1525 break; 1526 } 1527 } 1528 1529 if (!ret) { 1530 /* Call sync with invalid thread id as this is part of 1531 * control plane API 1532 */ 1533 rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID); 1534 rte_free(cb); 1535 } 1536 1537 cb_err: 1538 rte_spinlock_unlock(&rte_cryptodev_callback_lock); 1539 return ret; 1540 } 1541 1542 struct rte_cryptodev_cb * 1543 rte_cryptodev_add_deq_callback(uint8_t dev_id, 1544 uint16_t qp_id, 1545 rte_cryptodev_callback_fn cb_fn, 1546 void *cb_arg) 1547 { 1548 struct rte_cryptodev *dev; 1549 struct rte_cryptodev_cb_rcu *list; 1550 struct rte_cryptodev_cb *cb, *tail; 1551 1552 if (!cb_fn) { 1553 CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id); 1554 rte_errno = EINVAL; 1555 return NULL; 1556 } 1557 1558 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1559 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); 1560 rte_errno = ENODEV; 1561 return NULL; 1562 } 1563 1564 dev = &rte_crypto_devices[dev_id]; 1565 if (qp_id >= dev->data->nb_queue_pairs) { 1566 CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id); 1567 rte_errno = ENODEV; 1568 return NULL; 1569 } 1570 1571 cb = rte_zmalloc(NULL, sizeof(*cb), 0); 1572 if (cb == NULL) { 1573 CDEV_LOG_ERR("Failed to allocate memory for callback on " 1574 "dev=%d, queue_pair_id=%d", dev_id, qp_id); 1575 rte_errno = ENOMEM; 1576 return NULL; 1577 } 1578 1579 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1580 1581 cb->fn = cb_fn; 1582 cb->arg = cb_arg; 1583 1584 /* Add the callbacks in fifo order. */ 1585 list = &dev->deq_cbs[qp_id]; 1586 tail = list->next; 1587 1588 if (tail) { 1589 while (tail->next) 1590 tail = tail->next; 1591 /* Stores to cb->fn and cb->param should complete before 1592 * cb is visible to data plane. 1593 */ 1594 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 1595 } else { 1596 /* Stores to cb->fn and cb->param should complete before 1597 * cb is visible to data plane. 1598 */ 1599 __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE); 1600 } 1601 1602 rte_spinlock_unlock(&rte_cryptodev_callback_lock); 1603 1604 rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn); 1605 1606 return cb; 1607 } 1608 1609 int 1610 rte_cryptodev_remove_deq_callback(uint8_t dev_id, 1611 uint16_t qp_id, 1612 struct rte_cryptodev_cb *cb) 1613 { 1614 struct rte_cryptodev *dev; 1615 struct rte_cryptodev_cb **prev_cb, *curr_cb; 1616 struct rte_cryptodev_cb_rcu *list; 1617 int ret; 1618 1619 ret = -EINVAL; 1620 1621 if (!cb) { 1622 CDEV_LOG_ERR("Callback is NULL"); 1623 return -EINVAL; 1624 } 1625 1626 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1627 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); 1628 return -ENODEV; 1629 } 1630 1631 rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn); 1632 1633 dev = &rte_crypto_devices[dev_id]; 1634 if (qp_id >= dev->data->nb_queue_pairs) { 1635 CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id); 1636 return -ENODEV; 1637 } 1638 1639 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1640 if (dev->enq_cbs == NULL) { 1641 CDEV_LOG_ERR("Callback not initialized"); 1642 goto cb_err; 1643 } 1644 1645 list = &dev->deq_cbs[qp_id]; 1646 if (list == NULL) { 1647 CDEV_LOG_ERR("Callback list is NULL"); 1648 goto cb_err; 1649 } 1650 1651 if (list->qsbr == NULL) { 1652 CDEV_LOG_ERR("Rcu qsbr is NULL"); 1653 goto cb_err; 1654 } 1655 1656 prev_cb = &list->next; 1657 for (; *prev_cb != NULL; prev_cb = &curr_cb->next) { 1658 curr_cb = *prev_cb; 1659 if (curr_cb == cb) { 1660 /* Remove the user cb from the callback list. */ 1661 __atomic_store_n(prev_cb, curr_cb->next, 1662 __ATOMIC_RELAXED); 1663 ret = 0; 1664 break; 1665 } 1666 } 1667 1668 if (!ret) { 1669 /* Call sync with invalid thread id as this is part of 1670 * control plane API 1671 */ 1672 rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID); 1673 rte_free(cb); 1674 } 1675 1676 cb_err: 1677 rte_spinlock_unlock(&rte_cryptodev_callback_lock); 1678 return ret; 1679 } 1680 1681 int 1682 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats) 1683 { 1684 struct rte_cryptodev *dev; 1685 1686 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1687 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); 1688 return -ENODEV; 1689 } 1690 1691 if (stats == NULL) { 1692 CDEV_LOG_ERR("Invalid stats ptr"); 1693 return -EINVAL; 1694 } 1695 1696 dev = &rte_crypto_devices[dev_id]; 1697 memset(stats, 0, sizeof(*stats)); 1698 1699 if (*dev->dev_ops->stats_get == NULL) 1700 return -ENOTSUP; 1701 (*dev->dev_ops->stats_get)(dev, stats); 1702 1703 rte_cryptodev_trace_stats_get(dev_id, stats); 1704 return 0; 1705 } 1706 1707 void 1708 rte_cryptodev_stats_reset(uint8_t dev_id) 1709 { 1710 struct rte_cryptodev *dev; 1711 1712 rte_cryptodev_trace_stats_reset(dev_id); 1713 1714 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1715 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1716 return; 1717 } 1718 1719 dev = &rte_crypto_devices[dev_id]; 1720 1721 if (*dev->dev_ops->stats_reset == NULL) 1722 return; 1723 (*dev->dev_ops->stats_reset)(dev); 1724 } 1725 1726 void 1727 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info) 1728 { 1729 struct rte_cryptodev *dev; 1730 1731 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1732 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); 1733 return; 1734 } 1735 1736 dev = &rte_crypto_devices[dev_id]; 1737 1738 memset(dev_info, 0, sizeof(struct rte_cryptodev_info)); 1739 1740 if (*dev->dev_ops->dev_infos_get == NULL) 1741 return; 1742 (*dev->dev_ops->dev_infos_get)(dev, dev_info); 1743 1744 dev_info->driver_name = dev->device->driver->name; 1745 dev_info->device = dev->device; 1746 1747 rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name); 1748 1749 } 1750 1751 int 1752 rte_cryptodev_callback_register(uint8_t dev_id, 1753 enum rte_cryptodev_event_type event, 1754 rte_cryptodev_cb_fn cb_fn, void *cb_arg) 1755 { 1756 struct rte_cryptodev *dev; 1757 struct rte_cryptodev_callback *user_cb; 1758 1759 if (!cb_fn) 1760 return -EINVAL; 1761 1762 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1763 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1764 return -EINVAL; 1765 } 1766 1767 dev = &rte_crypto_devices[dev_id]; 1768 rte_spinlock_lock(&rte_cryptodev_cb_lock); 1769 1770 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 1771 if (user_cb->cb_fn == cb_fn && 1772 user_cb->cb_arg == cb_arg && 1773 user_cb->event == event) { 1774 break; 1775 } 1776 } 1777 1778 /* create a new callback. */ 1779 if (user_cb == NULL) { 1780 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 1781 sizeof(struct rte_cryptodev_callback), 0); 1782 if (user_cb != NULL) { 1783 user_cb->cb_fn = cb_fn; 1784 user_cb->cb_arg = cb_arg; 1785 user_cb->event = event; 1786 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next); 1787 } 1788 } 1789 1790 rte_spinlock_unlock(&rte_cryptodev_cb_lock); 1791 1792 rte_cryptodev_trace_callback_register(dev_id, event, cb_fn); 1793 return (user_cb == NULL) ? -ENOMEM : 0; 1794 } 1795 1796 int 1797 rte_cryptodev_callback_unregister(uint8_t dev_id, 1798 enum rte_cryptodev_event_type event, 1799 rte_cryptodev_cb_fn cb_fn, void *cb_arg) 1800 { 1801 int ret; 1802 struct rte_cryptodev *dev; 1803 struct rte_cryptodev_callback *cb, *next; 1804 1805 if (!cb_fn) 1806 return -EINVAL; 1807 1808 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1809 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1810 return -EINVAL; 1811 } 1812 1813 dev = &rte_crypto_devices[dev_id]; 1814 rte_spinlock_lock(&rte_cryptodev_cb_lock); 1815 1816 ret = 0; 1817 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) { 1818 1819 next = TAILQ_NEXT(cb, next); 1820 1821 if (cb->cb_fn != cb_fn || cb->event != event || 1822 (cb->cb_arg != (void *)-1 && 1823 cb->cb_arg != cb_arg)) 1824 continue; 1825 1826 /* 1827 * if this callback is not executing right now, 1828 * then remove it. 1829 */ 1830 if (cb->active == 0) { 1831 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 1832 rte_free(cb); 1833 } else { 1834 ret = -EAGAIN; 1835 } 1836 } 1837 1838 rte_spinlock_unlock(&rte_cryptodev_cb_lock); 1839 1840 rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn); 1841 return ret; 1842 } 1843 1844 void 1845 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev, 1846 enum rte_cryptodev_event_type event) 1847 { 1848 struct rte_cryptodev_callback *cb_lst; 1849 struct rte_cryptodev_callback dev_cb; 1850 1851 rte_spinlock_lock(&rte_cryptodev_cb_lock); 1852 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { 1853 if (cb_lst->cb_fn == NULL || cb_lst->event != event) 1854 continue; 1855 dev_cb = *cb_lst; 1856 cb_lst->active = 1; 1857 rte_spinlock_unlock(&rte_cryptodev_cb_lock); 1858 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event, 1859 dev_cb.cb_arg); 1860 rte_spinlock_lock(&rte_cryptodev_cb_lock); 1861 cb_lst->active = 0; 1862 } 1863 rte_spinlock_unlock(&rte_cryptodev_cb_lock); 1864 } 1865 1866 struct rte_mempool * 1867 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, 1868 uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size, 1869 int socket_id) 1870 { 1871 struct rte_mempool *mp; 1872 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 1873 uint32_t obj_sz; 1874 1875 obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size; 1876 1877 obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE); 1878 mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size, 1879 (uint32_t)(sizeof(*pool_priv)), NULL, NULL, 1880 NULL, NULL, 1881 socket_id, 0); 1882 if (mp == NULL) { 1883 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d", 1884 __func__, name, rte_errno); 1885 return NULL; 1886 } 1887 1888 pool_priv = rte_mempool_get_priv(mp); 1889 if (!pool_priv) { 1890 CDEV_LOG_ERR("%s(name=%s) failed to get private data", 1891 __func__, name); 1892 rte_mempool_free(mp); 1893 return NULL; 1894 } 1895 1896 pool_priv->sess_data_sz = elt_size; 1897 pool_priv->user_data_sz = user_data_size; 1898 1899 rte_cryptodev_trace_sym_session_pool_create(name, nb_elts, 1900 elt_size, cache_size, user_data_size, mp); 1901 return mp; 1902 } 1903 1904 struct rte_mempool * 1905 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, 1906 uint32_t cache_size, uint16_t user_data_size, int socket_id) 1907 { 1908 struct rte_mempool *mp; 1909 struct rte_cryptodev_asym_session_pool_private_data *pool_priv; 1910 uint32_t obj_sz, obj_sz_aligned; 1911 uint8_t dev_id; 1912 unsigned int priv_sz, max_priv_sz = 0; 1913 1914 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) 1915 if (rte_cryptodev_is_valid_dev(dev_id)) { 1916 priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id); 1917 if (priv_sz > max_priv_sz) 1918 max_priv_sz = priv_sz; 1919 } 1920 if (max_priv_sz == 0) { 1921 CDEV_LOG_INFO("Could not set max private session size"); 1922 return NULL; 1923 } 1924 1925 obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz + 1926 user_data_size; 1927 obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE); 1928 1929 mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size, 1930 (uint32_t)(sizeof(*pool_priv)), 1931 NULL, NULL, NULL, NULL, 1932 socket_id, 0); 1933 if (mp == NULL) { 1934 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d", 1935 __func__, name, rte_errno); 1936 return NULL; 1937 } 1938 1939 pool_priv = rte_mempool_get_priv(mp); 1940 if (!pool_priv) { 1941 CDEV_LOG_ERR("%s(name=%s) failed to get private data", 1942 __func__, name); 1943 rte_mempool_free(mp); 1944 return NULL; 1945 } 1946 pool_priv->max_priv_session_sz = max_priv_sz; 1947 pool_priv->user_data_sz = user_data_size; 1948 1949 rte_cryptodev_trace_asym_session_pool_create(name, nb_elts, 1950 user_data_size, cache_size, mp); 1951 return mp; 1952 } 1953 1954 void * 1955 rte_cryptodev_sym_session_create(uint8_t dev_id, 1956 struct rte_crypto_sym_xform *xforms, 1957 struct rte_mempool *mp) 1958 { 1959 struct rte_cryptodev *dev; 1960 struct rte_cryptodev_sym_session *sess; 1961 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 1962 uint32_t sess_priv_sz; 1963 int ret; 1964 1965 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1966 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 1967 rte_errno = EINVAL; 1968 return NULL; 1969 } 1970 1971 if (xforms == NULL) { 1972 CDEV_LOG_ERR("Invalid xform\n"); 1973 rte_errno = EINVAL; 1974 return NULL; 1975 } 1976 1977 sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id); 1978 if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) { 1979 CDEV_LOG_ERR("Invalid mempool"); 1980 rte_errno = EINVAL; 1981 return NULL; 1982 } 1983 1984 dev = rte_cryptodev_pmd_get_dev(dev_id); 1985 1986 /* Allocate a session structure from the session pool */ 1987 if (rte_mempool_get(mp, (void **)&sess)) { 1988 CDEV_LOG_ERR("couldn't get object from session mempool"); 1989 rte_errno = ENOMEM; 1990 return NULL; 1991 } 1992 1993 pool_priv = rte_mempool_get_priv(mp); 1994 sess->driver_id = dev->driver_id; 1995 sess->sess_data_sz = pool_priv->sess_data_sz; 1996 sess->user_data_sz = pool_priv->user_data_sz; 1997 sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) + 1998 offsetof(struct rte_cryptodev_sym_session, driver_priv_data); 1999 2000 if (dev->dev_ops->sym_session_configure == NULL) { 2001 rte_errno = ENOTSUP; 2002 goto error_exit; 2003 } 2004 memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz); 2005 2006 ret = dev->dev_ops->sym_session_configure(dev, xforms, sess); 2007 if (ret < 0) { 2008 rte_errno = -ret; 2009 goto error_exit; 2010 } 2011 sess->driver_id = dev->driver_id; 2012 2013 rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp); 2014 2015 return (void *)sess; 2016 error_exit: 2017 rte_mempool_put(mp, (void *)sess); 2018 return NULL; 2019 } 2020 2021 int 2022 rte_cryptodev_asym_session_create(uint8_t dev_id, 2023 struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp, 2024 void **session) 2025 { 2026 struct rte_cryptodev_asym_session *sess; 2027 uint32_t session_priv_data_sz; 2028 struct rte_cryptodev_asym_session_pool_private_data *pool_priv; 2029 unsigned int session_header_size = 2030 rte_cryptodev_asym_get_header_session_size(); 2031 struct rte_cryptodev *dev; 2032 int ret; 2033 2034 if (!rte_cryptodev_is_valid_dev(dev_id)) { 2035 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2036 return -EINVAL; 2037 } 2038 2039 dev = rte_cryptodev_pmd_get_dev(dev_id); 2040 2041 if (dev == NULL) 2042 return -EINVAL; 2043 2044 if (!mp) { 2045 CDEV_LOG_ERR("invalid mempool"); 2046 return -EINVAL; 2047 } 2048 2049 session_priv_data_sz = rte_cryptodev_asym_get_private_session_size( 2050 dev_id); 2051 pool_priv = rte_mempool_get_priv(mp); 2052 2053 if (pool_priv->max_priv_session_sz < session_priv_data_sz) { 2054 CDEV_LOG_DEBUG( 2055 "The private session data size used when creating the mempool is smaller than this device's private session data."); 2056 return -EINVAL; 2057 } 2058 2059 /* Verify if provided mempool can hold elements big enough. */ 2060 if (mp->elt_size < session_header_size + session_priv_data_sz) { 2061 CDEV_LOG_ERR( 2062 "mempool elements too small to hold session objects"); 2063 return -EINVAL; 2064 } 2065 2066 /* Allocate a session structure from the session pool */ 2067 if (rte_mempool_get(mp, session)) { 2068 CDEV_LOG_ERR("couldn't get object from session mempool"); 2069 return -ENOMEM; 2070 } 2071 2072 sess = *session; 2073 sess->driver_id = dev->driver_id; 2074 sess->user_data_sz = pool_priv->user_data_sz; 2075 sess->max_priv_data_sz = pool_priv->max_priv_session_sz; 2076 2077 /* Clear device session pointer.*/ 2078 memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz); 2079 2080 if (*dev->dev_ops->asym_session_configure == NULL) 2081 return -ENOTSUP; 2082 2083 if (sess->sess_private_data[0] == 0) { 2084 ret = dev->dev_ops->asym_session_configure(dev, xforms, sess); 2085 if (ret < 0) { 2086 CDEV_LOG_ERR( 2087 "dev_id %d failed to configure session details", 2088 dev_id); 2089 return ret; 2090 } 2091 } 2092 2093 rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess); 2094 return 0; 2095 } 2096 2097 int 2098 rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess) 2099 { 2100 struct rte_cryptodev *dev; 2101 struct rte_mempool *sess_mp; 2102 struct rte_cryptodev_sym_session *sess = _sess; 2103 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 2104 2105 if (sess == NULL) 2106 return -EINVAL; 2107 2108 if (!rte_cryptodev_is_valid_dev(dev_id)) { 2109 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2110 return -EINVAL; 2111 } 2112 2113 dev = rte_cryptodev_pmd_get_dev(dev_id); 2114 2115 if (dev == NULL || sess == NULL) 2116 return -EINVAL; 2117 2118 sess_mp = rte_mempool_from_obj(sess); 2119 if (!sess_mp) 2120 return -EINVAL; 2121 pool_priv = rte_mempool_get_priv(sess_mp); 2122 2123 if (sess->driver_id != dev->driver_id) { 2124 CDEV_LOG_ERR("Session created by driver %u but freed by %u", 2125 sess->driver_id, dev->driver_id); 2126 return -EINVAL; 2127 } 2128 2129 if (*dev->dev_ops->sym_session_clear == NULL) 2130 return -ENOTSUP; 2131 2132 dev->dev_ops->sym_session_clear(dev, sess); 2133 2134 memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz); 2135 2136 /* Return session to mempool */ 2137 rte_mempool_put(sess_mp, sess); 2138 2139 rte_cryptodev_trace_sym_session_free(dev_id, sess); 2140 return 0; 2141 } 2142 2143 int 2144 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess) 2145 { 2146 struct rte_mempool *sess_mp; 2147 struct rte_cryptodev *dev; 2148 2149 if (!rte_cryptodev_is_valid_dev(dev_id)) { 2150 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2151 return -EINVAL; 2152 } 2153 2154 dev = rte_cryptodev_pmd_get_dev(dev_id); 2155 2156 if (dev == NULL || sess == NULL) 2157 return -EINVAL; 2158 2159 if (*dev->dev_ops->asym_session_clear == NULL) 2160 return -ENOTSUP; 2161 2162 dev->dev_ops->asym_session_clear(dev, sess); 2163 2164 rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata); 2165 2166 /* Return session to mempool */ 2167 sess_mp = rte_mempool_from_obj(sess); 2168 rte_mempool_put(sess_mp, sess); 2169 2170 rte_cryptodev_trace_asym_session_free(dev_id, sess); 2171 return 0; 2172 } 2173 2174 unsigned int 2175 rte_cryptodev_asym_get_header_session_size(void) 2176 { 2177 return sizeof(struct rte_cryptodev_asym_session); 2178 } 2179 2180 unsigned int 2181 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id) 2182 { 2183 struct rte_cryptodev *dev; 2184 unsigned int priv_sess_size; 2185 2186 if (!rte_cryptodev_is_valid_dev(dev_id)) 2187 return 0; 2188 2189 dev = rte_cryptodev_pmd_get_dev(dev_id); 2190 2191 if (*dev->dev_ops->sym_session_get_size == NULL) 2192 return 0; 2193 2194 priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev); 2195 2196 rte_cryptodev_trace_sym_get_private_session_size(dev_id, 2197 priv_sess_size); 2198 2199 return priv_sess_size; 2200 } 2201 2202 unsigned int 2203 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id) 2204 { 2205 struct rte_cryptodev *dev; 2206 unsigned int priv_sess_size; 2207 2208 if (!rte_cryptodev_is_valid_dev(dev_id)) 2209 return 0; 2210 2211 dev = rte_cryptodev_pmd_get_dev(dev_id); 2212 2213 if (*dev->dev_ops->asym_session_get_size == NULL) 2214 return 0; 2215 2216 priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev); 2217 2218 rte_cryptodev_trace_asym_get_private_session_size(dev_id, 2219 priv_sess_size); 2220 2221 return priv_sess_size; 2222 } 2223 2224 int 2225 rte_cryptodev_sym_session_set_user_data(void *_sess, void *data, 2226 uint16_t size) 2227 { 2228 struct rte_cryptodev_sym_session *sess = _sess; 2229 2230 if (sess == NULL) 2231 return -EINVAL; 2232 2233 if (sess->user_data_sz < size) 2234 return -ENOMEM; 2235 2236 rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size); 2237 2238 rte_cryptodev_trace_sym_session_set_user_data(sess, data, size); 2239 2240 return 0; 2241 } 2242 2243 void * 2244 rte_cryptodev_sym_session_get_user_data(void *_sess) 2245 { 2246 struct rte_cryptodev_sym_session *sess = _sess; 2247 void *data = NULL; 2248 2249 if (sess == NULL || sess->user_data_sz == 0) 2250 return NULL; 2251 2252 data = (void *)(sess->driver_priv_data + sess->sess_data_sz); 2253 2254 rte_cryptodev_trace_sym_session_get_user_data(sess, data); 2255 2256 return data; 2257 } 2258 2259 int 2260 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size) 2261 { 2262 struct rte_cryptodev_asym_session *sess = session; 2263 if (sess == NULL) 2264 return -EINVAL; 2265 2266 if (sess->user_data_sz < size) 2267 return -ENOMEM; 2268 2269 rte_memcpy(sess->sess_private_data + 2270 sess->max_priv_data_sz, 2271 data, size); 2272 2273 rte_cryptodev_trace_asym_session_set_user_data(sess, data, size); 2274 2275 return 0; 2276 } 2277 2278 void * 2279 rte_cryptodev_asym_session_get_user_data(void *session) 2280 { 2281 struct rte_cryptodev_asym_session *sess = session; 2282 void *data = NULL; 2283 2284 if (sess == NULL || sess->user_data_sz == 0) 2285 return NULL; 2286 2287 data = (void *)(sess->sess_private_data + sess->max_priv_data_sz); 2288 2289 rte_cryptodev_trace_asym_session_get_user_data(sess, data); 2290 2291 return data; 2292 } 2293 2294 static inline void 2295 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum) 2296 { 2297 uint32_t i; 2298 for (i = 0; i < vec->num; i++) 2299 vec->status[i] = errnum; 2300 } 2301 2302 uint32_t 2303 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, 2304 void *_sess, union rte_crypto_sym_ofs ofs, 2305 struct rte_crypto_sym_vec *vec) 2306 { 2307 struct rte_cryptodev *dev; 2308 struct rte_cryptodev_sym_session *sess = _sess; 2309 2310 if (!rte_cryptodev_is_valid_dev(dev_id)) { 2311 sym_crypto_fill_status(vec, EINVAL); 2312 return 0; 2313 } 2314 2315 dev = rte_cryptodev_pmd_get_dev(dev_id); 2316 2317 if (*dev->dev_ops->sym_cpu_process == NULL || 2318 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) { 2319 sym_crypto_fill_status(vec, ENOTSUP); 2320 return 0; 2321 } 2322 2323 rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess); 2324 2325 return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec); 2326 } 2327 2328 int 2329 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id) 2330 { 2331 struct rte_cryptodev *dev; 2332 int32_t size = sizeof(struct rte_crypto_raw_dp_ctx); 2333 int32_t priv_size; 2334 2335 if (!rte_cryptodev_is_valid_dev(dev_id)) 2336 return -EINVAL; 2337 2338 dev = rte_cryptodev_pmd_get_dev(dev_id); 2339 2340 if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL || 2341 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) { 2342 return -ENOTSUP; 2343 } 2344 2345 priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev); 2346 if (priv_size < 0) 2347 return -ENOTSUP; 2348 2349 rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id); 2350 2351 return RTE_ALIGN_CEIL((size + priv_size), 8); 2352 } 2353 2354 int 2355 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id, 2356 struct rte_crypto_raw_dp_ctx *ctx, 2357 enum rte_crypto_op_sess_type sess_type, 2358 union rte_cryptodev_session_ctx session_ctx, 2359 uint8_t is_update) 2360 { 2361 struct rte_cryptodev *dev; 2362 2363 if (!rte_cryptodev_get_qp_status(dev_id, qp_id)) 2364 return -EINVAL; 2365 2366 dev = rte_cryptodev_pmd_get_dev(dev_id); 2367 if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP) 2368 || dev->dev_ops->sym_configure_raw_dp_ctx == NULL) 2369 return -ENOTSUP; 2370 2371 rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type); 2372 2373 return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx, 2374 sess_type, session_ctx, is_update); 2375 } 2376 2377 int 2378 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess, 2379 enum rte_crypto_op_type op_type, 2380 enum rte_crypto_op_sess_type sess_type, 2381 void *ev_mdata, 2382 uint16_t size) 2383 { 2384 struct rte_cryptodev *dev; 2385 2386 if (sess == NULL || ev_mdata == NULL) 2387 return -EINVAL; 2388 2389 if (!rte_cryptodev_is_valid_dev(dev_id)) 2390 goto skip_pmd_op; 2391 2392 dev = rte_cryptodev_pmd_get_dev(dev_id); 2393 if (dev->dev_ops->session_ev_mdata_set == NULL) 2394 goto skip_pmd_op; 2395 2396 rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type, 2397 sess_type, ev_mdata, size); 2398 2399 return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type, 2400 sess_type, ev_mdata); 2401 2402 skip_pmd_op: 2403 if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) 2404 return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata, 2405 size); 2406 else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { 2407 struct rte_cryptodev_asym_session *s = sess; 2408 2409 if (s->event_mdata == NULL) { 2410 s->event_mdata = rte_malloc(NULL, size, 0); 2411 if (s->event_mdata == NULL) 2412 return -ENOMEM; 2413 } 2414 rte_memcpy(s->event_mdata, ev_mdata, size); 2415 2416 return 0; 2417 } else 2418 return -ENOTSUP; 2419 } 2420 2421 uint32_t 2422 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx, 2423 struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, 2424 void **user_data, int *enqueue_status) 2425 { 2426 return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec, 2427 ofs, user_data, enqueue_status); 2428 } 2429 2430 int 2431 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx, 2432 uint32_t n) 2433 { 2434 return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n); 2435 } 2436 2437 uint32_t 2438 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx, 2439 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, 2440 uint32_t max_nb_to_dequeue, 2441 rte_cryptodev_raw_post_dequeue_t post_dequeue, 2442 void **out_user_data, uint8_t is_user_data_array, 2443 uint32_t *n_success_jobs, int *status) 2444 { 2445 return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data, 2446 get_dequeue_count, max_nb_to_dequeue, post_dequeue, 2447 out_user_data, is_user_data_array, n_success_jobs, status); 2448 } 2449 2450 int 2451 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx, 2452 uint32_t n) 2453 { 2454 return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n); 2455 } 2456 2457 /** Initialise rte_crypto_op mempool element */ 2458 static void 2459 rte_crypto_op_init(struct rte_mempool *mempool, 2460 void *opaque_arg, 2461 void *_op_data, 2462 __rte_unused unsigned i) 2463 { 2464 struct rte_crypto_op *op = _op_data; 2465 enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg; 2466 2467 memset(_op_data, 0, mempool->elt_size); 2468 2469 __rte_crypto_op_reset(op, type); 2470 2471 op->phys_addr = rte_mem_virt2iova(_op_data); 2472 op->mempool = mempool; 2473 } 2474 2475 2476 struct rte_mempool * 2477 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type, 2478 unsigned nb_elts, unsigned cache_size, uint16_t priv_size, 2479 int socket_id) 2480 { 2481 struct rte_crypto_op_pool_private *priv; 2482 2483 unsigned elt_size = sizeof(struct rte_crypto_op) + 2484 priv_size; 2485 2486 if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { 2487 elt_size += sizeof(struct rte_crypto_sym_op); 2488 } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { 2489 elt_size += sizeof(struct rte_crypto_asym_op); 2490 } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) { 2491 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op), 2492 sizeof(struct rte_crypto_asym_op)); 2493 } else { 2494 CDEV_LOG_ERR("Invalid op_type"); 2495 return NULL; 2496 } 2497 2498 /* lookup mempool in case already allocated */ 2499 struct rte_mempool *mp = rte_mempool_lookup(name); 2500 2501 if (mp != NULL) { 2502 priv = (struct rte_crypto_op_pool_private *) 2503 rte_mempool_get_priv(mp); 2504 2505 if (mp->elt_size != elt_size || 2506 mp->cache_size < cache_size || 2507 mp->size < nb_elts || 2508 priv->priv_size < priv_size) { 2509 mp = NULL; 2510 CDEV_LOG_ERR("Mempool %s already exists but with " 2511 "incompatible parameters", name); 2512 return NULL; 2513 } 2514 return mp; 2515 } 2516 2517 mp = rte_mempool_create( 2518 name, 2519 nb_elts, 2520 elt_size, 2521 cache_size, 2522 sizeof(struct rte_crypto_op_pool_private), 2523 NULL, 2524 NULL, 2525 rte_crypto_op_init, 2526 &type, 2527 socket_id, 2528 0); 2529 2530 if (mp == NULL) { 2531 CDEV_LOG_ERR("Failed to create mempool %s", name); 2532 return NULL; 2533 } 2534 2535 priv = (struct rte_crypto_op_pool_private *) 2536 rte_mempool_get_priv(mp); 2537 2538 priv->priv_size = priv_size; 2539 priv->type = type; 2540 2541 rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp); 2542 return mp; 2543 } 2544 2545 int 2546 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix) 2547 { 2548 struct rte_cryptodev *dev = NULL; 2549 uint32_t i = 0; 2550 2551 if (name == NULL) 2552 return -EINVAL; 2553 2554 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { 2555 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, 2556 "%s_%u", dev_name_prefix, i); 2557 2558 if (ret < 0) 2559 return ret; 2560 2561 dev = rte_cryptodev_pmd_get_named_dev(name); 2562 if (!dev) 2563 return 0; 2564 } 2565 2566 return -1; 2567 } 2568 2569 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver); 2570 2571 static struct cryptodev_driver_list cryptodev_driver_list = 2572 TAILQ_HEAD_INITIALIZER(cryptodev_driver_list); 2573 2574 int 2575 rte_cryptodev_driver_id_get(const char *name) 2576 { 2577 struct cryptodev_driver *driver; 2578 const char *driver_name; 2579 int driver_id = -1; 2580 2581 if (name == NULL) { 2582 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL"); 2583 return -1; 2584 } 2585 2586 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { 2587 driver_name = driver->driver->name; 2588 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) { 2589 driver_id = driver->id; 2590 break; 2591 } 2592 } 2593 2594 rte_cryptodev_trace_driver_id_get(name, driver_id); 2595 2596 return driver_id; 2597 } 2598 2599 const char * 2600 rte_cryptodev_name_get(uint8_t dev_id) 2601 { 2602 struct rte_cryptodev *dev; 2603 2604 if (!rte_cryptodev_is_valid_device_data(dev_id)) { 2605 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2606 return NULL; 2607 } 2608 2609 dev = rte_cryptodev_pmd_get_dev(dev_id); 2610 if (dev == NULL) 2611 return NULL; 2612 2613 rte_cryptodev_trace_name_get(dev_id, dev->data->name); 2614 2615 return dev->data->name; 2616 } 2617 2618 const char * 2619 rte_cryptodev_driver_name_get(uint8_t driver_id) 2620 { 2621 struct cryptodev_driver *driver; 2622 2623 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { 2624 if (driver->id == driver_id) { 2625 rte_cryptodev_trace_driver_name_get(driver_id, 2626 driver->driver->name); 2627 return driver->driver->name; 2628 } 2629 } 2630 return NULL; 2631 } 2632 2633 uint8_t 2634 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv, 2635 const struct rte_driver *drv) 2636 { 2637 crypto_drv->driver = drv; 2638 crypto_drv->id = nb_drivers; 2639 2640 TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next); 2641 2642 rte_cryptodev_trace_allocate_driver(drv->name); 2643 2644 return nb_drivers++; 2645 } 2646 2647 RTE_INIT(cryptodev_init_fp_ops) 2648 { 2649 uint32_t i; 2650 2651 for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++) 2652 cryptodev_fp_ops_reset(rte_crypto_fp_ops + i); 2653 } 2654 2655 static int 2656 cryptodev_handle_dev_list(const char *cmd __rte_unused, 2657 const char *params __rte_unused, 2658 struct rte_tel_data *d) 2659 { 2660 int dev_id; 2661 2662 if (rte_cryptodev_count() < 1) 2663 return -EINVAL; 2664 2665 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 2666 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) 2667 if (rte_cryptodev_is_valid_dev(dev_id)) 2668 rte_tel_data_add_array_int(d, dev_id); 2669 2670 return 0; 2671 } 2672 2673 static int 2674 cryptodev_handle_dev_info(const char *cmd __rte_unused, 2675 const char *params, struct rte_tel_data *d) 2676 { 2677 struct rte_cryptodev_info cryptodev_info; 2678 int dev_id; 2679 char *end_param; 2680 2681 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 2682 return -EINVAL; 2683 2684 dev_id = strtoul(params, &end_param, 0); 2685 if (*end_param != '\0') 2686 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2687 if (!rte_cryptodev_is_valid_dev(dev_id)) 2688 return -EINVAL; 2689 2690 rte_cryptodev_info_get(dev_id, &cryptodev_info); 2691 2692 rte_tel_data_start_dict(d); 2693 rte_tel_data_add_dict_string(d, "device_name", 2694 cryptodev_info.device->name); 2695 rte_tel_data_add_dict_int(d, "max_nb_queue_pairs", 2696 cryptodev_info.max_nb_queue_pairs); 2697 2698 return 0; 2699 } 2700 2701 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s) 2702 2703 static int 2704 cryptodev_handle_dev_stats(const char *cmd __rte_unused, 2705 const char *params, 2706 struct rte_tel_data *d) 2707 { 2708 struct rte_cryptodev_stats cryptodev_stats; 2709 int dev_id, ret; 2710 char *end_param; 2711 2712 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 2713 return -EINVAL; 2714 2715 dev_id = strtoul(params, &end_param, 0); 2716 if (*end_param != '\0') 2717 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2718 if (!rte_cryptodev_is_valid_dev(dev_id)) 2719 return -EINVAL; 2720 2721 ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats); 2722 if (ret < 0) 2723 return ret; 2724 2725 rte_tel_data_start_dict(d); 2726 ADD_DICT_STAT(enqueued_count); 2727 ADD_DICT_STAT(dequeued_count); 2728 ADD_DICT_STAT(enqueue_err_count); 2729 ADD_DICT_STAT(dequeue_err_count); 2730 2731 return 0; 2732 } 2733 2734 #define CRYPTO_CAPS_SZ \ 2735 (RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \ 2736 sizeof(uint64_t)) / \ 2737 sizeof(uint64_t)) 2738 2739 static int 2740 crypto_caps_array(struct rte_tel_data *d, 2741 const struct rte_cryptodev_capabilities *capabilities) 2742 { 2743 const struct rte_cryptodev_capabilities *dev_caps; 2744 uint64_t caps_val[CRYPTO_CAPS_SZ]; 2745 unsigned int i = 0, j; 2746 2747 rte_tel_data_start_array(d, RTE_TEL_U64_VAL); 2748 2749 while ((dev_caps = &capabilities[i++])->op != 2750 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 2751 memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0])); 2752 rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0])); 2753 for (j = 0; j < CRYPTO_CAPS_SZ; j++) 2754 rte_tel_data_add_array_u64(d, caps_val[j]); 2755 } 2756 2757 return i; 2758 } 2759 2760 static int 2761 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params, 2762 struct rte_tel_data *d) 2763 { 2764 struct rte_cryptodev_info dev_info; 2765 struct rte_tel_data *crypto_caps; 2766 int crypto_caps_n; 2767 char *end_param; 2768 int dev_id; 2769 2770 if (!params || strlen(params) == 0 || !isdigit(*params)) 2771 return -EINVAL; 2772 2773 dev_id = strtoul(params, &end_param, 0); 2774 if (*end_param != '\0') 2775 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2776 if (!rte_cryptodev_is_valid_dev(dev_id)) 2777 return -EINVAL; 2778 2779 rte_tel_data_start_dict(d); 2780 crypto_caps = rte_tel_data_alloc(); 2781 if (!crypto_caps) 2782 return -ENOMEM; 2783 2784 rte_cryptodev_info_get(dev_id, &dev_info); 2785 crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities); 2786 rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0); 2787 rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n); 2788 2789 return 0; 2790 } 2791 2792 RTE_INIT(cryptodev_init_telemetry) 2793 { 2794 rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info, 2795 "Returns information for a cryptodev. Parameters: int dev_id"); 2796 rte_telemetry_register_cmd("/cryptodev/list", 2797 cryptodev_handle_dev_list, 2798 "Returns list of available crypto devices by IDs. No parameters."); 2799 rte_telemetry_register_cmd("/cryptodev/stats", 2800 cryptodev_handle_dev_stats, 2801 "Returns the stats for a cryptodev. Parameters: int dev_id"); 2802 rte_telemetry_register_cmd("/cryptodev/caps", 2803 cryptodev_handle_dev_caps, 2804 "Returns the capabilities for a cryptodev. Parameters: int dev_id"); 2805 } 2806