/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
	.devs			= rte_crypto_devices,
	.data			= { NULL },
	.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for the callback,
 * and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm string identifiers.
 * These can be used in the application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * The crypto cipher operation string identifiers.
 * These can be used in the application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * These can be used in the application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm string identifiers.
 * These can be used in the application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation string identifiers.
 * These can be used in the application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation string identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation string identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the sym session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
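
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * turning user-supplied algorithm names, e.g. from a command line, into
 * enum values with the parsers above.
 *
 *	enum rte_crypto_cipher_algorithm cipher;
 *	enum rte_crypto_aead_algorithm aead;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&cipher, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 *	if (rte_cryptodev_get_aead_algo_enum(&aead, "aes-gcm") < 0)
 *		printf("unknown AEAD algorithm\n");
 */
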
/**
 * The crypto auth operation string identifiers.
 * These can be used in the application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}
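
/*
 * Example (editor's illustrative sketch): querying a device for AES-CBC
 * support and validating the intended key and IV sizes against the
 * advertised ranges. "dev_id" is assumed to be a valid device id.
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		printf("AES-CBC with a 128-bit key is not supported\n");
 */
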
int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen)
{
	/* no need to check against the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check if given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max thread set to 1, as one DP thread accessing a queue-pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name) == 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
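
/*
 * Example (editor's illustrative sketch): enumerating the devices bound
 * to one driver and preferring a device on the caller's NUMA socket.
 * The driver name is only an assumption for illustration.
 *
 *	uint8_t devs[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb, i;
 *
 *	nb = rte_cryptodev_devices_get("crypto_aesni_mb", devs,
 *			RTE_DIM(devs));
 *	for (i = 0; i < nb; i++)
 *		if (rte_cryptodev_socket_id(devs[i]) == (int)rte_socket_id())
 *			break;
 *
 * On success, devs[i] identifies a device local to the caller.
 */
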
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already allocated!",
				name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
				dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
						dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
					"nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}

	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
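
/*
 * Example (editor's illustrative sketch): the expected configuration
 * order. A device is configured, its queue pairs are set up, then it is
 * started; "session_pool" and "session_priv_pool" are assumed to be
 * mempools created beforehand.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = (int)rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.mp_session_private = session_priv_pool,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
 */
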
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}
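
/*
 * Example (editor's illustrative sketch): a data-path enqueue callback
 * that counts submitted ops. Removal goes through the matching remove
 * API, which synchronizes on the queue pair's QSBR variable before the
 * callback memory is freed. "counter" is an assumed application
 * variable.
 *
 *	static uint16_t
 *	count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *		 uint16_t nb_ops, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, 0, count_cb, &counter);
 *	...
 *	rte_cryptodev_remove_enq_callback(dev_id, 0, cb);
 */
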
int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
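
/*
 * Example (editor's illustrative sketch): polling device statistics.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64 "\n",
 *		       stats.enqueued_count, stats.dequeued_count,
 *		       stats.enqueue_err_count);
 */
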
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
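
/*
 * Example (editor's illustrative sketch): registering for device error
 * events. The handler runs from rte_cryptodev_pmd_callback_process()
 * with the callback marked active, so unregistering a callback that is
 * currently executing fails with -EAGAIN.
 *
 *	static void
 *	err_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *		     void *cb_arg)
 *	{
 *		printf("device %u raised an error event\n", dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			err_event_cb, NULL);
 */
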
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}

struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	uint32_t obj_sz, obj_sz_aligned;
	uint8_t dev_id, priv_sz, max_priv_sz = 0;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id)) {
			priv_sz = rte_cryptodev_asym_get_private_session_size(
					dev_id);
			if (priv_sz > max_priv_sz)
				max_priv_sz = priv_sz;
		}
	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("Could not set max private session size");
		return NULL;
	}

	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);

	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}
	pool_priv->max_priv_session_sz = max_priv_sz;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
		user_data_size, cache_size, mp);
	return mp;
}

static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->nb_drivers != nb_drivers ||
			mp->elt_size <
				rte_cryptodev_sym_get_header_session_size()
				+ pool_priv->user_data_sz)
		return 0;

	return 1;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
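
/*
 * Example (editor's illustrative sketch): creating a session header
 * pool, allocating a session from it and binding the session to one
 * device. "xform" is assumed to be a populated rte_crypto_sym_xform and
 * "priv_pool" a mempool sized for the device's private session data.
 *
 *	struct rte_mempool *pool = rte_cryptodev_sym_session_pool_create(
 *			"sess_hdr_pool", 1024, 0, 32, 0, rte_socket_id());
 *	struct rte_cryptodev_sym_session *sess =
 *			rte_cryptodev_sym_session_create(pool);
 *
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			priv_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */
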
int
rte_cryptodev_asym_session_create(uint8_t dev_id,
		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
		void **session)
{
	struct rte_cryptodev_asym_session *sess;
	uint32_t session_priv_data_sz;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	unsigned int session_header_size =
			rte_cryptodev_asym_get_header_session_size();
	struct rte_cryptodev *dev;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return -EINVAL;

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool");
		return -EINVAL;
	}

	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
			dev_id);
	pool_priv = rte_mempool_get_priv(mp);

	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
		CDEV_LOG_DEBUG(
			"The private session data size used when creating the mempool is smaller than this device's private session data.");
		return -EINVAL;
	}

	/* Verify if provided mempool can hold elements big enough. */
	if (mp->elt_size < session_header_size + session_priv_data_sz) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return -EINVAL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, session)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return -ENOMEM;
	}

	sess = *session;
	sess->driver_id = dev->driver_id;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;

	/* Clear device session pointer. */
	memset(sess->sess_private_data, 0,
			session_priv_data_sz + sess->user_data_sz);

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure, -ENOTSUP);

	if (sess->sess_private_data[0] == 0) {
		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
	return 0;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
{
	struct rte_mempool *sess_mp;
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(dev_id, sess);
	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all necessary information to safely clear or free
	 * all sessions.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	return sizeof(struct rte_cryptodev_asym_session);
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(
		struct rte_cryptodev_sym_session *sess,
		void *data,
		uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
		struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}

int
rte_cryptodev_asym_session_set_user_data(void *session, void *data,
		uint16_t size)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_private_data + sess->max_priv_data_sz,
			data, size);
	return 0;
}

void *
rte_cryptodev_asym_session_get_user_data(void *session)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_private_data + sess->max_priv_data_sz);
}
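
/*
 * Example (editor's illustrative sketch): attaching per-session
 * application state. The session pool must have been created with a
 * non-zero user_data_size for the copy to succeed.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	struct app_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0)
 *		p = rte_cryptodev_sym_session_get_user_data(sess);
 */
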

static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}

int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
	int32_t priv_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
		return -ENOTSUP;

	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
	if (priv_size < 0)
		return -ENOTSUP;

	return RTE_ALIGN_CEIL((size + priv_size), 8);
}

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
			sess_type, session_ctx, is_update);
}

uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status)
{
	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
			ofs, user_data, enqueue_status);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *status)
{
	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
			get_dequeue_count, max_nb_to_dequeue, post_dequeue,
			out_user_data, is_user_data_array, n_success_jobs,
			status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}
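
/*
 * Usage sketch (illustrative only, not part of the library API):
 * sizing, allocating and configuring a raw data-path context for one
 * queue pair. dev_id, qp_id and sess are hypothetical and assumed
 * already set up by the caller, who also owns the returned buffer.
 */
static __rte_unused struct rte_crypto_raw_dp_ctx *
example_raw_dp_ctx_setup(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	union rte_cryptodev_session_ctx sess_ctx;
	struct rte_crypto_raw_dp_ctx *ctx;
	int size;

	/* Driver-specific size, 8-byte aligned; negative if unsupported. */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return NULL;

	ctx = rte_zmalloc(NULL, size, 0);
	if (ctx == NULL)
		return NULL;

	sess_ctx.crypto_sess = sess;
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		rte_free(ctx);
		return NULL;
	}

	return ctx;
}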

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
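
/*
 * Usage sketch (illustrative only, not part of the library API):
 * creating a symmetric op pool with room for 64 bytes of per-op
 * private data and cycling one op through it. The pool name and all
 * sizes are hypothetical.
 */
static __rte_unused void
example_op_pool_use(void)
{
	struct rte_mempool *pool;
	struct rte_crypto_op *op;

	pool = rte_crypto_op_pool_create("example_op_pool",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			8192, 128, 64, SOCKET_ID_ANY);
	if (pool == NULL)
		return;

	/* Ops are handed out already reset for the pool's op type. */
	op = rte_crypto_op_alloc(pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op != NULL)
		rte_crypto_op_free(op);
}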

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}

RTE_INIT(cryptodev_init_fp_ops)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
}

static int
cryptodev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	if (rte_cryptodev_count() < 1)
		return -EINVAL;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

static int
cryptodev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_cryptodev_info cryptodev_info;
	int dev_id;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_cryptodev_info_get(dev_id, &cryptodev_info);

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "device_name",
		cryptodev_info.device->name);
	rte_tel_data_add_dict_int(d, "max_nb_queue_pairs",
		cryptodev_info.max_nb_queue_pairs);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)

static int
cryptodev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_stats cryptodev_stats;
	int dev_id, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
	if (ret < 0)
		return ret;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(enqueued_count);
	ADD_DICT_STAT(dequeued_count);
	ADD_DICT_STAT(enqueue_err_count);
	ADD_DICT_STAT(dequeue_err_count);

	return 0;
}
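
/*
 * Usage sketch (illustrative only, not part of the library API):
 * reading the same counters the telemetry handler above exposes,
 * directly from application code. dev_id is hypothetical.
 */
static __rte_unused void
example_dump_stats(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) != 0)
		return;

	printf("dev %" PRIu8 ": enq %" PRIu64 " (err %" PRIu64
			"), deq %" PRIu64 " (err %" PRIu64 ")\n", dev_id,
			stats.enqueued_count, stats.enqueue_err_count,
			stats.dequeued_count, stats.dequeue_err_count);
}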

#define CRYPTO_CAPS_SZ \
	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
			sizeof(uint64_t)) / \
	sizeof(uint64_t))

static int
crypto_caps_array(struct rte_tel_data *d,
		const struct rte_cryptodev_capabilities *capabilities)
{
	const struct rte_cryptodev_capabilities *dev_caps;
	uint64_t caps_val[CRYPTO_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);

	while ((dev_caps = &capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
			rte_tel_data_add_array_u64(d, caps_val[j]);
	}

	/* Exclude the end-of-list marker from the reported count. */
	return i - 1;
}

static int
cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_info dev_info;
	struct rte_tel_data *crypto_caps;
	int crypto_caps_n;
	char *end_param;
	int dev_id;

	if (!params || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_tel_data_start_dict(d);
	crypto_caps = rte_tel_data_alloc();
	if (!crypto_caps)
		return -ENOMEM;

	rte_cryptodev_info_get(dev_id, &dev_info);
	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);

	return 0;
}

RTE_INIT(cryptodev_init_telemetry)
{
	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
			"Returns information for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/list",
			cryptodev_handle_dev_list,
			"Returns list of available crypto devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/cryptodev/stats",
			cryptodev_handle_dev_stats,
			"Returns the stats for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/caps",
			cryptodev_handle_dev_caps,
			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
}
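
/*
 * Usage note: once a DPDK application is running, the commands
 * registered above can be queried over the telemetry socket, e.g.
 * with the usertools/dpdk-telemetry.py script shipped with DPDK:
 *
 *   --> /cryptodev/list
 *   --> /cryptodev/stats,0
 *
 * where the value after the comma is the "int dev_id" parameter named
 * in the help strings.
 */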