/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
	.devs			= rte_crypto_devices,
	.data			= { NULL },
	.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr"
};

/**
 * The crypto cipher operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC]	= "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC]	= "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC]	= "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation strings identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation strings identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * Asymmetric crypto key exchange operation strings identifiers.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

/**
 * The private data structure stored in the sym session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
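/*
 * A minimal usage sketch (an illustration, not library code): mapping
 * command-line strings onto the enums via the helpers above. The literal
 * strings are taken from the tables defined earlier in this file.
 *
 *	enum rte_crypto_cipher_algorithm cipher;
 *	enum rte_crypto_auth_algorithm auth;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&cipher, "aes-cbc") < 0)
 *		printf("unknown cipher string\n");
 *	if (rte_cryptodev_get_auth_algo_enum(&auth, "sha1-hmac") < 0)
 *		printf("unknown auth string\n");
 */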
/**
 * The crypto auth operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen)
{
	/* no need to check for limits, if min or max = 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check if the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}
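/*
 * A minimal capability-check sketch (an illustration, not library code):
 * query a device for AES-CBC support and validate a 128-bit key with a
 * 16-byte IV against the advertised ranges. dev_id 0 is a placeholder.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(0, &idx);
 *
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *		printf("aes-cbc-128 supported\n");
 */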
/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max thread set to 1, as one DP thread accessing a queue-pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}
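/*
 * A minimal feature-flag sketch (an illustration, not library code):
 * walk the single-bit feature flags of a device and print the name of
 * each one it advertises. dev_id 0 is a placeholder.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *
 *	rte_cryptodev_info_get(0, &info);
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((info.feature_flags & flag) && name != NULL)
 *			printf("%s\n", name);
 *	}
 */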
unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name) == 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
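/*
 * A minimal enumeration sketch (an illustration, not library code):
 * collect the ids of all devices bound to a given driver and report the
 * NUMA socket each one lives on. The driver name is a placeholder.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u on socket %d\n", ids[i],
 *				rte_cryptodev_socket_id(ids[i]));
 */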
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
					dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
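/*
 * A minimal bring-up sketch (an illustration, not library code):
 * configure a device with one queue pair and start it. The session
 * mempools and descriptor count are placeholders the application must
 * provide.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.mp_session_private = session_priv_pool,
 *	};
 *
 *	if (rte_cryptodev_configure(0, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(0, 0, &qp_conf,
 *			rte_socket_id()) < 0 ||
 *	    rte_cryptodev_start(0) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev 0 setup failed\n");
 */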
queue_pair_id=%d", dev_id, qp_id); 1323 rte_errno = ENOMEM; 1324 return NULL; 1325 } 1326 1327 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1328 1329 cb->fn = cb_fn; 1330 cb->arg = cb_arg; 1331 1332 /* Add the callbacks in fifo order. */ 1333 list = &dev->enq_cbs[qp_id]; 1334 tail = list->next; 1335 1336 if (tail) { 1337 while (tail->next) 1338 tail = tail->next; 1339 /* Stores to cb->fn and cb->param should complete before 1340 * cb is visible to data plane. 1341 */ 1342 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 1343 } else { 1344 /* Stores to cb->fn and cb->param should complete before 1345 * cb is visible to data plane. 1346 */ 1347 __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE); 1348 } 1349 1350 rte_spinlock_unlock(&rte_cryptodev_callback_lock); 1351 1352 return cb; 1353 } 1354 1355 int 1356 rte_cryptodev_remove_enq_callback(uint8_t dev_id, 1357 uint16_t qp_id, 1358 struct rte_cryptodev_cb *cb) 1359 { 1360 struct rte_cryptodev *dev; 1361 struct rte_cryptodev_cb **prev_cb, *curr_cb; 1362 struct rte_cryptodev_cb_rcu *list; 1363 int ret; 1364 1365 ret = -EINVAL; 1366 1367 if (!cb) { 1368 CDEV_LOG_ERR("Callback is NULL"); 1369 return -EINVAL; 1370 } 1371 1372 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1373 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); 1374 return -ENODEV; 1375 } 1376 1377 dev = &rte_crypto_devices[dev_id]; 1378 if (qp_id >= dev->data->nb_queue_pairs) { 1379 CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id); 1380 return -ENODEV; 1381 } 1382 1383 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1384 if (dev->enq_cbs == NULL) { 1385 CDEV_LOG_ERR("Callback not initialized"); 1386 goto cb_err; 1387 } 1388 1389 list = &dev->enq_cbs[qp_id]; 1390 if (list == NULL) { 1391 CDEV_LOG_ERR("Callback list is NULL"); 1392 goto cb_err; 1393 } 1394 1395 if (list->qsbr == NULL) { 1396 CDEV_LOG_ERR("Rcu qsbr is NULL"); 1397 goto cb_err; 1398 } 1399 1400 prev_cb = &list->next; 1401 for (; *prev_cb != NULL; prev_cb = &curr_cb->next) { 1402 curr_cb = *prev_cb; 1403 if (curr_cb == cb) { 1404 /* Remove the user cb from the callback list. 
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	/* Check the dequeue callback list here (the original checked
	 * enq_cbs, a copy-paste slip; both are allocated together in
	 * cryptodev_cb_init).
	 */
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}
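/*
 * A minimal data-path callback sketch (an illustration, not library
 * code). The handler matches rte_cryptodev_callback_fn; counting the
 * enqueued operations stands in for real per-burst processing.
 *
 *	static uint16_t
 *	count_enq(uint16_t dev_id, uint16_t qp_id,
 *			struct rte_crypto_op **ops, uint16_t nb_ops, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static uint64_t enq_count;
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(0, 0, count_enq, &enq_count);
 *	...
 *	rte_cryptodev_remove_enq_callback(0, 0, cb);
 */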
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
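/*
 * A minimal event-callback sketch (an illustration, not library code):
 * register a handler that is invoked through
 * rte_cryptodev_pmd_callback_process() when a device raises an error
 * event. The handler body is a placeholder.
 *
 *	static void
 *	on_crypto_event(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		printf("dev %u raised event %d\n", dev_id, (int)event);
 *	}
 *
 *	rte_cryptodev_callback_register(0, RTE_CRYPTODEV_EVENT_ERROR,
 *			on_crypto_event, NULL);
 */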
CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size, 1794 obj_sz); 1795 else 1796 obj_sz = elt_size; 1797 1798 mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size, 1799 (uint32_t)(sizeof(*pool_priv)), 1800 NULL, NULL, NULL, NULL, 1801 socket_id, 0); 1802 if (mp == NULL) { 1803 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n", 1804 __func__, name, rte_errno); 1805 return NULL; 1806 } 1807 1808 pool_priv = rte_mempool_get_priv(mp); 1809 if (!pool_priv) { 1810 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n", 1811 __func__, name); 1812 rte_mempool_free(mp); 1813 return NULL; 1814 } 1815 1816 pool_priv->nb_drivers = nb_drivers; 1817 pool_priv->user_data_sz = user_data_size; 1818 1819 rte_cryptodev_trace_sym_session_pool_create(name, nb_elts, 1820 elt_size, cache_size, user_data_size, mp); 1821 return mp; 1822 } 1823 1824 struct rte_mempool * 1825 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, 1826 uint32_t cache_size, uint16_t user_data_size, int socket_id) 1827 { 1828 struct rte_mempool *mp; 1829 struct rte_cryptodev_asym_session_pool_private_data *pool_priv; 1830 uint32_t obj_sz, obj_sz_aligned; 1831 uint8_t dev_id; 1832 unsigned int priv_sz, max_priv_sz = 0; 1833 1834 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) 1835 if (rte_cryptodev_is_valid_dev(dev_id)) { 1836 priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id); 1837 if (priv_sz > max_priv_sz) 1838 max_priv_sz = priv_sz; 1839 } 1840 if (max_priv_sz == 0) { 1841 CDEV_LOG_INFO("Could not set max private session size\n"); 1842 return NULL; 1843 } 1844 1845 obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz + 1846 user_data_size; 1847 obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE); 1848 1849 mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size, 1850 (uint32_t)(sizeof(*pool_priv)), 1851 NULL, NULL, NULL, NULL, 1852 socket_id, 0); 1853 if (mp == NULL) { 1854 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n", 1855 __func__, name, rte_errno); 1856 return NULL; 1857 } 1858 1859 pool_priv = rte_mempool_get_priv(mp); 1860 if (!pool_priv) { 1861 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n", 1862 __func__, name); 1863 rte_mempool_free(mp); 1864 return NULL; 1865 } 1866 pool_priv->max_priv_session_sz = max_priv_sz; 1867 pool_priv->user_data_sz = user_data_size; 1868 1869 rte_cryptodev_trace_asym_session_pool_create(name, nb_elts, 1870 user_data_size, cache_size, mp); 1871 return mp; 1872 } 1873 1874 static unsigned int 1875 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess) 1876 { 1877 return (sizeof(sess->sess_data[0]) * sess->nb_drivers) + 1878 sess->user_data_sz; 1879 } 1880 1881 static uint8_t 1882 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp) 1883 { 1884 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 1885 1886 if (!mp) 1887 return 0; 1888 1889 pool_priv = rte_mempool_get_priv(mp); 1890 1891 if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) || 1892 pool_priv->nb_drivers != nb_drivers || 1893 mp->elt_size < 1894 rte_cryptodev_sym_get_header_session_size() 1895 + pool_priv->user_data_sz) 1896 return 0; 1897 1898 return 1; 1899 } 1900 1901 struct rte_cryptodev_sym_session * 1902 rte_cryptodev_sym_session_create(struct rte_mempool *mp) 1903 { 1904 struct rte_cryptodev_sym_session *sess; 1905 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 1906 1907 if (!rte_cryptodev_sym_is_valid_session_pool(mp)) { 1908 CDEV_LOG_ERR("Invalid mempool\n"); 1909 
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
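/*
 * A minimal symmetric-session sketch (an illustration, not library
 * code): create a session header pool, take a session from it and bind
 * it to a device. "xform" and the private-data pool "priv_pool" are
 * placeholders the application must provide.
 *
 *	struct rte_mempool *pool = rte_cryptodev_sym_session_pool_create(
 *			"sess_hdr_pool", 1024,
 *			rte_cryptodev_sym_get_header_session_size(),
 *			32, 0, rte_socket_id());
 *	struct rte_cryptodev_sym_session *sess =
 *			rte_cryptodev_sym_session_create(pool);
 *
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(0, sess, &xform, priv_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */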
int
rte_cryptodev_asym_session_create(uint8_t dev_id,
		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
		void **session)
{
	struct rte_cryptodev_asym_session *sess;
	uint32_t session_priv_data_sz;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	unsigned int session_header_size =
			rte_cryptodev_asym_get_header_session_size();
	struct rte_cryptodev *dev;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return -EINVAL;

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool\n");
		return -EINVAL;
	}

	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
			dev_id);
	pool_priv = rte_mempool_get_priv(mp);

	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
		CDEV_LOG_DEBUG(
			"The private session data size used when creating the mempool is smaller than this device's private session data.");
		return -EINVAL;
	}

	/* Verify if provided mempool can hold elements big enough. */
	if (mp->elt_size < session_header_size + session_priv_data_sz) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return -EINVAL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, session)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return -ENOMEM;
	}

	sess = *session;
	sess->driver_id = dev->driver_id;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;

	/* Clear device session pointer.*/
	memset(sess->sess_private_data, 0,
			session_priv_data_sz + sess->user_data_sz);

	if (*dev->dev_ops->asym_session_configure == NULL)
		return -ENOTSUP;

	if (sess->sess_private_data[0] == 0) {
		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
	return 0;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	if (*dev->dev_ops->sym_session_clear == NULL)
		return -ENOTSUP;

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
{
	struct rte_mempool *sess_mp;
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	if (*dev->dev_ops->asym_session_clear == NULL)
		return -ENOTSUP;

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(dev_id, sess);
	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all necessary information to ensure safe clearing
	 * or freeing of all sessions.
	 */
unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all necessary information to ensure safe clearing
	 * or freeing of all sessions.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	return sizeof(struct rte_cryptodev_asym_session);
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(
		struct rte_cryptodev_sym_session *sess,
		void *data, uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
		struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}

int
rte_cryptodev_asym_session_set_user_data(void *session, void *data,
		uint16_t size)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_private_data + sess->max_priv_data_sz,
			data, size);
	return 0;
}

void *
rte_cryptodev_asym_session_get_user_data(void *session)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_private_data + sess->max_priv_data_sz);
}

static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}
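/*
 * Illustrative sketch, not part of the library: keeping per-flow
 * application state in a session's user data area via the accessors
 * above. The struct and values are hypothetical; the set call fails with
 * -ENOMEM unless the session mempool reserved enough user data space.
 */
struct example_flow_state {
	uint32_t flow_id;
	uint64_t pkts_seen;
};

static __rte_unused void
example_session_user_data(struct rte_cryptodev_sym_session *sess)
{
	struct example_flow_state state = { .flow_id = 7, .pkts_seen = 0 };
	struct example_flow_state *p;

	if (rte_cryptodev_sym_session_set_user_data(sess, &state,
			sizeof(state)) != 0)
		return;

	/* The data lives inside the session object itself. */
	p = rte_cryptodev_sym_session_get_user_data(sess);
	if (p != NULL)
		p->pkts_seen++;
}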
uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
			!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}

int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
	int32_t priv_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
			!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
		return -ENOTSUP;

	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
	if (priv_size < 0)
		return -ENOTSUP;

	return RTE_ALIGN_CEIL((size + priv_size), 8);
}

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
			sess_type, session_ctx, is_update);
}

int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
	enum rte_crypto_op_type op_type,
	enum rte_crypto_op_sess_type sess_type,
	void *ev_mdata,
	uint16_t size)
{
	struct rte_cryptodev *dev;

	if (sess == NULL || ev_mdata == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		goto skip_pmd_op;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->dev_ops->session_ev_mdata_set == NULL)
		goto skip_pmd_op;

	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
			sess_type, ev_mdata);

skip_pmd_op:
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
				size);
	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_cryptodev_asym_session *s = sess;

		if (s->event_mdata == NULL) {
			s->event_mdata = rte_malloc(NULL, size, 0);
			if (s->event_mdata == NULL)
				return -ENOMEM;
		}
		rte_memcpy(s->event_mdata, ev_mdata, size);

		return 0;
	} else
		return -ENOTSUP;
}

uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status)
{
	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
			ofs, user_data, enqueue_status);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
	uint32_t n)
{
	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}
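/*
 * Illustrative sketch, not part of the library: sizing and configuring a
 * raw data-path context for one queue pair with the two functions above.
 * "dev_id", "qp_id" and "sess" are hypothetical; the queue pair must
 * already be set up and the device must advertise
 * RTE_CRYPTODEV_FF_SYM_RAW_DP.
 */
static __rte_unused struct rte_crypto_raw_dp_ctx *
example_raw_dp_ctx_setup(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	union rte_cryptodev_session_ctx sess_ctx;
	struct rte_crypto_raw_dp_ctx *ctx;
	int size;

	/* Driver private data is appended, so the size is per device. */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return NULL;

	ctx = rte_zmalloc(NULL, size, 0);
	if (ctx == NULL)
		return NULL;

	sess_ctx.crypto_sess = sess;
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) != 0) {
		rte_free(ctx);
		return NULL;
	}

	return ctx;
}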
uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *status)
{
	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
			get_dequeue_count, max_nb_to_dequeue, post_dequeue,
			out_user_data, is_user_data_array, n_success_jobs,
			status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
	uint32_t n)
{
	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with incompatible parameters",
					name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
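/*
 * Illustrative sketch, not part of the library: creating an op pool with
 * rte_crypto_op_pool_create() and cycling one op through it. The pool
 * name, sizes and NUMA choice are hypothetical.
 */
static __rte_unused void
example_op_pool_usage(void)
{
	struct rte_mempool *pool;
	struct rte_crypto_op *op;

	/* 8192 symmetric ops, 128-entry per-lcore cache, no private data. */
	pool = rte_crypto_op_pool_create("example_op_pool",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
			SOCKET_ID_ANY);
	if (pool == NULL)
		return;

	/* Each allocated op comes back reset for the requested type. */
	op = rte_crypto_op_alloc(pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op != NULL)
		rte_crypto_op_free(op);
}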
CRYPTODEV, "name pointer NULL"); 2497 return -1; 2498 } 2499 2500 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { 2501 driver_name = driver->driver->name; 2502 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) 2503 return driver->id; 2504 } 2505 return -1; 2506 } 2507 2508 const char * 2509 rte_cryptodev_name_get(uint8_t dev_id) 2510 { 2511 struct rte_cryptodev *dev; 2512 2513 if (!rte_cryptodev_is_valid_device_data(dev_id)) { 2514 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2515 return NULL; 2516 } 2517 2518 dev = rte_cryptodev_pmd_get_dev(dev_id); 2519 if (dev == NULL) 2520 return NULL; 2521 2522 return dev->data->name; 2523 } 2524 2525 const char * 2526 rte_cryptodev_driver_name_get(uint8_t driver_id) 2527 { 2528 struct cryptodev_driver *driver; 2529 2530 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) 2531 if (driver->id == driver_id) 2532 return driver->driver->name; 2533 return NULL; 2534 } 2535 2536 uint8_t 2537 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv, 2538 const struct rte_driver *drv) 2539 { 2540 crypto_drv->driver = drv; 2541 crypto_drv->id = nb_drivers; 2542 2543 TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next); 2544 2545 return nb_drivers++; 2546 } 2547 2548 RTE_INIT(cryptodev_init_fp_ops) 2549 { 2550 uint32_t i; 2551 2552 for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++) 2553 cryptodev_fp_ops_reset(rte_crypto_fp_ops + i); 2554 } 2555 2556 static int 2557 cryptodev_handle_dev_list(const char *cmd __rte_unused, 2558 const char *params __rte_unused, 2559 struct rte_tel_data *d) 2560 { 2561 int dev_id; 2562 2563 if (rte_cryptodev_count() < 1) 2564 return -EINVAL; 2565 2566 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 2567 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) 2568 if (rte_cryptodev_is_valid_dev(dev_id)) 2569 rte_tel_data_add_array_int(d, dev_id); 2570 2571 return 0; 2572 } 2573 2574 static int 2575 cryptodev_handle_dev_info(const char *cmd __rte_unused, 2576 const char *params, struct rte_tel_data *d) 2577 { 2578 struct rte_cryptodev_info cryptodev_info; 2579 int dev_id; 2580 char *end_param; 2581 2582 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 2583 return -EINVAL; 2584 2585 dev_id = strtoul(params, &end_param, 0); 2586 if (*end_param != '\0') 2587 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2588 if (!rte_cryptodev_is_valid_dev(dev_id)) 2589 return -EINVAL; 2590 2591 rte_cryptodev_info_get(dev_id, &cryptodev_info); 2592 2593 rte_tel_data_start_dict(d); 2594 rte_tel_data_add_dict_string(d, "device_name", 2595 cryptodev_info.device->name); 2596 rte_tel_data_add_dict_int(d, "max_nb_queue_pairs", 2597 cryptodev_info.max_nb_queue_pairs); 2598 2599 return 0; 2600 } 2601 2602 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s) 2603 2604 static int 2605 cryptodev_handle_dev_stats(const char *cmd __rte_unused, 2606 const char *params, 2607 struct rte_tel_data *d) 2608 { 2609 struct rte_cryptodev_stats cryptodev_stats; 2610 int dev_id, ret; 2611 char *end_param; 2612 2613 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 2614 return -EINVAL; 2615 2616 dev_id = strtoul(params, &end_param, 0); 2617 if (*end_param != '\0') 2618 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2619 if (!rte_cryptodev_is_valid_dev(dev_id)) 2620 return -EINVAL; 2621 2622 ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats); 2623 if (ret < 0) 2624 return ret; 2625 2626 rte_tel_data_start_dict(d); 2627 ADD_DICT_STAT(enqueued_count); 2628 
#define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)

static int
cryptodev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_stats cryptodev_stats;
	int dev_id, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
	if (ret < 0)
		return ret;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(enqueued_count);
	ADD_DICT_STAT(dequeued_count);
	ADD_DICT_STAT(enqueue_err_count);
	ADD_DICT_STAT(dequeue_err_count);

	return 0;
}

#define CRYPTO_CAPS_SZ						\
	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities),	\
			sizeof(uint64_t)) /				\
		sizeof(uint64_t))

static int
crypto_caps_array(struct rte_tel_data *d,
		const struct rte_cryptodev_capabilities *capabilities)
{
	const struct rte_cryptodev_capabilities *dev_caps;
	uint64_t caps_val[CRYPTO_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);

	while ((dev_caps = &capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
			rte_tel_data_add_array_u64(d, caps_val[j]);
	}

	return i;
}

static int
cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_info dev_info;
	struct rte_tel_data *crypto_caps;
	int crypto_caps_n;
	char *end_param;
	int dev_id;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_tel_data_start_dict(d);
	crypto_caps = rte_tel_data_alloc();
	if (crypto_caps == NULL)
		return -ENOMEM;

	rte_cryptodev_info_get(dev_id, &dev_info);
	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);

	return 0;
}

RTE_INIT(cryptodev_init_telemetry)
{
	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
			"Returns information for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/list",
			cryptodev_handle_dev_list,
			"Returns list of available crypto devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/cryptodev/stats",
			cryptodev_handle_dev_stats,
			"Returns the stats for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/caps",
			cryptodev_handle_dev_caps,
			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
}
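/*
 * Illustrative sketch, not part of the library: the commands registered
 * above can be queried at runtime with the telemetry client shipped in
 * usertools/dpdk-telemetry.py. The exchange below is indicative only;
 * exact values depend on the devices present.
 *
 *   --> /cryptodev/list
 *   {"/cryptodev/list": [0]}
 *   --> /cryptodev/stats,0
 *   {"/cryptodev/stats": {"enqueued_count": 0, "dequeued_count": 0,
 *       "enqueue_err_count": 0, "dequeue_err_count": 0}}
 */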