/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
	.devs		= rte_crypto_devices,
	.data		= { NULL },
	.nb_devs	= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for the callback, and the
 * event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm strings identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]		= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]		= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]		= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]		= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]		= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]		= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]		= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]		= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]		= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]		= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]		= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]		= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]		= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]		= "zuc-eea3"
};

/**
 * The crypto cipher operation strings identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm strings identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * The crypto AEAD algorithm strings identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]		= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]		= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305]	= "chacha20-poly1305"
};

/**
 * The crypto AEAD operation strings identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation strings identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation strings identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * Asymmetric crypto key exchange operation strings identifiers.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE]	= "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute"
};

/**
 * The private data structure stored in the sym session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
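
/*
 * Usage sketch (illustrative only, not compiled here): mapping a
 * command-line algorithm name to its enum value with the helper above.
 * The literal string and the error handling are hypothetical.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") != 0)
 *		rte_exit(EXIT_FAILURE, "unknown cipher algorithm\n");
 */
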
/**
 * The crypto auth operation strings identifiers.
 * They can be used in the application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}
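
/*
 * Usage sketch (illustrative only): querying a device's AES-CBC capability
 * and validating a 16-byte key and IV before building a session. dev_id is
 * assumed to identify a configured device.
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		printf("AES-128-CBC not supported on dev %u\n", dev_id);
 */
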
int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen)
{
	/* no need to check for limits, if min or max = 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check if given modlen is module increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max thread set to 1, as one DP thread accessing a queue-pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
					"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
					"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
					"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
					"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}
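
/*
 * Usage sketch (illustrative only): listing a device's features by testing
 * each feature-flag bit against rte_cryptodev_get_feature_name().
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (flag = 1; flag != 0; flag <<= 1)
 *		if ((info.feature_flags & flag) &&
 *		    rte_cryptodev_get_feature_name(flag) != NULL)
 *			printf("%s\n", rte_cryptodev_get_feature_name(flag));
 */
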
unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name) == 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
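
/*
 * Usage sketch (illustrative only): enumerating all devices bound to one
 * driver and reporting the NUMA socket of each. The driver name is
 * hypothetical.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_null", ids, RTE_DIM(ids));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u on socket %d\n", ids[i],
 *				rte_cryptodev_socket_id(ids[i]));
 */
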
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
				dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
					dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
					"nb_queues %u",
					nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}
int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
				obj_size) || (s.nb_drivers <= dev->driver_id) ||
				rte_cryptodev_sym_get_private_session_size(dev_id) >
					obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
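
/*
 * Usage sketch (illustrative only): the bring-up order enforced by the
 * checks above - configure while stopped, set up each queue pair, then
 * start. The mempool pointers and descriptor count are hypothetical.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			conf.socket_id) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u bring-up failed\n",
 *				dev_id);
 */
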
queue_pair_id=%d", dev_id, qp_id); 1304 rte_errno = ENOMEM; 1305 return NULL; 1306 } 1307 1308 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1309 1310 cb->fn = cb_fn; 1311 cb->arg = cb_arg; 1312 1313 /* Add the callbacks in fifo order. */ 1314 list = &dev->enq_cbs[qp_id]; 1315 tail = list->next; 1316 1317 if (tail) { 1318 while (tail->next) 1319 tail = tail->next; 1320 /* Stores to cb->fn and cb->param should complete before 1321 * cb is visible to data plane. 1322 */ 1323 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 1324 } else { 1325 /* Stores to cb->fn and cb->param should complete before 1326 * cb is visible to data plane. 1327 */ 1328 __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE); 1329 } 1330 1331 rte_spinlock_unlock(&rte_cryptodev_callback_lock); 1332 1333 return cb; 1334 } 1335 1336 int 1337 rte_cryptodev_remove_enq_callback(uint8_t dev_id, 1338 uint16_t qp_id, 1339 struct rte_cryptodev_cb *cb) 1340 { 1341 struct rte_cryptodev *dev; 1342 struct rte_cryptodev_cb **prev_cb, *curr_cb; 1343 struct rte_cryptodev_cb_rcu *list; 1344 int ret; 1345 1346 ret = -EINVAL; 1347 1348 if (!cb) { 1349 CDEV_LOG_ERR("Callback is NULL"); 1350 return -EINVAL; 1351 } 1352 1353 if (!rte_cryptodev_is_valid_dev(dev_id)) { 1354 CDEV_LOG_ERR("Invalid dev_id=%d", dev_id); 1355 return -ENODEV; 1356 } 1357 1358 dev = &rte_crypto_devices[dev_id]; 1359 if (qp_id >= dev->data->nb_queue_pairs) { 1360 CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id); 1361 return -ENODEV; 1362 } 1363 1364 rte_spinlock_lock(&rte_cryptodev_callback_lock); 1365 if (dev->enq_cbs == NULL) { 1366 CDEV_LOG_ERR("Callback not initialized"); 1367 goto cb_err; 1368 } 1369 1370 list = &dev->enq_cbs[qp_id]; 1371 if (list == NULL) { 1372 CDEV_LOG_ERR("Callback list is NULL"); 1373 goto cb_err; 1374 } 1375 1376 if (list->qsbr == NULL) { 1377 CDEV_LOG_ERR("Rcu qsbr is NULL"); 1378 goto cb_err; 1379 } 1380 1381 prev_cb = &list->next; 1382 for (; *prev_cb != NULL; prev_cb = &curr_cb->next) { 1383 curr_cb = *prev_cb; 1384 if (curr_cb == cb) { 1385 /* Remove the user cb from the callback list. 
int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}
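
/*
 * Usage sketch (illustrative only): dumping and clearing the basic
 * counters exposed by the stats API above.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0) {
 *		printf("enq %"PRIu64" (err %"PRIu64"), deq %"PRIu64
 *				" (err %"PRIu64")\n",
 *				stats.enqueued_count, stats.enqueue_err_count,
 *				stats.dequeued_count, stats.dequeue_err_count);
 *		rte_cryptodev_stats_reset(dev_id);
 *	}
 */
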
int
rte_cryptodev_callback_register(uint8_t dev_id,
		enum rte_cryptodev_event_type event,
		rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
		enum rte_cryptodev_event_type event,
		rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size, 1771 obj_sz); 1772 else 1773 obj_sz = elt_size; 1774 1775 mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size, 1776 (uint32_t)(sizeof(*pool_priv)), 1777 NULL, NULL, NULL, NULL, 1778 socket_id, 0); 1779 if (mp == NULL) { 1780 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n", 1781 __func__, name, rte_errno); 1782 return NULL; 1783 } 1784 1785 pool_priv = rte_mempool_get_priv(mp); 1786 if (!pool_priv) { 1787 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n", 1788 __func__, name); 1789 rte_mempool_free(mp); 1790 return NULL; 1791 } 1792 1793 pool_priv->nb_drivers = nb_drivers; 1794 pool_priv->user_data_sz = user_data_size; 1795 1796 rte_cryptodev_trace_sym_session_pool_create(name, nb_elts, 1797 elt_size, cache_size, user_data_size, mp); 1798 return mp; 1799 } 1800 1801 struct rte_mempool * 1802 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, 1803 uint32_t cache_size, uint16_t user_data_size, int socket_id) 1804 { 1805 struct rte_mempool *mp; 1806 struct rte_cryptodev_asym_session_pool_private_data *pool_priv; 1807 uint32_t obj_sz, obj_sz_aligned; 1808 uint8_t dev_id; 1809 unsigned int priv_sz, max_priv_sz = 0; 1810 1811 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) 1812 if (rte_cryptodev_is_valid_dev(dev_id)) { 1813 priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id); 1814 if (priv_sz > max_priv_sz) 1815 max_priv_sz = priv_sz; 1816 } 1817 if (max_priv_sz == 0) { 1818 CDEV_LOG_INFO("Could not set max private session size\n"); 1819 return NULL; 1820 } 1821 1822 obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz + 1823 user_data_size; 1824 obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE); 1825 1826 mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size, 1827 (uint32_t)(sizeof(*pool_priv)), 1828 NULL, NULL, NULL, NULL, 1829 socket_id, 0); 1830 if (mp == NULL) { 1831 CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n", 1832 __func__, name, rte_errno); 1833 return NULL; 1834 } 1835 1836 pool_priv = rte_mempool_get_priv(mp); 1837 if (!pool_priv) { 1838 CDEV_LOG_ERR("%s(name=%s) failed to get private data\n", 1839 __func__, name); 1840 rte_mempool_free(mp); 1841 return NULL; 1842 } 1843 pool_priv->max_priv_session_sz = max_priv_sz; 1844 pool_priv->user_data_sz = user_data_size; 1845 1846 rte_cryptodev_trace_asym_session_pool_create(name, nb_elts, 1847 user_data_size, cache_size, mp); 1848 return mp; 1849 } 1850 1851 static unsigned int 1852 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess) 1853 { 1854 return (sizeof(sess->sess_data[0]) * sess->nb_drivers) + 1855 sess->user_data_sz; 1856 } 1857 1858 static uint8_t 1859 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp) 1860 { 1861 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 1862 1863 if (!mp) 1864 return 0; 1865 1866 pool_priv = rte_mempool_get_priv(mp); 1867 1868 if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) || 1869 pool_priv->nb_drivers != nb_drivers || 1870 mp->elt_size < 1871 rte_cryptodev_sym_get_header_session_size() 1872 + pool_priv->user_data_sz) 1873 return 0; 1874 1875 return 1; 1876 } 1877 1878 struct rte_cryptodev_sym_session * 1879 rte_cryptodev_sym_session_create(struct rte_mempool *mp) 1880 { 1881 struct rte_cryptodev_sym_session *sess; 1882 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 1883 1884 if (!rte_cryptodev_sym_is_valid_session_pool(mp)) { 1885 CDEV_LOG_ERR("Invalid mempool\n"); 1886 
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
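
/*
 * Usage sketch (illustrative only): creating the header and private-data
 * mempools this API expects, then allocating and initialising one session.
 * Pool names, sizes and the xform chain are hypothetical.
 *
 *	struct rte_mempool *hdr_mp, *priv_mp;
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	hdr_mp = rte_cryptodev_sym_session_pool_create("sess_hdr", 1024, 0,
 *			32, 0, rte_socket_id());
 *	priv_mp = rte_mempool_create("sess_priv", 1024,
 *			rte_cryptodev_sym_get_private_session_size(dev_id),
 *			32, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 *	if (hdr_mp == NULL || priv_mp == NULL)
 *		rte_exit(EXIT_FAILURE, "session pool creation failed\n");
 *
 *	sess = rte_cryptodev_sym_session_create(hdr_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */
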
int
rte_cryptodev_asym_session_create(uint8_t dev_id,
		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
		void **session)
{
	struct rte_cryptodev_asym_session *sess;
	uint32_t session_priv_data_sz;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	unsigned int session_header_size =
			rte_cryptodev_asym_get_header_session_size();
	struct rte_cryptodev *dev;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return -EINVAL;

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool");
		return -EINVAL;
	}

	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
			dev_id);
	pool_priv = rte_mempool_get_priv(mp);

	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
		CDEV_LOG_DEBUG(
			"The private session data size used when creating the mempool is smaller than this device's private session data.");
		return -EINVAL;
	}

	/* Verify if provided mempool can hold elements big enough. */
	if (mp->elt_size < session_header_size + session_priv_data_sz) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return -EINVAL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, session)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return -ENOMEM;
	}

	sess = *session;
	sess->driver_id = dev->driver_id;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;

	/* Clear device session pointer. */
	memset(sess->sess_private_data, 0,
			session_priv_data_sz + sess->user_data_sz);

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure, -ENOTSUP);

	if (sess->sess_private_data[0] == 0) {
		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
	return 0;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
{
	struct rte_mempool *sess_mp;
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(dev_id, sess);
	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all necessary information to safely clear or free
	 * all sessions.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	return sizeof(struct rte_cryptodev_asym_session);
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(
		struct rte_cryptodev_sym_session *sess,
		void *data,
		uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
		struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}

int
rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_private_data +
			sess->max_priv_data_sz,
			data, size);
	return 0;
}

void *
rte_cryptodev_asym_session_get_user_data(void *session)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_private_data +
			sess->max_priv_data_sz);
}
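
/*
 * Usage sketch (illustrative only): stashing per-session application state
 * in the user-data area (the pool must have been created with a non-zero
 * user_data_size). The context struct is hypothetical.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	struct app_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0) {
 *		p = rte_cryptodev_sym_session_get_user_data(sess);
 *		printf("flow %u\n", p->flow_id);
 *	}
 */
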
uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}

int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
	int32_t priv_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
		return -ENOTSUP;
	}

	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
	if (priv_size < 0)
		return -ENOTSUP;

	return RTE_ALIGN_CEIL((size + priv_size), 8);
}

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
			sess_type, session_ctx, is_update);
}

int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
	enum rte_crypto_op_type op_type,
	enum rte_crypto_op_sess_type sess_type,
	void *ev_mdata,
	uint16_t size)
{
	struct rte_cryptodev *dev;

	if (sess == NULL || ev_mdata == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		goto skip_pmd_op;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->dev_ops->session_ev_mdata_set == NULL)
		goto skip_pmd_op;

	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
			sess_type, ev_mdata);

skip_pmd_op:
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
				size);
	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_cryptodev_asym_session *s = sess;

		if (s->event_mdata == NULL) {
			s->event_mdata = rte_malloc(NULL, size, 0);
			if (s->event_mdata == NULL)
				return -ENOMEM;
		}
		rte_memcpy(s->event_mdata, ev_mdata, size);

		return 0;
	} else
		return -ENOTSUP;
}

uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status)
{
	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
			ofs, user_data, enqueue_status);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}
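/*
 * Usage sketch (hypothetical application code): setting up a raw
 * data-path context for a configured queue pair. "sess" is assumed to
 * be an already-initialised symmetric session.
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	if (sz > 0) {
 *		struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
 *		union rte_cryptodev_session_ctx sess_ctx = {
 *			.crypto_sess = sess };
 *		if (ctx != NULL &&
 *		    rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *				RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) == 0) {
 *			// ctx can now be passed to the raw enqueue/dequeue
 *			// calls below.
 *		}
 *	}
 */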
uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *status)
{
	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
		out_user_data, is_user_data_array, n_success_jobs, status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
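/*
 * Usage sketch (hypothetical application code): creating a pool of
 * symmetric crypto ops with 16 bytes of per-op private data, then
 * drawing a burst from it. Pool name and sizes are illustrative.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *		"sym_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *		8192, 128, 16, rte_socket_id());
 *	struct rte_crypto_op *ops[32];
 *	if (op_pool != NULL &&
 *	    rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			ops, 32) == 32) {
 *		// each op is already reset to the requested type by
 *		// rte_crypto_op_init() above.
 *	}
 */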
CRYPTODEV, "name pointer NULL"); 2471 return -1; 2472 } 2473 2474 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { 2475 driver_name = driver->driver->name; 2476 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) 2477 return driver->id; 2478 } 2479 return -1; 2480 } 2481 2482 const char * 2483 rte_cryptodev_name_get(uint8_t dev_id) 2484 { 2485 struct rte_cryptodev *dev; 2486 2487 if (!rte_cryptodev_is_valid_device_data(dev_id)) { 2488 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2489 return NULL; 2490 } 2491 2492 dev = rte_cryptodev_pmd_get_dev(dev_id); 2493 if (dev == NULL) 2494 return NULL; 2495 2496 return dev->data->name; 2497 } 2498 2499 const char * 2500 rte_cryptodev_driver_name_get(uint8_t driver_id) 2501 { 2502 struct cryptodev_driver *driver; 2503 2504 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) 2505 if (driver->id == driver_id) 2506 return driver->driver->name; 2507 return NULL; 2508 } 2509 2510 uint8_t 2511 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv, 2512 const struct rte_driver *drv) 2513 { 2514 crypto_drv->driver = drv; 2515 crypto_drv->id = nb_drivers; 2516 2517 TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next); 2518 2519 return nb_drivers++; 2520 } 2521 2522 RTE_INIT(cryptodev_init_fp_ops) 2523 { 2524 uint32_t i; 2525 2526 for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++) 2527 cryptodev_fp_ops_reset(rte_crypto_fp_ops + i); 2528 } 2529 2530 static int 2531 cryptodev_handle_dev_list(const char *cmd __rte_unused, 2532 const char *params __rte_unused, 2533 struct rte_tel_data *d) 2534 { 2535 int dev_id; 2536 2537 if (rte_cryptodev_count() < 1) 2538 return -EINVAL; 2539 2540 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 2541 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) 2542 if (rte_cryptodev_is_valid_dev(dev_id)) 2543 rte_tel_data_add_array_int(d, dev_id); 2544 2545 return 0; 2546 } 2547 2548 static int 2549 cryptodev_handle_dev_info(const char *cmd __rte_unused, 2550 const char *params, struct rte_tel_data *d) 2551 { 2552 struct rte_cryptodev_info cryptodev_info; 2553 int dev_id; 2554 char *end_param; 2555 2556 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 2557 return -EINVAL; 2558 2559 dev_id = strtoul(params, &end_param, 0); 2560 if (*end_param != '\0') 2561 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2562 if (!rte_cryptodev_is_valid_dev(dev_id)) 2563 return -EINVAL; 2564 2565 rte_cryptodev_info_get(dev_id, &cryptodev_info); 2566 2567 rte_tel_data_start_dict(d); 2568 rte_tel_data_add_dict_string(d, "device_name", 2569 cryptodev_info.device->name); 2570 rte_tel_data_add_dict_int(d, "max_nb_queue_pairs", 2571 cryptodev_info.max_nb_queue_pairs); 2572 2573 return 0; 2574 } 2575 2576 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s) 2577 2578 static int 2579 cryptodev_handle_dev_stats(const char *cmd __rte_unused, 2580 const char *params, 2581 struct rte_tel_data *d) 2582 { 2583 struct rte_cryptodev_stats cryptodev_stats; 2584 int dev_id, ret; 2585 char *end_param; 2586 2587 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 2588 return -EINVAL; 2589 2590 dev_id = strtoul(params, &end_param, 0); 2591 if (*end_param != '\0') 2592 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2593 if (!rte_cryptodev_is_valid_dev(dev_id)) 2594 return -EINVAL; 2595 2596 ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats); 2597 if (ret < 0) 2598 return ret; 2599 2600 rte_tel_data_start_dict(d); 2601 ADD_DICT_STAT(enqueued_count); 2602 
#define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)

static int
cryptodev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_stats cryptodev_stats;
	int dev_id, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
	if (ret < 0)
		return ret;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(enqueued_count);
	ADD_DICT_STAT(dequeued_count);
	ADD_DICT_STAT(enqueue_err_count);
	ADD_DICT_STAT(dequeue_err_count);

	return 0;
}

#define CRYPTO_CAPS_SZ \
	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
			sizeof(uint64_t)) / \
		sizeof(uint64_t))

static int
crypto_caps_array(struct rte_tel_data *d,
		const struct rte_cryptodev_capabilities *capabilities)
{
	const struct rte_cryptodev_capabilities *dev_caps;
	uint64_t caps_val[CRYPTO_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);

	while ((dev_caps = &capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
			rte_tel_data_add_array_u64(d, caps_val[j]);
	}

	return i;
}

static int
cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_info dev_info;
	struct rte_tel_data *crypto_caps;
	int crypto_caps_n;
	char *end_param;
	int dev_id;

	if (!params || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_tel_data_start_dict(d);
	crypto_caps = rte_tel_data_alloc();
	if (!crypto_caps)
		return -ENOMEM;

	rte_cryptodev_info_get(dev_id, &dev_info);
	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);

	return 0;
}

RTE_INIT(cryptodev_init_telemetry)
{
	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
			"Returns information for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/list",
			cryptodev_handle_dev_list,
			"Returns list of available crypto devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/cryptodev/stats",
			cryptodev_handle_dev_stats,
			"Returns the stats for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/caps",
			cryptodev_handle_dev_caps,
			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
}
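/*
 * Note on the /cryptodev/caps encoding: crypto_caps_array() serialises
 * each rte_cryptodev_capabilities entry as CRYPTO_CAPS_SZ 64-bit words,
 * so a consumer can rebuild the structs by reading the "crypto_caps"
 * array in chunks of CRYPTO_CAPS_SZ values. A minimal decoding sketch
 * (hypothetical consumer-side code):
 *
 *	uint64_t words[CRYPTO_CAPS_SZ];
 *	struct rte_cryptodev_capabilities cap;
 *	// ... fill words[] from one chunk of the telemetry array ...
 *	memcpy(&cap, words, sizeof(cap));
 */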