/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
	.devs			= rte_crypto_devices,
	.data			= { NULL },
	.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;	/**< Callback address */
	void *cb_arg;			/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;		/**< Callback is executing */
};

/**
 * @deprecated
 * The crypto cipher algorithm strings identifiers.
 * It could be used in application command line.
 */
__rte_deprecated
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr"
};

/**
 * The crypto cipher algorithm strings identifiers.
 * Not to be used in application directly.
 * Application can use rte_cryptodev_get_cipher_algo_string().
 */
static const char *
crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr"
};

/**
 * The crypto cipher operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * @deprecated
 * The crypto auth algorithm strings identifiers.
 * It could be used in application command line.
 */
__rte_deprecated
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3"
};

/**
 * The crypto auth algorithm strings identifiers.
 * Not to be used in application directly.
 * Application can use rte_cryptodev_get_auth_algo_string().
 */
static const char *
crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3"
};

/**
 * @deprecated
 * The crypto AEAD algorithm strings identifiers.
 * It could be used in application command line.
 */
__rte_deprecated
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD algorithm strings identifiers.
 * Not to be used in application directly.
 * Application can use rte_cryptodev_get_aead_algo_string().
 */
static const char *
crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * @deprecated
 * Asymmetric crypto transform operation strings identifiers.
 */
__rte_deprecated
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto transform operation strings identifiers.
 * Not to be used in application directly.
 * Application can use rte_cryptodev_asym_get_xform_string().
 */
static const char *
crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation strings identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * Asymmetric crypto key exchange operation strings identifiers.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t sess_data_sz;
	/**< driver session data size */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);

	return ret;
}

const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
		alg_str = crypto_cipher_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
		alg_str = crypto_auth_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
		alg_str = crypto_aead_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
{
	const char *xform_str = NULL;

	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
		xform_str = crypto_asym_xform_strings[xform_enum];

	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);

	return xform_str;
}

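/*
 * Illustrative usage sketch (not part of this file): round-tripping an
 * algorithm name through the lookup helpers above, e.g. when parsing a
 * command-line option. A minimal sketch; error handling is reduced to the
 * return-value check.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") == 0)
 *		printf("parsed cipher: %s\n",
 *			rte_cryptodev_get_cipher_algo_string(algo));
 *	else
 *		printf("unknown cipher name\n");
 */
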
/**
 * The crypto auth operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead) {
			sym_capability = &capability->sym;
			break;
		}
	}

	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
		dev_info.driver_id, idx->type, sym_capability);

	return sym_capability;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type) {
			asym_cap = &capability->asym.xform_capa;
			break;
		}
	}

	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
		dev_info.driver_id, idx->type, asym_cap);

	return asym_cap;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
		iv_size, ret);

	return ret;
}

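/*
 * Illustrative usage sketch (not part of this file): looking up a cipher
 * capability and validating parameter sizes against it. param_range_check()
 * accepts a size iff it lies within [min, max] and, when increment != 0, is
 * reachable from min in steps of increment; e.g. min=16, max=32, increment=8
 * accepts 16, 24 and 32 only. dev_id is assumed to be a configured device.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *		printf("AES-CBC with 128-bit key and 16-byte IV supported\n");
 */
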
int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->auth.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
		digest_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->aead.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
		digest_size, aad_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type)
{
	int ret = 0;

	if (capability->op_types & (1 << op_type))
		ret = 1;

	rte_cryptodev_trace_asym_xform_capability_check_optype(
		capability->op_types, op_type, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen)
{
	int ret = 0; /* success */

	/* no need to check for limits, if min or max = 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min) {
			ret = -1;
			goto done;
		}
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max) {
			ret = -1;
			goto done;
		}
	}

	/* in any case, check if the given modlen is a multiple of
	 * the increment
	 */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			ret = -1;
	}

done:
	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
		modlen, ret);

	return ret;
}

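/*
 * Illustrative usage sketch (not part of this file): checking whether an
 * RSA-capable device can sign and supports a 2048-bit (256-byte) modulus.
 * Note the different conventions above: check_optype() returns 1 when the
 * operation is supported, while check_modlen() returns 0 on success.
 *
 *	struct rte_cryptodev_asym_capability_idx idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *cap;
 *
 *	cap = rte_cryptodev_asym_capability_get(dev_id, &idx);
 *	if (cap != NULL &&
 *	    rte_cryptodev_asym_xform_capability_check_optype(cap,
 *			RTE_CRYPTO_ASYM_OP_SIGN) == 1 &&
 *	    rte_cryptodev_asym_xform_capability_check_modlen(cap, 256) == 0)
 *		printf("RSA-2048 sign supported\n");
 */
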
/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as one DP thread accesses a queue-pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
		sizeof(struct rte_cryptodev_cb_rcu) *
		dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
		sizeof(struct rte_cryptodev_cb_rcu) *
		dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	rte_cryptodev_trace_get_feature_name(flag);

	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int ret = 1;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		ret = 0;
		goto done;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		ret = 0;

done:
	rte_cryptodev_trace_is_valid_dev(dev_id, ret);

	return ret;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;
	int ret = -1;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)) {
			ret = (int)i;
			break;
		}
	}

	rte_cryptodev_trace_get_dev_id(name, ret);

	return ret;
}

uint8_t
rte_cryptodev_count(void)
{
	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);

	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	rte_cryptodev_trace_devices_get(driver_name, count);

	return count;
}

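/*
 * Illustrative usage sketch (not part of this file): enumerating the
 * attached devices that belong to one driver. "crypto_aesni_mb" is just an
 * example driver name; any PMD name known to the application works.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t i, n;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u on socket %d\n", ids[i],
 *			rte_cryptodev_socket_id(ids[i]));
 */
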
void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	void *sec_ctx = NULL;

	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		sec_ctx = rte_crypto_devices[dev_id].security_ctx;

	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);

	return sec_ctx;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
		dev->data->socket_id);
	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
		dev->data->socket_id, dev->data->dev_id,
		dev->data->nb_queue_pairs);

	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
					dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
					dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
					"nb_queues %u",
					nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;
	int ret = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		ret = -EINVAL;
		goto done;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		ret = -EINVAL;
		goto done;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		ret = 1;
		goto done;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

done:
	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);

	return ret;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
		uint32_t sess_priv_size)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->sess_data_sz < sess_priv_size)
		return 0;

	return 1;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
			rte_cryptodev_sym_get_private_session_size(dev_id))) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}

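/*
 * Illustrative usage sketch (not part of this file): the usual control-path
 * sequence around the APIs above. Configure the stopped device, set up each
 * queue pair, then start it. sess_mp is assumed to come from
 * rte_cryptodev_sym_session_pool_create(); it may also be NULL here and be
 * supplied per session instead. All sizes below are placeholders.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			conf.socket_id) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
 */
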
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
	return cb;
}

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

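/*
 * Illustrative usage sketch (not part of this file): counting enqueued ops
 * on queue pair 0 with the pair of APIs above. The callback runs in the
 * rte_cryptodev_enqueue_burst() fast path, so it must stay cheap.
 *
 *	static uint16_t
 *	count_enq(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *		  uint16_t nb_ops, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*(uint64_t *)arg += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static uint64_t enq_count;
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, 0, count_enq, &enq_count);
 *	...
 *	rte_cryptodev_remove_enq_callback(dev_id, 0, cb);
 */
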
struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);

	rte_cryptodev_trace_stats_get(dev_id, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	rte_cryptodev_trace_stats_reset(dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

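/*
 * Illustrative usage sketch (not part of this file): dumping and clearing
 * the basic counters kept by the PMD. rte_cryptodev_stats_get() zeroes the
 * caller's struct first, so stale fields never leak through on success.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0) {
 *		printf("enq %" PRIu64 " (err %" PRIu64 "), "
 *		       "deq %" PRIu64 " (err %" PRIu64 ")\n",
 *		       stats.enqueued_count, stats.enqueue_err_count,
 *		       stats.dequeued_count, stats.dequeue_err_count);
 *		rte_cryptodev_stats_reset(dev_id);
 *	}
 */
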
void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;

	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

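/*
 * Illustrative usage sketch (not part of this file): reacting to device
 * events. The handler runs from rte_cryptodev_pmd_callback_process() below,
 * outside the registration lock, so it may call back into the API.
 *
 *	static void
 *	on_crypto_event(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			printf("cryptodev %u reported an error\n", dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			on_crypto_event, NULL);
 */
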
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;

	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
			NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->sess_data_sz = elt_size;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}

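/*
 * Illustrative usage sketch (not part of this file): sizing a symmetric
 * session pool. elt_size must cover the largest per-driver session data of
 * the devices that will use the pool, which is what
 * rte_cryptodev_sym_get_private_session_size() reports. Pool name and
 * counts below are placeholders.
 *
 *	uint32_t sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *sess_mp;
 *
 *	sess_mp = rte_cryptodev_sym_session_pool_create("sym_sess_pool",
 *			1024, sz, 128, 0, rte_socket_id());
 *	if (sess_mp == NULL)
 *		rte_exit(EXIT_FAILURE, "session pool creation failed\n");
 */
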
private data", 2109 __func__, name); 2110 rte_mempool_free(mp); 2111 return NULL; 2112 } 2113 pool_priv->max_priv_session_sz = max_priv_sz; 2114 pool_priv->user_data_sz = user_data_size; 2115 2116 rte_cryptodev_trace_asym_session_pool_create(name, nb_elts, 2117 user_data_size, cache_size, mp); 2118 return mp; 2119 } 2120 2121 void * 2122 rte_cryptodev_sym_session_create(uint8_t dev_id, 2123 struct rte_crypto_sym_xform *xforms, 2124 struct rte_mempool *mp) 2125 { 2126 struct rte_cryptodev *dev; 2127 struct rte_cryptodev_sym_session *sess; 2128 struct rte_cryptodev_sym_session_pool_private_data *pool_priv; 2129 uint32_t sess_priv_sz; 2130 int ret; 2131 2132 if (!rte_cryptodev_is_valid_dev(dev_id)) { 2133 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2134 rte_errno = EINVAL; 2135 return NULL; 2136 } 2137 2138 if (xforms == NULL) { 2139 CDEV_LOG_ERR("Invalid xform\n"); 2140 rte_errno = EINVAL; 2141 return NULL; 2142 } 2143 2144 sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id); 2145 if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) { 2146 CDEV_LOG_ERR("Invalid mempool"); 2147 rte_errno = EINVAL; 2148 return NULL; 2149 } 2150 2151 dev = rte_cryptodev_pmd_get_dev(dev_id); 2152 2153 /* Allocate a session structure from the session pool */ 2154 if (rte_mempool_get(mp, (void **)&sess)) { 2155 CDEV_LOG_ERR("couldn't get object from session mempool"); 2156 rte_errno = ENOMEM; 2157 return NULL; 2158 } 2159 2160 pool_priv = rte_mempool_get_priv(mp); 2161 sess->driver_id = dev->driver_id; 2162 sess->sess_data_sz = pool_priv->sess_data_sz; 2163 sess->user_data_sz = pool_priv->user_data_sz; 2164 sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) + 2165 offsetof(struct rte_cryptodev_sym_session, driver_priv_data); 2166 2167 if (dev->dev_ops->sym_session_configure == NULL) { 2168 rte_errno = ENOTSUP; 2169 goto error_exit; 2170 } 2171 memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz); 2172 2173 ret = dev->dev_ops->sym_session_configure(dev, xforms, sess); 2174 if (ret < 0) { 2175 rte_errno = -ret; 2176 goto error_exit; 2177 } 2178 sess->driver_id = dev->driver_id; 2179 2180 rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp); 2181 2182 return (void *)sess; 2183 error_exit: 2184 rte_mempool_put(mp, (void *)sess); 2185 return NULL; 2186 } 2187 2188 int 2189 rte_cryptodev_asym_session_create(uint8_t dev_id, 2190 struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp, 2191 void **session) 2192 { 2193 struct rte_cryptodev_asym_session *sess; 2194 uint32_t session_priv_data_sz; 2195 struct rte_cryptodev_asym_session_pool_private_data *pool_priv; 2196 unsigned int session_header_size = 2197 rte_cryptodev_asym_get_header_session_size(); 2198 struct rte_cryptodev *dev; 2199 int ret; 2200 2201 if (!rte_cryptodev_is_valid_dev(dev_id)) { 2202 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2203 return -EINVAL; 2204 } 2205 2206 dev = rte_cryptodev_pmd_get_dev(dev_id); 2207 2208 if (dev == NULL) 2209 return -EINVAL; 2210 2211 if (!mp) { 2212 CDEV_LOG_ERR("invalid mempool"); 2213 return -EINVAL; 2214 } 2215 2216 session_priv_data_sz = rte_cryptodev_asym_get_private_session_size( 2217 dev_id); 2218 pool_priv = rte_mempool_get_priv(mp); 2219 2220 if (pool_priv->max_priv_session_sz < session_priv_data_sz) { 2221 CDEV_LOG_DEBUG( 2222 "The private session data size used when creating the mempool is smaller than this device's private session data."); 2223 return -EINVAL; 2224 } 2225 2226 /* Verify if provided mempool can hold elements 
2227	if (mp->elt_size < session_header_size + session_priv_data_sz) {
2228		CDEV_LOG_ERR(
2229			"mempool elements too small to hold session objects");
2230		return -EINVAL;
2231	}
2232
2233	/* Allocate a session structure from the session pool */
2234	if (rte_mempool_get(mp, session)) {
2235		CDEV_LOG_ERR("couldn't get object from session mempool");
2236		return -ENOMEM;
2237	}
2238
2239	sess = *session;
2240	sess->driver_id = dev->driver_id;
2241	sess->user_data_sz = pool_priv->user_data_sz;
2242	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
2243
2244	/* Clear session private data and user data. */
2245	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
2246
2247	if (*dev->dev_ops->asym_session_configure == NULL) {
		/* Return the unconfigured session object to the pool. */
		rte_mempool_put(mp, *session);
2248		return -ENOTSUP;
	}
2249
2250	if (sess->sess_private_data[0] == 0) {
2251		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
2252		if (ret < 0) {
2253			CDEV_LOG_ERR(
2254				"dev_id %d failed to configure session details",
2255				dev_id);
			rte_mempool_put(mp, *session);
2256			return ret;
2257		}
2258	}
2259
2260	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2261	return 0;
2262 }
2263
2264 int
2265 rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
2266 {
2267	struct rte_cryptodev *dev;
2268	struct rte_mempool *sess_mp;
2269	struct rte_cryptodev_sym_session *sess = _sess;
2270	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2271
2272	if (sess == NULL)
2273		return -EINVAL;
2274
2275	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2276		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2277		return -EINVAL;
2278	}
2279
2280	dev = rte_cryptodev_pmd_get_dev(dev_id);
2281
2282	if (dev == NULL)
2283		return -EINVAL;
2284
2285	sess_mp = rte_mempool_from_obj(sess);
2286	if (!sess_mp)
2287		return -EINVAL;
2288	pool_priv = rte_mempool_get_priv(sess_mp);
2289
2290	if (sess->driver_id != dev->driver_id) {
2291		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
2292			sess->driver_id, dev->driver_id);
2293		return -EINVAL;
2294	}
2295
2296	if (*dev->dev_ops->sym_session_clear == NULL)
2297		return -ENOTSUP;
2298
2299	dev->dev_ops->sym_session_clear(dev, sess);
2300
2301	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2302
2303	/* Return session to mempool */
2304	rte_mempool_put(sess_mp, sess);
2305
2306	rte_cryptodev_trace_sym_session_free(dev_id, sess);
2307	return 0;
2308 }
2309
2310 int
2311 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2312 {
2313	struct rte_mempool *sess_mp;
2314	struct rte_cryptodev *dev;
2315
2316	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2317		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2318		return -EINVAL;
2319	}
2320
2321	dev = rte_cryptodev_pmd_get_dev(dev_id);
2322
2323	if (dev == NULL || sess == NULL)
2324		return -EINVAL;
2325
2326	if (*dev->dev_ops->asym_session_clear == NULL)
2327		return -ENOTSUP;
2328
2329	dev->dev_ops->asym_session_clear(dev, sess);
2330
2331	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2332
2333	/* Return session to mempool */
2334	sess_mp = rte_mempool_from_obj(sess);
2335	rte_mempool_put(sess_mp, sess);
2336
2337	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2338	return 0;
2339 }
2340
2341 unsigned int
2342 rte_cryptodev_asym_get_header_session_size(void)
2343 {
2344	return sizeof(struct rte_cryptodev_asym_session);
2345 }
2346
2347 unsigned int
2348 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2349 {
2350	struct rte_cryptodev *dev;
2351	unsigned int priv_sess_size;
2352
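	/*
	 * Ask the PMD for the size of its per-session private data. Note
	 * that 0 is returned both for an invalid device id and for a PMD
	 * that does not implement sym_session_get_size, so callers treat
	 * 0 as "no private data required".
	 */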
if (!rte_cryptodev_is_valid_dev(dev_id)) 2354 return 0; 2355 2356 dev = rte_cryptodev_pmd_get_dev(dev_id); 2357 2358 if (*dev->dev_ops->sym_session_get_size == NULL) 2359 return 0; 2360 2361 priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev); 2362 2363 rte_cryptodev_trace_sym_get_private_session_size(dev_id, 2364 priv_sess_size); 2365 2366 return priv_sess_size; 2367 } 2368 2369 unsigned int 2370 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id) 2371 { 2372 struct rte_cryptodev *dev; 2373 unsigned int priv_sess_size; 2374 2375 if (!rte_cryptodev_is_valid_dev(dev_id)) 2376 return 0; 2377 2378 dev = rte_cryptodev_pmd_get_dev(dev_id); 2379 2380 if (*dev->dev_ops->asym_session_get_size == NULL) 2381 return 0; 2382 2383 priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev); 2384 2385 rte_cryptodev_trace_asym_get_private_session_size(dev_id, 2386 priv_sess_size); 2387 2388 return priv_sess_size; 2389 } 2390 2391 int 2392 rte_cryptodev_sym_session_set_user_data(void *_sess, void *data, 2393 uint16_t size) 2394 { 2395 struct rte_cryptodev_sym_session *sess = _sess; 2396 2397 if (sess == NULL) 2398 return -EINVAL; 2399 2400 if (sess->user_data_sz < size) 2401 return -ENOMEM; 2402 2403 rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size); 2404 2405 rte_cryptodev_trace_sym_session_set_user_data(sess, data, size); 2406 2407 return 0; 2408 } 2409 2410 void * 2411 rte_cryptodev_sym_session_get_user_data(void *_sess) 2412 { 2413 struct rte_cryptodev_sym_session *sess = _sess; 2414 void *data = NULL; 2415 2416 if (sess == NULL || sess->user_data_sz == 0) 2417 return NULL; 2418 2419 data = (void *)(sess->driver_priv_data + sess->sess_data_sz); 2420 2421 rte_cryptodev_trace_sym_session_get_user_data(sess, data); 2422 2423 return data; 2424 } 2425 2426 int 2427 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size) 2428 { 2429 struct rte_cryptodev_asym_session *sess = session; 2430 if (sess == NULL) 2431 return -EINVAL; 2432 2433 if (sess->user_data_sz < size) 2434 return -ENOMEM; 2435 2436 rte_memcpy(sess->sess_private_data + 2437 sess->max_priv_data_sz, 2438 data, size); 2439 2440 rte_cryptodev_trace_asym_session_set_user_data(sess, data, size); 2441 2442 return 0; 2443 } 2444 2445 void * 2446 rte_cryptodev_asym_session_get_user_data(void *session) 2447 { 2448 struct rte_cryptodev_asym_session *sess = session; 2449 void *data = NULL; 2450 2451 if (sess == NULL || sess->user_data_sz == 0) 2452 return NULL; 2453 2454 data = (void *)(sess->sess_private_data + sess->max_priv_data_sz); 2455 2456 rte_cryptodev_trace_asym_session_get_user_data(sess, data); 2457 2458 return data; 2459 } 2460 2461 static inline void 2462 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum) 2463 { 2464 uint32_t i; 2465 for (i = 0; i < vec->num; i++) 2466 vec->status[i] = errnum; 2467 } 2468 2469 uint32_t 2470 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, 2471 void *_sess, union rte_crypto_sym_ofs ofs, 2472 struct rte_crypto_sym_vec *vec) 2473 { 2474 struct rte_cryptodev *dev; 2475 struct rte_cryptodev_sym_session *sess = _sess; 2476 2477 if (!rte_cryptodev_is_valid_dev(dev_id)) { 2478 sym_crypto_fill_status(vec, EINVAL); 2479 return 0; 2480 } 2481 2482 dev = rte_cryptodev_pmd_get_dev(dev_id); 2483 2484 if (*dev->dev_ops->sym_cpu_process == NULL || 2485 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) { 2486 sym_crypto_fill_status(vec, ENOTSUP); 2487 return 0; 2488 } 2489 2490 rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess); 
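	/*
	 * Hand off to the PMD's synchronous CPU-crypto handler below. The
	 * returned value is the number of operations completed successfully;
	 * the per-element status codes are written to vec->status.
	 */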
2491 2492 return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec); 2493 } 2494 2495 int 2496 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id) 2497 { 2498 struct rte_cryptodev *dev; 2499 int32_t size = sizeof(struct rte_crypto_raw_dp_ctx); 2500 int32_t priv_size; 2501 2502 if (!rte_cryptodev_is_valid_dev(dev_id)) 2503 return -EINVAL; 2504 2505 dev = rte_cryptodev_pmd_get_dev(dev_id); 2506 2507 if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL || 2508 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) { 2509 return -ENOTSUP; 2510 } 2511 2512 priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev); 2513 if (priv_size < 0) 2514 return -ENOTSUP; 2515 2516 rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id); 2517 2518 return RTE_ALIGN_CEIL((size + priv_size), 8); 2519 } 2520 2521 int 2522 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id, 2523 struct rte_crypto_raw_dp_ctx *ctx, 2524 enum rte_crypto_op_sess_type sess_type, 2525 union rte_cryptodev_session_ctx session_ctx, 2526 uint8_t is_update) 2527 { 2528 struct rte_cryptodev *dev; 2529 2530 if (!rte_cryptodev_get_qp_status(dev_id, qp_id)) 2531 return -EINVAL; 2532 2533 dev = rte_cryptodev_pmd_get_dev(dev_id); 2534 if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP) 2535 || dev->dev_ops->sym_configure_raw_dp_ctx == NULL) 2536 return -ENOTSUP; 2537 2538 rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type); 2539 2540 return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx, 2541 sess_type, session_ctx, is_update); 2542 } 2543 2544 int 2545 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess, 2546 enum rte_crypto_op_type op_type, 2547 enum rte_crypto_op_sess_type sess_type, 2548 void *ev_mdata, 2549 uint16_t size) 2550 { 2551 struct rte_cryptodev *dev; 2552 2553 if (sess == NULL || ev_mdata == NULL) 2554 return -EINVAL; 2555 2556 if (!rte_cryptodev_is_valid_dev(dev_id)) 2557 goto skip_pmd_op; 2558 2559 dev = rte_cryptodev_pmd_get_dev(dev_id); 2560 if (dev->dev_ops->session_ev_mdata_set == NULL) 2561 goto skip_pmd_op; 2562 2563 rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type, 2564 sess_type, ev_mdata, size); 2565 2566 return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type, 2567 sess_type, ev_mdata); 2568 2569 skip_pmd_op: 2570 if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) 2571 return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata, 2572 size); 2573 else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { 2574 struct rte_cryptodev_asym_session *s = sess; 2575 2576 if (s->event_mdata == NULL) { 2577 s->event_mdata = rte_malloc(NULL, size, 0); 2578 if (s->event_mdata == NULL) 2579 return -ENOMEM; 2580 } 2581 rte_memcpy(s->event_mdata, ev_mdata, size); 2582 2583 return 0; 2584 } else 2585 return -ENOTSUP; 2586 } 2587 2588 uint32_t 2589 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx, 2590 struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, 2591 void **user_data, int *enqueue_status) 2592 { 2593 return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec, 2594 ofs, user_data, enqueue_status); 2595 } 2596 2597 int 2598 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx, 2599 uint32_t n) 2600 { 2601 return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n); 2602 } 2603 2604 uint32_t 2605 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx, 2606 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, 2607 uint32_t max_nb_to_dequeue, 2608 rte_cryptodev_raw_post_dequeue_t post_dequeue, 2609 void 
**out_user_data, uint8_t is_user_data_array, 2610 uint32_t *n_success_jobs, int *status) 2611 { 2612 return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data, 2613 get_dequeue_count, max_nb_to_dequeue, post_dequeue, 2614 out_user_data, is_user_data_array, n_success_jobs, status); 2615 } 2616 2617 int 2618 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx, 2619 uint32_t n) 2620 { 2621 return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n); 2622 } 2623 2624 /** Initialise rte_crypto_op mempool element */ 2625 static void 2626 rte_crypto_op_init(struct rte_mempool *mempool, 2627 void *opaque_arg, 2628 void *_op_data, 2629 __rte_unused unsigned i) 2630 { 2631 struct rte_crypto_op *op = _op_data; 2632 enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg; 2633 2634 memset(_op_data, 0, mempool->elt_size); 2635 2636 __rte_crypto_op_reset(op, type); 2637 2638 op->phys_addr = rte_mem_virt2iova(_op_data); 2639 op->mempool = mempool; 2640 } 2641 2642 2643 struct rte_mempool * 2644 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type, 2645 unsigned nb_elts, unsigned cache_size, uint16_t priv_size, 2646 int socket_id) 2647 { 2648 struct rte_crypto_op_pool_private *priv; 2649 2650 unsigned elt_size = sizeof(struct rte_crypto_op) + 2651 priv_size; 2652 2653 if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { 2654 elt_size += sizeof(struct rte_crypto_sym_op); 2655 } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { 2656 elt_size += sizeof(struct rte_crypto_asym_op); 2657 } else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) { 2658 elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op), 2659 sizeof(struct rte_crypto_asym_op)); 2660 } else { 2661 CDEV_LOG_ERR("Invalid op_type"); 2662 return NULL; 2663 } 2664 2665 /* lookup mempool in case already allocated */ 2666 struct rte_mempool *mp = rte_mempool_lookup(name); 2667 2668 if (mp != NULL) { 2669 priv = (struct rte_crypto_op_pool_private *) 2670 rte_mempool_get_priv(mp); 2671 2672 if (mp->elt_size != elt_size || 2673 mp->cache_size < cache_size || 2674 mp->size < nb_elts || 2675 priv->priv_size < priv_size) { 2676 mp = NULL; 2677 CDEV_LOG_ERR("Mempool %s already exists but with " 2678 "incompatible parameters", name); 2679 return NULL; 2680 } 2681 return mp; 2682 } 2683 2684 mp = rte_mempool_create( 2685 name, 2686 nb_elts, 2687 elt_size, 2688 cache_size, 2689 sizeof(struct rte_crypto_op_pool_private), 2690 NULL, 2691 NULL, 2692 rte_crypto_op_init, 2693 &type, 2694 socket_id, 2695 0); 2696 2697 if (mp == NULL) { 2698 CDEV_LOG_ERR("Failed to create mempool %s", name); 2699 return NULL; 2700 } 2701 2702 priv = (struct rte_crypto_op_pool_private *) 2703 rte_mempool_get_priv(mp); 2704 2705 priv->priv_size = priv_size; 2706 priv->type = type; 2707 2708 rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp); 2709 return mp; 2710 } 2711 2712 int 2713 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix) 2714 { 2715 struct rte_cryptodev *dev = NULL; 2716 uint32_t i = 0; 2717 2718 if (name == NULL) 2719 return -EINVAL; 2720 2721 for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) { 2722 int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, 2723 "%s_%u", dev_name_prefix, i); 2724 2725 if (ret < 0) 2726 return ret; 2727 2728 dev = rte_cryptodev_pmd_get_named_dev(name); 2729 if (!dev) 2730 return 0; 2731 } 2732 2733 return -1; 2734 } 2735 2736 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver); 2737 2738 static struct cryptodev_driver_list cryptodev_driver_list = 2739 
TAILQ_HEAD_INITIALIZER(cryptodev_driver_list); 2740 2741 int 2742 rte_cryptodev_driver_id_get(const char *name) 2743 { 2744 struct cryptodev_driver *driver; 2745 const char *driver_name; 2746 int driver_id = -1; 2747 2748 if (name == NULL) { 2749 RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL"); 2750 return -1; 2751 } 2752 2753 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { 2754 driver_name = driver->driver->name; 2755 if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) { 2756 driver_id = driver->id; 2757 break; 2758 } 2759 } 2760 2761 rte_cryptodev_trace_driver_id_get(name, driver_id); 2762 2763 return driver_id; 2764 } 2765 2766 const char * 2767 rte_cryptodev_name_get(uint8_t dev_id) 2768 { 2769 struct rte_cryptodev *dev; 2770 2771 if (!rte_cryptodev_is_valid_device_data(dev_id)) { 2772 CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); 2773 return NULL; 2774 } 2775 2776 dev = rte_cryptodev_pmd_get_dev(dev_id); 2777 if (dev == NULL) 2778 return NULL; 2779 2780 rte_cryptodev_trace_name_get(dev_id, dev->data->name); 2781 2782 return dev->data->name; 2783 } 2784 2785 const char * 2786 rte_cryptodev_driver_name_get(uint8_t driver_id) 2787 { 2788 struct cryptodev_driver *driver; 2789 2790 TAILQ_FOREACH(driver, &cryptodev_driver_list, next) { 2791 if (driver->id == driver_id) { 2792 rte_cryptodev_trace_driver_name_get(driver_id, 2793 driver->driver->name); 2794 return driver->driver->name; 2795 } 2796 } 2797 return NULL; 2798 } 2799 2800 uint8_t 2801 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv, 2802 const struct rte_driver *drv) 2803 { 2804 crypto_drv->driver = drv; 2805 crypto_drv->id = nb_drivers; 2806 2807 TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next); 2808 2809 rte_cryptodev_trace_allocate_driver(drv->name); 2810 2811 return nb_drivers++; 2812 } 2813 2814 RTE_INIT(cryptodev_init_fp_ops) 2815 { 2816 uint32_t i; 2817 2818 for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++) 2819 cryptodev_fp_ops_reset(rte_crypto_fp_ops + i); 2820 } 2821 2822 static int 2823 cryptodev_handle_dev_list(const char *cmd __rte_unused, 2824 const char *params __rte_unused, 2825 struct rte_tel_data *d) 2826 { 2827 int dev_id; 2828 2829 if (rte_cryptodev_count() < 1) 2830 return -EINVAL; 2831 2832 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 2833 for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) 2834 if (rte_cryptodev_is_valid_dev(dev_id)) 2835 rte_tel_data_add_array_int(d, dev_id); 2836 2837 return 0; 2838 } 2839 2840 static int 2841 cryptodev_handle_dev_info(const char *cmd __rte_unused, 2842 const char *params, struct rte_tel_data *d) 2843 { 2844 struct rte_cryptodev_info cryptodev_info; 2845 int dev_id; 2846 char *end_param; 2847 2848 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 2849 return -EINVAL; 2850 2851 dev_id = strtoul(params, &end_param, 0); 2852 if (*end_param != '\0') 2853 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2854 if (!rte_cryptodev_is_valid_dev(dev_id)) 2855 return -EINVAL; 2856 2857 rte_cryptodev_info_get(dev_id, &cryptodev_info); 2858 2859 rte_tel_data_start_dict(d); 2860 rte_tel_data_add_dict_string(d, "device_name", 2861 cryptodev_info.device->name); 2862 rte_tel_data_add_dict_int(d, "max_nb_queue_pairs", 2863 cryptodev_info.max_nb_queue_pairs); 2864 2865 return 0; 2866 } 2867 2868 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s) 2869 2870 static int 2871 cryptodev_handle_dev_stats(const char *cmd __rte_unused, 2872 const char *params, 2873 struct rte_tel_data *d) 2874 { 
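	/*
	 * Telemetry handler for /cryptodev/stats: parse a device id from
	 * "params", fetch that device's statistics and emit them as a
	 * dictionary of u64 counters via the ADD_DICT_STAT helper above.
	 */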
2875 struct rte_cryptodev_stats cryptodev_stats; 2876 int dev_id, ret; 2877 char *end_param; 2878 2879 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 2880 return -EINVAL; 2881 2882 dev_id = strtoul(params, &end_param, 0); 2883 if (*end_param != '\0') 2884 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2885 if (!rte_cryptodev_is_valid_dev(dev_id)) 2886 return -EINVAL; 2887 2888 ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats); 2889 if (ret < 0) 2890 return ret; 2891 2892 rte_tel_data_start_dict(d); 2893 ADD_DICT_STAT(enqueued_count); 2894 ADD_DICT_STAT(dequeued_count); 2895 ADD_DICT_STAT(enqueue_err_count); 2896 ADD_DICT_STAT(dequeue_err_count); 2897 2898 return 0; 2899 } 2900 2901 #define CRYPTO_CAPS_SZ \ 2902 (RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \ 2903 sizeof(uint64_t)) / \ 2904 sizeof(uint64_t)) 2905 2906 static int 2907 crypto_caps_array(struct rte_tel_data *d, 2908 const struct rte_cryptodev_capabilities *capabilities) 2909 { 2910 const struct rte_cryptodev_capabilities *dev_caps; 2911 uint64_t caps_val[CRYPTO_CAPS_SZ]; 2912 unsigned int i = 0, j; 2913 2914 rte_tel_data_start_array(d, RTE_TEL_U64_VAL); 2915 2916 while ((dev_caps = &capabilities[i++])->op != 2917 RTE_CRYPTO_OP_TYPE_UNDEFINED) { 2918 memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0])); 2919 rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0])); 2920 for (j = 0; j < CRYPTO_CAPS_SZ; j++) 2921 rte_tel_data_add_array_u64(d, caps_val[j]); 2922 } 2923 2924 return i; 2925 } 2926 2927 static int 2928 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params, 2929 struct rte_tel_data *d) 2930 { 2931 struct rte_cryptodev_info dev_info; 2932 struct rte_tel_data *crypto_caps; 2933 int crypto_caps_n; 2934 char *end_param; 2935 int dev_id; 2936 2937 if (!params || strlen(params) == 0 || !isdigit(*params)) 2938 return -EINVAL; 2939 2940 dev_id = strtoul(params, &end_param, 0); 2941 if (*end_param != '\0') 2942 CDEV_LOG_ERR("Extra parameters passed to command, ignoring"); 2943 if (!rte_cryptodev_is_valid_dev(dev_id)) 2944 return -EINVAL; 2945 2946 rte_tel_data_start_dict(d); 2947 crypto_caps = rte_tel_data_alloc(); 2948 if (!crypto_caps) 2949 return -ENOMEM; 2950 2951 rte_cryptodev_info_get(dev_id, &dev_info); 2952 crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities); 2953 rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0); 2954 rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n); 2955 2956 return 0; 2957 } 2958 2959 RTE_INIT(cryptodev_init_telemetry) 2960 { 2961 rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info, 2962 "Returns information for a cryptodev. Parameters: int dev_id"); 2963 rte_telemetry_register_cmd("/cryptodev/list", 2964 cryptodev_handle_dev_list, 2965 "Returns list of available crypto devices by IDs. No parameters."); 2966 rte_telemetry_register_cmd("/cryptodev/stats", 2967 cryptodev_handle_dev_stats, 2968 "Returns the stats for a cryptodev. Parameters: int dev_id"); 2969 rte_telemetry_register_cmd("/cryptodev/caps", 2970 cryptodev_handle_dev_caps, 2971 "Returns the capabilities for a cryptodev. Parameters: int dev_id"); 2972 } 2973
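/*
 * Illustrative usage sketch (not part of the library): how the session pool
 * and session APIs above are typically combined by an application. The
 * device id, pool sizing, key buffer and the AES-CBC transform below are
 * assumptions made for the example, not requirements of the API.
 *
 *	uint8_t dev_id = 0;	// assumed: a configured and started cryptodev
 *	uint8_t key[16] = { 0 };	// placeholder key material
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = sizeof(key) },
 *			// IV carried in the op's private area, per convention
 *			.iv = { .offset = sizeof(struct rte_crypto_op) +
 *				sizeof(struct rte_crypto_sym_op), .length = 16 },
 *		},
 *	};
 *
 *	// Size session elements from the PMD's private session data size.
 *	unsigned int sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *sess_mp = rte_cryptodev_sym_session_pool_create(
 *		"sym_sess_pool", 1024, sz, 32, 0, rte_socket_id());
 *
 *	if (sess_mp != NULL) {
 *		void *sess = rte_cryptodev_sym_session_create(dev_id, &xform,
 *			sess_mp);
 *		// ... attach sess to rte_crypto_op operations and
 *		// enqueue/dequeue bursts on the device's queue pairs ...
 *		if (sess != NULL)
 *			rte_cryptodev_sym_session_free(dev_id, sess);
 *		rte_mempool_free(sess_mp);
 *	}
 */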