/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
	.devs			= rte_crypto_devices,
	.data			= { NULL },
	.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * @deprecated
 * The crypto cipher algorithm strings identifiers.
 * It could be used in application command line.
 */
__rte_deprecated
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr"
};

/**
 * The crypto cipher algorithm strings identifiers.
 * Not to be used in application directly.
 * Application can use rte_cryptodev_get_cipher_algo_string().
 */
static const char *
crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr",
	[RTE_CRYPTO_CIPHER_SM4_CFB]	= "sm4-cfb",
	[RTE_CRYPTO_CIPHER_SM4_OFB]	= "sm4-ofb"
};

/**
 * The crypto cipher operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * @deprecated
 * The crypto auth algorithm strings identifiers.
 * It could be used in application command line.
 */
__rte_deprecated
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC]	= "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC]	= "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC]	= "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3"
};

/**
 * The crypto auth algorithm strings identifiers.
 * Not to be used in application directly.
 * Application can use rte_cryptodev_get_auth_algo_string().
 */
static const char *
crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC]	= "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC]	= "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC]	= "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3",
	[RTE_CRYPTO_AUTH_SM3_HMAC]	= "sm3-hmac",

	[RTE_CRYPTO_AUTH_SHAKE_128]	= "shake-128",
	[RTE_CRYPTO_AUTH_SHAKE_256]	= "shake-256",
};

/**
 * @deprecated
 * The crypto AEAD algorithm strings identifiers.
 * It could be used in application command line.
 */
__rte_deprecated
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD algorithm strings identifiers.
 * Not to be used in application directly.
 * Application can use rte_cryptodev_get_aead_algo_string().
 */
static const char *
crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * @deprecated
 * Asymmetric crypto transform operation strings identifiers.
 */
__rte_deprecated
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto transform operation strings identifiers.
 * Not to be used in application directly.
 * Application can use rte_cryptodev_asym_get_xform_string().
 */
static const char *
crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation strings identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * Asymmetric crypto key exchange operation strings identifiers.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE]	= "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY]	= "pub_ec_key_verify"
};

struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t sess_data_sz;
	/**< driver session data size */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);

	return ret;
}
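
/*
 * Usage sketch (illustrative only, not part of the library): an application
 * parsing algorithm names from its command line could combine the lookup
 * helpers above with the *_get_*_string() getters below. The variable names
 * are hypothetical.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		rte_exit(EXIT_FAILURE, "unknown cipher algorithm\n");
 *	printf("parsed: %s\n", rte_cryptodev_get_cipher_algo_string(algo));
 */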
const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
		alg_str = crypto_cipher_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
		alg_str = crypto_auth_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
		alg_str = crypto_aead_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
{
	const char *xform_str = NULL;

	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
		xform_str = crypto_asym_xform_strings[xform_enum];

	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);

	return xform_str;
}

/**
 * The crypto auth operation strings identifiers.
 * It could be used in application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
				capability->sym.auth.algo == idx->algo.auth) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
				capability->sym.cipher.algo == idx->algo.cipher) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead) {
			sym_capability = &capability->sym;
			break;
		}
	}

	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
		dev_info.driver_id, idx->type, sym_capability);

	return sym_capability;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type) {
			asym_cap = &capability->asym.xform_capa;
			break;
		}
	}

	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
		dev_info.driver_id, idx->type, asym_cap);

	return asym_cap;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
		iv_size, ret);

	return ret;
}
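
/*
 * Usage sketch (illustrative, with hypothetical sizes): an application can
 * combine rte_cryptodev_sym_capability_get() with the check helpers to
 * validate its parameters before creating a session, e.g. AES-128-CBC with
 * a 16-byte key and 16-byte IV:
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		return -ENOTSUP;
 */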
int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
			&capability->auth.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
		digest_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
			&capability->aead.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
		digest_size, aad_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type)
{
	int ret = 0;

	if (capability->op_types & (1 << op_type))
		ret = 1;

	rte_cryptodev_trace_asym_xform_capability_check_optype(
		capability->op_types, op_type, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen)
{
	int ret = 0; /* success */

	/* no need to check for limits, if min or max = 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min) {
			ret = -1;
			goto done;
		}
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max) {
			ret = -1;
			goto done;
		}
	}

	/* in any case, check if given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			ret = -1;
	}

done:
	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
		modlen, ret);

	return ret;
}
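
/*
 * Usage sketch (illustrative, hypothetical capability pointer and length):
 * for an RSA transform an application would typically verify both the
 * operation type and the modulus length it intends to use:
 *
 *	if (!rte_cryptodev_asym_xform_capability_check_optype(asym_cap,
 *			RTE_CRYPTO_ASYM_OP_SIGN))
 *		return -ENOTSUP;
 *	if (rte_cryptodev_asym_xform_capability_check_modlen(asym_cap,
 *			modlen) != 0)
 *		return -ENOTSUP;
 */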
/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max thread set to 1, as one DP thread accessing a queue-pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		/* Clear the pointer so a later cleanup doesn't free it twice */
		dev->enq_cbs = NULL;
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}
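
/*
 * Note on the RCU scheme set up above: each queue pair gets its own QSBR
 * variable sized for a single reader (max_threads = 1), matching the
 * cryptodev convention that only one data-plane thread polls a given queue
 * pair. The add/remove callback functions below publish list updates with a
 * release store and call rte_rcu_qsbr_synchronize() before freeing a removed
 * callback, so a reader never dereferences freed memory.
 */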
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	rte_cryptodev_trace_get_feature_name(flag);

	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int ret = 1;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		ret = 0;
		goto done;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		ret = 0;

done:
	rte_cryptodev_trace_is_valid_dev(dev_id, ret);

	return ret;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned i;
	int ret = -1;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)) {
			ret = (int)i;
			break;
		}
	}

	rte_cryptodev_trace_get_dev_id(name, ret);

	return ret;
}

uint8_t
rte_cryptodev_count(void)
{
	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);

	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	rte_cryptodev_trace_devices_get(driver_name, count);

	return count;
}
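
/*
 * Usage sketch (illustrative; "crypto_aesni_mb" is just an example driver
 * name): enumerating all attached devices bound to one driver.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_DIM(ids));
 *	uint8_t i;
 *
 *	for (i = 0; i < nb; i++)
 *		printf("dev %u on socket %d\n", ids[i],
 *			rte_cryptodev_socket_id(ids[i]));
 */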
void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	void *sec_ctx = NULL;

	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		sec_ctx = rte_crypto_devices[dev_id].security_ctx;

	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);

	return sec_ctx;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
		dev->data->socket_id);
	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
		dev->data->socket_id, dev->data->dev_id,
		dev->data->nb_queue_pairs);

	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
					dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
					"nb_queues %u",
					nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}
int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;
	int ret = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		ret = -EINVAL;
		goto done;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		ret = -EINVAL;
		goto done;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		ret = 1;
		goto done;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

done:
	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);

	return ret;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
		uint32_t sess_priv_size)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->sess_data_sz < sess_priv_size)
		return 0;

	return 1;
}
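
/*
 * Typical control-path ordering implied by the dev_started checks above and
 * below (a sketch with hypothetical values; sess_pool would come from
 * rte_cryptodev_sym_session_pool_create() and must satisfy
 * rte_cryptodev_sym_is_valid_session_pool() above):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_pool,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, SOCKET_ID_ANY);
 *	rte_cryptodev_start(dev_id);
 *	...
 *	rte_cryptodev_stop(dev_id);
 *	rte_cryptodev_close(dev_id);
 */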
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
				rte_cryptodev_sym_get_private_session_size(dev_id))) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
	return cb;
}

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}
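
/*
 * Usage sketch (illustrative, hypothetical callback): counting operations
 * enqueued on queue pair 0. The callback runs in the context of the
 * data-plane thread calling rte_cryptodev_enqueue_burst().
 *
 *	static uint16_t
 *	count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*(uint64_t *)arg += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static uint64_t total;
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_cb, &total);
 *	...
 *	rte_cryptodev_remove_enq_callback(dev_id, 0, cb);
 */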
struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);

	rte_cryptodev_trace_stats_get(dev_id, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	rte_cryptodev_trace_stats_reset(dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;

	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
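
/*
 * Usage sketch (illustrative, hypothetical handler): registering for error
 * events. As the comparison in rte_cryptodev_callback_unregister() below
 * shows, passing (void *)-1 as cb_arg at unregister time matches any
 * registered argument.
 *
 *	static void
 *	err_cb(uint8_t dev_id, enum rte_cryptodev_event_type event, void *arg)
 *	{
 *		...
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			err_cb, NULL);
 */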
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}
	dev = &rte_crypto_devices[dev_id];

	if (qp_id >= dev->data->nb_queue_pairs)
		return -EINVAL;
	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
		return -ENOTSUP;

	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size +
		user_data_size;

	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
			NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->sess_data_sz = elt_size;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
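
/*
 * Usage sketch (illustrative, hypothetical sizes and names): sizing the pool
 * element for this device's private session data, then creating a session
 * from it with rte_cryptodev_sym_session_create() below.
 *
 *	uint32_t sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *sess_pool = rte_cryptodev_sym_session_pool_create(
 *		"sess_pool", 1024, sz, 64, 0, rte_socket_id());
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform,
 *		sess_pool);
 */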
struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	uint32_t obj_sz, obj_sz_aligned;
	uint8_t dev_id;
	unsigned int priv_sz, max_priv_sz = 0;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id)) {
			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
			if (priv_sz > max_priv_sz)
				max_priv_sz = priv_sz;
		}
	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("Could not set max private session size");
		return NULL;
	}

	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);

	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}
	pool_priv->max_priv_session_sz = max_priv_sz;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
		user_data_size, cache_size, mp);
	return mp;
}

void *
rte_cryptodev_sym_session_create(uint8_t dev_id,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t sess_priv_sz;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (xforms == NULL) {
		CDEV_LOG_ERR("Invalid xform");
		rte_errno = EINVAL;
		return NULL;
	}

	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
		CDEV_LOG_ERR("Invalid mempool");
		rte_errno = EINVAL;
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		rte_errno = ENOMEM;
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	sess->driver_id = dev->driver_id;
	sess->sess_data_sz = pool_priv->sess_data_sz;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);

	if (dev->dev_ops->sym_session_configure == NULL) {
		rte_errno = ENOTSUP;
		goto error_exit;
	}
	memset(sess->driver_priv_data, 0,
		pool_priv->sess_data_sz + pool_priv->user_data_sz);

	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		rte_errno = -ret;
		goto error_exit;
	}

	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);

	return (void *)sess;

error_exit:
	rte_mempool_put(mp, (void *)sess);
	return NULL;
}
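/*
 * Illustrative usage sketch (not part of this library): create an AES-CBC
 * encrypt session from a pool built as above. The key contents, lengths
 * and the IV offset are example values only.
 *
 *	uint8_t key[16] = { 0 };
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = sizeof(key) },
 *			.iv = { .offset = sizeof(struct rte_crypto_op) +
 *					sizeof(struct rte_crypto_sym_op),
 *				.length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform, pool);
 *	if (sess == NULL)
 *		printf("session create failed: %d\n", rte_errno);
 */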
int
rte_cryptodev_asym_session_create(uint8_t dev_id,
		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
		void **session)
{
	struct rte_cryptodev_asym_session *sess;
	uint32_t session_priv_data_sz;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	unsigned int session_header_size =
			rte_cryptodev_asym_get_header_session_size();
	struct rte_cryptodev *dev;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return -EINVAL;

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool");
		return -EINVAL;
	}

	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
			dev_id);
	pool_priv = rte_mempool_get_priv(mp);

	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
		CDEV_LOG_DEBUG(
			"The private session data size used when creating the mempool is smaller than this device's private session data.");
		return -EINVAL;
	}

	/* Verify if provided mempool can hold elements big enough. */
	if (mp->elt_size < session_header_size + session_priv_data_sz) {
		CDEV_LOG_ERR("mempool elements too small to hold session objects");
		return -EINVAL;
	}

	/* Check the op before taking an object, so nothing can leak. */
	if (*dev->dev_ops->asym_session_configure == NULL)
		return -ENOTSUP;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, session)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return -ENOMEM;
	}

	sess = *session;
	sess->driver_id = dev->driver_id;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;

	/* Clear device session data. */
	memset(sess->sess_private_data, 0,
		session_priv_data_sz + sess->user_data_sz);

	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		CDEV_LOG_ERR("dev_id %d failed to configure session details",
			dev_id);
		/* Return the session to the pool rather than leaking it. */
		rte_mempool_put(mp, *session);
		*session = NULL;
		return ret;
	}

	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
	return 0;
}

int
rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
{
	struct rte_cryptodev *dev;
	struct rte_mempool *sess_mp;
	struct rte_cryptodev_sym_session *sess = _sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (sess == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return -EINVAL;

	sess_mp = rte_mempool_from_obj(sess);
	if (!sess_mp)
		return -EINVAL;
	pool_priv = rte_mempool_get_priv(sess_mp);

	if (sess->driver_id != dev->driver_id) {
		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
			sess->driver_id, dev->driver_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->sym_session_clear == NULL)
		return -ENOTSUP;

	dev->dev_ops->sym_session_clear(dev, sess);

	memset(sess->driver_priv_data, 0,
		pool_priv->sess_data_sz + pool_priv->user_data_sz);

	/* Return session to mempool */
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(dev_id, sess);
	return 0;
}
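/*
 * Illustrative usage sketch (not part of this library): tear down sessions
 * once no enqueued operations still reference them. "sess" is the symmetric
 * session from the earlier sketch; "asym_sess" stands for a session
 * obtained from rte_cryptodev_asym_session_create().
 *
 *	int ret = rte_cryptodev_sym_session_free(dev_id, sess);
 *	if (ret != 0)
 *		printf("sym session free failed: %d\n", ret);
 *
 *	ret = rte_cryptodev_asym_session_free(dev_id, asym_sess);
 */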
int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
{
	struct rte_mempool *sess_mp;
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	if (*dev->dev_ops->asym_session_clear == NULL)
		return -ENOTSUP;

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(dev_id, sess);
	return 0;
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	return sizeof(struct rte_cryptodev_asym_session);
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
		priv_sess_size);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);

	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
		priv_sess_size);

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
		uint16_t size)
{
	struct rte_cryptodev_sym_session *sess = _sess;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);

	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);

	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(void *_sess)
{
	struct rte_cryptodev_sym_session *sess = _sess;
	void *data = NULL;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);

	rte_cryptodev_trace_sym_session_get_user_data(sess, data);

	return data;
}

int
rte_cryptodev_asym_session_set_user_data(void *session, void *data,
		uint16_t size)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_private_data + sess->max_priv_data_sz,
			data, size);

	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);

	return 0;
}

void *
rte_cryptodev_asym_session_get_user_data(void *session)
{
	struct rte_cryptodev_asym_session *sess = session;
	void *data = NULL;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);

	rte_cryptodev_trace_asym_session_get_user_data(sess, data);

	return data;
}

static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}
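/*
 * Illustrative usage sketch (not part of this library): stash an
 * application context in the user-data area reserved when the session pool
 * was created with a non-zero user_data_size. "struct app_ctx" is a
 * hypothetical application type.
 *
 *	struct app_ctx { uint64_t flow_id; } ctx = { .flow_id = 42 };
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0) {
 *		struct app_ctx *p =
 *			rte_cryptodev_sym_session_get_user_data(sess);
 *		printf("flow %" PRIu64 "\n", p->flow_id);
 *	}
 */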
uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	void *_sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_sym_session *sess = _sess;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}

int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
	int32_t priv_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
		return -ENOTSUP;
	}

	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
	if (priv_size < 0)
		return -ENOTSUP;

	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);

	return RTE_ALIGN_CEIL((size + priv_size), 8);
}

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);

	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
			sess_type, session_ctx, is_update);
}

int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
	enum rte_crypto_op_type op_type,
	enum rte_crypto_op_sess_type sess_type,
	void *ev_mdata,
	uint16_t size)
{
	struct rte_cryptodev *dev;

	if (sess == NULL || ev_mdata == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		goto skip_pmd_op;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->dev_ops->session_ev_mdata_set == NULL)
		goto skip_pmd_op;

	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
		sess_type, ev_mdata, size);

	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
			sess_type, ev_mdata);

skip_pmd_op:
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
				size);
	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_cryptodev_asym_session *s = sess;

		if (s->event_mdata == NULL) {
			s->event_mdata = rte_malloc(NULL, size, 0);
			if (s->event_mdata == NULL)
				return -ENOMEM;
		}
		rte_memcpy(s->event_mdata, ev_mdata, size);

		return 0;
	} else
		return -ENOTSUP;
}
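/*
 * Illustrative usage sketch (not part of this library): set up a raw
 * data-path context on queue pair 0 for an existing session. A negative
 * size means the device lacks RTE_CRYPTODEV_FF_SYM_RAW_DP. Filling the
 * rte_crypto_sym_vec with buffers is omitted; names are examples.
 *
 *	int ctx_sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	if (ctx_sz < 0)
 *		return ctx_sz;
 *
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, ctx_sz, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, 0, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) == 0) {
 *		...fill a struct rte_crypto_sym_vec, then call
 *		rte_cryptodev_raw_enqueue_burst() and
 *		rte_cryptodev_raw_dequeue_burst() below...
 *	}
 */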
uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status)
{
	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
			ofs, user_data, enqueue_status);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *status)
{
	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
		out_user_data, is_user_data_array, n_success_jobs, status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with incompatible parameters",
					name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
	return mp;
}
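/*
 * Illustrative usage sketch (not part of this library): create a pool of
 * symmetric crypto operations and draw one op from it. The pool name and
 * sizing are arbitrary example values.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			4096, 128, 0, rte_socket_id());
 *
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *	if (op != NULL) {
 *		rte_crypto_op_attach_sym_session(op, sess);
 *		...
 *		rte_crypto_op_free(op);
 *	}
 */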
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;
	int driver_id = -1;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
			driver_id = driver->id;
			break;
		}
	}

	rte_cryptodev_trace_driver_id_get(name, driver_id);

	return driver_id;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	rte_cryptodev_trace_name_get(dev_id, dev->data->name);

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		if (driver->id == driver_id) {
			rte_cryptodev_trace_driver_name_get(driver_id,
				driver->driver->name);
			return driver->driver->name;
		}
	}
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	rte_cryptodev_trace_allocate_driver(drv->name);

	return nb_drivers++;
}

RTE_INIT(cryptodev_init_fp_ops)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
}

static int
cryptodev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	if (rte_cryptodev_count() < 1)
		return -EINVAL;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}
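/*
 * Illustrative usage sketch (not part of this library): resolve a driver
 * name to its numeric id and back. "crypto_aesni_mb" is only an example of
 * a PMD name; any registered driver name works.
 *
 *	int id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *	if (id >= 0)
 *		printf("driver %d is %s\n", id,
 *			rte_cryptodev_driver_name_get((uint8_t)id));
 */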
static int
cryptodev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_cryptodev_info cryptodev_info;
	int dev_id;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_cryptodev_info_get(dev_id, &cryptodev_info);

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "device_name",
		cryptodev_info.device->name);
	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
		cryptodev_info.max_nb_queue_pairs);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)

static int
cryptodev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_stats cryptodev_stats;
	int dev_id, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
	if (ret < 0)
		return ret;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(enqueued_count);
	ADD_DICT_STAT(dequeued_count);
	ADD_DICT_STAT(enqueue_err_count);
	ADD_DICT_STAT(dequeue_err_count);

	return 0;
}

#define CRYPTO_CAPS_SZ \
	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
			sizeof(uint64_t)) / \
	sizeof(uint64_t))

static int
crypto_caps_array(struct rte_tel_data *d,
		const struct rte_cryptodev_capabilities *capabilities)
{
	const struct rte_cryptodev_capabilities *dev_caps;
	uint64_t caps_val[CRYPTO_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);

	while ((dev_caps = &capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
			rte_tel_data_add_array_uint(d, caps_val[j]);
	}

	return i;
}

static int
cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_info dev_info;
	struct rte_tel_data *crypto_caps;
	int crypto_caps_n;
	char *end_param;
	int dev_id;

	if (!params || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_tel_data_start_dict(d);
	crypto_caps = rte_tel_data_alloc();
	if (!crypto_caps)
		return -ENOMEM;

	rte_cryptodev_info_get(dev_id, &dev_info);
	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);

	return 0;
}
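/*
 * Illustrative consumer-side sketch (not part of this library): a telemetry
 * client can rebuild each capability from the array emitted by
 * crypto_caps_array() above, since every capability is flattened into
 * CRYPTO_CAPS_SZ consecutive 64-bit words.
 *
 *	uint64_t words[CRYPTO_CAPS_SZ];
 *	struct rte_cryptodev_capabilities cap;
 *
 *	...fill words[] from one CRYPTO_CAPS_SZ-sized slice of the array...
 *	memcpy(&cap, words, sizeof(cap));
 */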
No parameters."); 2991 rte_telemetry_register_cmd("/cryptodev/stats", 2992 cryptodev_handle_dev_stats, 2993 "Returns the stats for a cryptodev. Parameters: int dev_id"); 2994 rte_telemetry_register_cmd("/cryptodev/caps", 2995 cryptodev_handle_dev_caps, 2996 "Returns the capabilities for a cryptodev. Parameters: int dev_id"); 2997 } 2998