1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2015-2020 Intel Corporation. 3 */ 4 5 #ifndef _RTE_CRYPTODEV_H_ 6 #define _RTE_CRYPTODEV_H_ 7 8 /** 9 * @file rte_cryptodev.h 10 * 11 * RTE Cryptographic Device APIs 12 * 13 * Defines RTE Crypto Device APIs for the provisioning of cipher and 14 * authentication operations. 15 */ 16 17 #ifdef __cplusplus 18 extern "C" { 19 #endif 20 21 #include "rte_kvargs.h" 22 #include "rte_crypto.h" 23 #include "rte_dev.h" 24 #include <rte_common.h> 25 #include <rte_config.h> 26 #include <rte_rcu_qsbr.h> 27 28 #include "rte_cryptodev_trace_fp.h" 29 30 extern const char **rte_cyptodev_names; 31 32 /* Logging Macros */ 33 34 #define CDEV_LOG_ERR(...) \ 35 RTE_LOG(ERR, CRYPTODEV, \ 36 RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ 37 __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,))) 38 39 #define CDEV_LOG_INFO(...) \ 40 RTE_LOG(INFO, CRYPTODEV, \ 41 RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ 42 RTE_FMT_TAIL(__VA_ARGS__,))) 43 44 #define CDEV_LOG_DEBUG(...) \ 45 RTE_LOG(DEBUG, CRYPTODEV, \ 46 RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ 47 __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,))) 48 49 #define CDEV_PMD_TRACE(...) \ 50 RTE_LOG(DEBUG, CRYPTODEV, \ 51 RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ 52 dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,))) 53 54 /** 55 * A macro that points to an offset from the start 56 * of the crypto operation structure (rte_crypto_op) 57 * 58 * The returned pointer is cast to type t. 59 * 60 * @param c 61 * The crypto operation. 62 * @param o 63 * The offset from the start of the crypto operation. 64 * @param t 65 * The type to cast the result into. 66 */ 67 #define rte_crypto_op_ctod_offset(c, t, o) \ 68 ((t)((char *)(c) + (o))) 69 70 /** 71 * A macro that returns the physical address that points 72 * to an offset from the start of the crypto operation 73 * (rte_crypto_op) 74 * 75 * @param c 76 * The crypto operation. 77 * @param o 78 * The offset from the start of the crypto operation 79 * to calculate address from. 80 */ 81 #define rte_crypto_op_ctophys_offset(c, o) \ 82 (rte_iova_t)((c)->phys_addr + (o)) 83 84 /** 85 * Crypto parameters range description 86 */ 87 struct rte_crypto_param_range { 88 uint16_t min; /**< minimum size */ 89 uint16_t max; /**< maximum size */ 90 uint16_t increment; 91 /**< if a range of sizes are supported, 92 * this parameter is used to indicate 93 * increments in byte size that are supported 94 * between the minimum and maximum 95 */ 96 }; 97 98 /** 99 * Data-unit supported lengths of cipher algorithms. 100 * A bit can represent any set of data-unit sizes 101 * (single size, multiple size, range, etc). 
102 */ 103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES RTE_BIT32(0) 104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES RTE_BIT32(1) 105 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES RTE_BIT32(2) 106 107 /** 108 * Symmetric Crypto Capability 109 */ 110 struct rte_cryptodev_symmetric_capability { 111 enum rte_crypto_sym_xform_type xform_type; 112 /**< Transform type : Authentication / Cipher / AEAD */ 113 RTE_STD_C11 114 union { 115 struct { 116 enum rte_crypto_auth_algorithm algo; 117 /**< authentication algorithm */ 118 uint16_t block_size; 119 /**< algorithm block size */ 120 struct rte_crypto_param_range key_size; 121 /**< auth key size range */ 122 struct rte_crypto_param_range digest_size; 123 /**< digest size range */ 124 struct rte_crypto_param_range aad_size; 125 /**< Additional authentication data size range */ 126 struct rte_crypto_param_range iv_size; 127 /**< Initialisation vector data size range */ 128 } auth; 129 /**< Symmetric Authentication transform capabilities */ 130 struct { 131 enum rte_crypto_cipher_algorithm algo; 132 /**< cipher algorithm */ 133 uint16_t block_size; 134 /**< algorithm block size */ 135 struct rte_crypto_param_range key_size; 136 /**< cipher key size range */ 137 struct rte_crypto_param_range iv_size; 138 /**< Initialisation vector data size range */ 139 uint32_t dataunit_set; 140 /**< 141 * Supported data-unit lengths: 142 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits 143 * or 0 for lengths defined in the algorithm standard. 144 */ 145 } cipher; 146 /**< Symmetric Cipher transform capabilities */ 147 struct { 148 enum rte_crypto_aead_algorithm algo; 149 /**< AEAD algorithm */ 150 uint16_t block_size; 151 /**< algorithm block size */ 152 struct rte_crypto_param_range key_size; 153 /**< AEAD key size range */ 154 struct rte_crypto_param_range digest_size; 155 /**< digest size range */ 156 struct rte_crypto_param_range aad_size; 157 /**< Additional authentication data size range */ 158 struct rte_crypto_param_range iv_size; 159 /**< Initialisation vector data size range */ 160 } aead; 161 }; 162 }; 163 164 /** 165 * Asymmetric Xform Crypto Capability 166 * 167 */ 168 struct rte_cryptodev_asymmetric_xform_capability { 169 enum rte_crypto_asym_xform_type xform_type; 170 /**< Transform type: RSA/MODEXP/DH/DSA/MODINV */ 171 172 uint32_t op_types; 173 /**< bitmask for supported rte_crypto_asym_op_type */ 174 175 __extension__ 176 union { 177 struct rte_crypto_param_range modlen; 178 /**< Range of modulus length supported by modulus based xform. 
179 * Value 0 mean implementation default 180 */ 181 }; 182 }; 183 184 /** 185 * Asymmetric Crypto Capability 186 * 187 */ 188 struct rte_cryptodev_asymmetric_capability { 189 struct rte_cryptodev_asymmetric_xform_capability xform_capa; 190 }; 191 192 193 /** Structure used to capture a capability of a crypto device */ 194 struct rte_cryptodev_capabilities { 195 enum rte_crypto_op_type op; 196 /**< Operation type */ 197 198 RTE_STD_C11 199 union { 200 struct rte_cryptodev_symmetric_capability sym; 201 /**< Symmetric operation capability parameters */ 202 struct rte_cryptodev_asymmetric_capability asym; 203 /**< Asymmetric operation capability parameters */ 204 }; 205 }; 206 207 /** Structure used to describe crypto algorithms */ 208 struct rte_cryptodev_sym_capability_idx { 209 enum rte_crypto_sym_xform_type type; 210 union { 211 enum rte_crypto_cipher_algorithm cipher; 212 enum rte_crypto_auth_algorithm auth; 213 enum rte_crypto_aead_algorithm aead; 214 } algo; 215 }; 216 217 /** 218 * Structure used to describe asymmetric crypto xforms 219 * Each xform maps to one asym algorithm. 220 * 221 */ 222 struct rte_cryptodev_asym_capability_idx { 223 enum rte_crypto_asym_xform_type type; 224 /**< Asymmetric xform (algo) type */ 225 }; 226 227 /** 228 * Provide capabilities available for defined device and algorithm 229 * 230 * @param dev_id The identifier of the device. 231 * @param idx Description of crypto algorithms. 232 * 233 * @return 234 * - Return description of the symmetric crypto capability if exist. 235 * - Return NULL if the capability not exist. 236 */ 237 const struct rte_cryptodev_symmetric_capability * 238 rte_cryptodev_sym_capability_get(uint8_t dev_id, 239 const struct rte_cryptodev_sym_capability_idx *idx); 240 241 /** 242 * Provide capabilities available for defined device and xform 243 * 244 * @param dev_id The identifier of the device. 245 * @param idx Description of asym crypto xform. 246 * 247 * @return 248 * - Return description of the asymmetric crypto capability if exist. 249 * - Return NULL if the capability not exist. 250 */ 251 __rte_experimental 252 const struct rte_cryptodev_asymmetric_xform_capability * 253 rte_cryptodev_asym_capability_get(uint8_t dev_id, 254 const struct rte_cryptodev_asym_capability_idx *idx); 255 256 /** 257 * Check if key size and initial vector are supported 258 * in crypto cipher capability 259 * 260 * @param capability Description of the symmetric crypto capability. 261 * @param key_size Cipher key size. 262 * @param iv_size Cipher initial vector size. 263 * 264 * @return 265 * - Return 0 if the parameters are in range of the capability. 266 * - Return -1 if the parameters are out of range of the capability. 267 */ 268 int 269 rte_cryptodev_sym_capability_check_cipher( 270 const struct rte_cryptodev_symmetric_capability *capability, 271 uint16_t key_size, uint16_t iv_size); 272 273 /** 274 * Check if key size and initial vector are supported 275 * in crypto auth capability 276 * 277 * @param capability Description of the symmetric crypto capability. 278 * @param key_size Auth key size. 279 * @param digest_size Auth digest size. 280 * @param iv_size Auth initial vector size. 281 * 282 * @return 283 * - Return 0 if the parameters are in range of the capability. 284 * - Return -1 if the parameters are out of range of the capability. 
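 *
 * A minimal capability-check sketch (illustrative only; the algorithm choice
 * and the 32-byte key/digest sizes are assumptions, dev_id is assumed valid):
 *
 * @code
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.algo.auth = RTE_CRYPTO_AUTH_SHA256_HMAC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_auth(cap, 32, 32, 0) != 0)
 *		return -ENOTSUP;
 * @endcode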
 */
int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);

/**
 * Check if key, digest, AAD and initial vector sizes are supported
 * in crypto AEAD capability
 *
 * @param	capability	Description of the symmetric crypto capability.
 * @param	key_size	AEAD key size.
 * @param	digest_size	AEAD digest size.
 * @param	aad_size	AEAD AAD size.
 * @param	iv_size		AEAD IV size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size);

/**
 * Check if op type is supported
 *
 * @param	capability	Description of the asymmetric crypto capability.
 * @param	op_type		op type
 *
 * @return
 *   - Return 1 if the op type is supported
 *   - Return 0 if unsupported
 */
__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type);

/**
 * Check if modulus length is in supported range
 *
 * @param	capability	Description of the asymmetric crypto capability.
 * @param	modlen		modulus length.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen);

/**
 * Provide the cipher algorithm enum, given an algorithm string
 *
 * @param	algo_enum	A pointer to the cipher algorithm
 *				enum to be filled
 * @param	algo_string	Cipher algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string);

/**
 * Provide the authentication algorithm enum, given an algorithm string
 *
 * @param	algo_enum	A pointer to the authentication algorithm
 *				enum to be filled
 * @param	algo_string	Authentication algo string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string);

/**
 * Provide the AEAD algorithm enum, given an algorithm string
 *
 * @param	algo_enum	A pointer to the AEAD algorithm
 *				enum to be filled
 * @param	algo_string	AEAD algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string);

/**
 * Provide the Asymmetric xform enum, given an xform string
 *
 * @param	xform_enum	A pointer to the xform type
 *				enum to be filled
 * @param	xform_string	xform string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
__rte_experimental 400 int 401 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, 402 const char *xform_string); 403 404 405 /** Macro used at end of crypto PMD list */ 406 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \ 407 { RTE_CRYPTO_OP_TYPE_UNDEFINED } 408 409 410 /** 411 * Crypto device supported feature flags 412 * 413 * Note: 414 * New features flags should be added to the end of the list 415 * 416 * Keep these flags synchronised with rte_cryptodev_get_feature_name() 417 */ 418 #define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0) 419 /**< Symmetric crypto operations are supported */ 420 #define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1) 421 /**< Asymmetric crypto operations are supported */ 422 #define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2) 423 /**< Chaining symmetric crypto operations are supported */ 424 #define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3) 425 /**< Utilises CPU SIMD SSE instructions */ 426 #define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4) 427 /**< Utilises CPU SIMD AVX instructions */ 428 #define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5) 429 /**< Utilises CPU SIMD AVX2 instructions */ 430 #define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6) 431 /**< Utilises CPU AES-NI instructions */ 432 #define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7) 433 /**< Operations are off-loaded to an 434 * external hardware accelerator 435 */ 436 #define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8) 437 /**< Utilises CPU SIMD AVX512 instructions */ 438 #define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9) 439 /**< In-place Scatter-gather (SGL) buffers, with multiple segments, 440 * are supported 441 */ 442 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10) 443 /**< Out-of-place Scatter-gather (SGL) buffers are 444 * supported in input and output 445 */ 446 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11) 447 /**< Out-of-place Scatter-gather (SGL) buffers are supported 448 * in input, combined with linear buffers (LB), with a 449 * single segment in output 450 */ 451 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12) 452 /**< Out-of-place Scatter-gather (SGL) buffers are supported 453 * in output, combined with linear buffers (LB) in input 454 */ 455 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13) 456 /**< Out-of-place linear buffers (LB) are supported in input and output */ 457 #define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14) 458 /**< Utilises CPU NEON instructions */ 459 #define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15) 460 /**< Utilises ARM CPU Cryptographic Extensions */ 461 #define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16) 462 /**< Support Security Protocol Processing */ 463 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP (1ULL << 17) 464 /**< Support RSA Private Key OP with exponent */ 465 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT (1ULL << 18) 466 /**< Support RSA Private Key OP with CRT (quintuple) Keys */ 467 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED (1ULL << 19) 468 /**< Support encrypted-digest operations where digest is appended to data */ 469 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20) 470 /**< Support asymmetric session-less operations */ 471 #define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21) 472 /**< Support symmetric cpu-crypto processing */ 473 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS (1ULL << 22) 474 /**< Support symmetric session-less operations */ 475 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23) 476 /**< Support operations on data which is not byte aligned */ 477 #define RTE_CRYPTODEV_FF_SYM_RAW_DP (1ULL 
<< 24) 478 /**< Support accelerator specific symmetric raw data-path APIs */ 479 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS (1ULL << 25) 480 /**< Support operations on multiple data-units message */ 481 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY (1ULL << 26) 482 /**< Support wrapped key in cipher xform */ 483 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM (1ULL << 27) 484 /**< Support inner checksum computation/verification */ 485 486 /** 487 * Get the name of a crypto device feature flag 488 * 489 * @param flag The mask describing the flag. 490 * 491 * @return 492 * The name of this flag, or NULL if it's not a valid feature flag. 493 */ 494 495 extern const char * 496 rte_cryptodev_get_feature_name(uint64_t flag); 497 498 /** Crypto device information */ 499 struct rte_cryptodev_info { 500 const char *driver_name; /**< Driver name. */ 501 uint8_t driver_id; /**< Driver identifier */ 502 struct rte_device *device; /**< Generic device information. */ 503 504 uint64_t feature_flags; 505 /**< Feature flags exposes HW/SW features for the given device */ 506 507 const struct rte_cryptodev_capabilities *capabilities; 508 /**< Array of devices supported capabilities */ 509 510 unsigned max_nb_queue_pairs; 511 /**< Maximum number of queues pairs supported by device. */ 512 513 uint16_t min_mbuf_headroom_req; 514 /**< Minimum mbuf headroom required by device */ 515 516 uint16_t min_mbuf_tailroom_req; 517 /**< Minimum mbuf tailroom required by device */ 518 519 struct { 520 unsigned max_nb_sessions; 521 /**< Maximum number of sessions supported by device. 522 * If 0, the device does not have any limitation in 523 * number of sessions that can be used. 524 */ 525 } sym; 526 }; 527 528 #define RTE_CRYPTODEV_DETACHED (0) 529 #define RTE_CRYPTODEV_ATTACHED (1) 530 531 /** Definitions of Crypto device event types */ 532 enum rte_cryptodev_event_type { 533 RTE_CRYPTODEV_EVENT_UNKNOWN, /**< unknown event type */ 534 RTE_CRYPTODEV_EVENT_ERROR, /**< error interrupt event */ 535 RTE_CRYPTODEV_EVENT_MAX /**< max value of this enum */ 536 }; 537 538 /** Crypto device queue pair configuration structure. */ 539 struct rte_cryptodev_qp_conf { 540 uint32_t nb_descriptors; /**< Number of descriptors per queue pair */ 541 struct rte_mempool *mp_session; 542 /**< The mempool for creating session in sessionless mode */ 543 struct rte_mempool *mp_session_private; 544 /**< The mempool for creating sess private data in sessionless mode */ 545 }; 546 547 /** 548 * Function type used for processing crypto ops when enqueue/dequeue burst is 549 * called. 550 * 551 * The callback function is called on enqueue/dequeue burst immediately. 552 * 553 * @param dev_id The identifier of the device. 554 * @param qp_id The index of the queue pair on which ops are 555 * enqueued/dequeued. The value must be in the 556 * range [0, nb_queue_pairs - 1] previously 557 * supplied to *rte_cryptodev_configure*. 558 * @param ops The address of an array of *nb_ops* pointers 559 * to *rte_crypto_op* structures which contain 560 * the crypto operations to be processed. 561 * @param nb_ops The number of operations to process. 562 * @param user_param The arbitrary user parameter passed in by the 563 * application when the callback was originally 564 * registered. 565 * @return The number of ops to be enqueued to the 566 * crypto device. 
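 *
 * A possible callback implementation is sketched below (illustrative only;
 * the counter and its later registration with rte_cryptodev_add_enq_callback()
 * are assumptions made for the example):
 *
 * @code
 *	static uint16_t
 *	count_enqueued_ops(uint16_t dev_id, uint16_t qp_id,
 *			   struct rte_crypto_op **ops, uint16_t nb_ops,
 *			   void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*counter += nb_ops;
 *		return nb_ops;
 *	}
 * @endcode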
 */
typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);

/**
 * Typedef for application callback function to be registered by application
 * software for notification of device events
 *
 * @param	dev_id	Crypto device identifier
 * @param	event	Crypto device event to register for notification of.
 * @param	cb_arg	User specified parameter to be passed to the user's
 *			callback function.
 */
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
		enum rte_cryptodev_event_type event, void *cb_arg);


/** Crypto Device statistics */
struct rte_cryptodev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};

#define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
/**< Max length of name of crypto PMD */

/**
 * Get the device identifier for the named crypto device.
 *
 * @param	name	device name to select the device structure.
 *
 * @return
 *   - Returns crypto device identifier on success.
 *   - Return -1 on failure to find named crypto device.
 */
extern int
rte_cryptodev_get_dev_id(const char *name);

/**
 * Get the crypto device name given a device identifier.
 *
 * @param dev_id
 *   The identifier of the device
 *
 * @return
 *   - Returns crypto device name.
 *   - Returns NULL if crypto device is not present.
 */
extern const char *
rte_cryptodev_name_get(uint8_t dev_id);

/**
 * Get the total number of crypto devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable crypto devices.
 */
extern uint8_t
rte_cryptodev_count(void);

/**
 * Get the number of crypto devices that use the specified driver.
 *
 * @param	driver_id	driver identifier.
 *
 * @return
 *   Returns the number of crypto devices.
 */
extern uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id);

/**
 * Get number and identifiers of attached crypto devices that
 * use the same crypto driver.
 *
 * @param	driver_name	driver name.
 * @param	devices		output array of device identifiers.
 * @param	nb_devices	maximum number of devices.
 *
 * @return
 *   Returns the number of attached crypto devices.
 */
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices);
/**
 * Return the NUMA socket to which a device is connected
 *
 * @param dev_id
 *   The identifier of the device
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 is returned if the dev_id value is out of range.
 */
extern int
rte_cryptodev_socket_id(uint8_t dev_id);

/** Crypto device configuration structure */
struct rte_cryptodev_config {
	int socket_id;			/**< Socket to allocate resources on */
	uint16_t nb_queue_pairs;
	/**< Number of queue pairs to configure on device */
	uint64_t ff_disable;
	/**< Feature flags to be disabled.
	 * Only the following features are allowed to be disabled,
	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_SECURITY
	 */
};

/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param	dev_id		The identifier of the device to configure.
 * @param	config		The crypto device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
extern int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);

/**
 * Start a device.
 *
 * The device start step is the last one and consists of setting the
 * configured offload features and starting the transmit and the receive
 * units of the device.
 * On success, all basic functions exported by the API (link status,
 * receive/transmit, and so on) can be invoked.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
extern int
rte_cryptodev_start(uint8_t dev_id);

/**
 * Stop a device. The device can be restarted with a call to
 * rte_cryptodev_start()
 *
 * @param	dev_id		The identifier of the device.
 */
extern void
rte_cryptodev_stop(uint8_t dev_id);

/**
 * Close a device. The device cannot be restarted!
 *
 * @param	dev_id		The identifier of the device.
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 */
extern int
rte_cryptodev_close(uint8_t dev_id);

/**
 * Allocate and set up a queue pair for a device.
 *
 *
 * @param	dev_id		The identifier of the device.
 * @param	queue_pair_id	The index of the queue pair to set up. The
 *				value must be in the range [0, nb_queue_pair
 *				- 1] previously supplied to
 *				rte_cryptodev_configure().
 * @param	qp_conf		The pointer to the configuration data to be
 *				used for the queue pair.
 * @param	socket_id	The *socket_id* argument is the socket
 *				identifier in case of NUMA. The value can be
 *				*SOCKET_ID_ANY* if there is no NUMA constraint
 *				for the DMA memory allocated for the queue
 *				pair.
 *
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
extern int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);

/**
 * Get the status of queue pairs setup on a specific crypto device
 *
 * @param	dev_id		Crypto device identifier.
 * @param	queue_pair_id	The index of the queue pair to set up. The
 *				value must be in the range [0, nb_queue_pair
 *				- 1] previously supplied to
 *				rte_cryptodev_configure().
 * @return
 *   - 0: qp was not configured
 *   - 1: qp was configured
 *   - -EINVAL: device was not configured
 */
__rte_experimental
int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);

/**
 * Get the number of queue pairs on a specific crypto device
 *
 * @param	dev_id		Crypto device identifier.
 * @return
 *   - The number of configured queue pairs.
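 *
 * A typical bring-up sequence is sketched below (illustrative only; the
 * device name, descriptor count and the two session mempools are
 * assumptions):
 *
 * @code
 *	int dev_id = rte_cryptodev_get_dev_id("crypto_dev0");
 *	int socket_id = rte_cryptodev_socket_id(dev_id);
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = socket_id,
 *		.nb_queue_pairs = 1,
 *		.ff_disable = 0,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.mp_session_private = session_priv_pool,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *					   socket_id) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot initialise crypto device\n");
 * @endcode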
791 */ 792 extern uint16_t 793 rte_cryptodev_queue_pair_count(uint8_t dev_id); 794 795 796 /** 797 * Retrieve the general I/O statistics of a device. 798 * 799 * @param dev_id The identifier of the device. 800 * @param stats A pointer to a structure of type 801 * *rte_cryptodev_stats* to be filled with the 802 * values of device counters. 803 * @return 804 * - Zero if successful. 805 * - Non-zero otherwise. 806 */ 807 extern int 808 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats); 809 810 /** 811 * Reset the general I/O statistics of a device. 812 * 813 * @param dev_id The identifier of the device. 814 */ 815 extern void 816 rte_cryptodev_stats_reset(uint8_t dev_id); 817 818 /** 819 * Retrieve the contextual information of a device. 820 * 821 * @param dev_id The identifier of the device. 822 * @param dev_info A pointer to a structure of type 823 * *rte_cryptodev_info* to be filled with the 824 * contextual information of the device. 825 * 826 * @note The capabilities field of dev_info is set to point to the first 827 * element of an array of struct rte_cryptodev_capabilities. The element after 828 * the last valid element has it's op field set to 829 * RTE_CRYPTO_OP_TYPE_UNDEFINED. 830 */ 831 extern void 832 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info); 833 834 835 /** 836 * Register a callback function for specific device id. 837 * 838 * @param dev_id Device id. 839 * @param event Event interested. 840 * @param cb_fn User supplied callback function to be called. 841 * @param cb_arg Pointer to the parameters for the registered 842 * callback. 843 * 844 * @return 845 * - On success, zero. 846 * - On failure, a negative value. 847 */ 848 extern int 849 rte_cryptodev_callback_register(uint8_t dev_id, 850 enum rte_cryptodev_event_type event, 851 rte_cryptodev_cb_fn cb_fn, void *cb_arg); 852 853 /** 854 * Unregister a callback function for specific device id. 855 * 856 * @param dev_id The device identifier. 857 * @param event Event interested. 858 * @param cb_fn User supplied callback function to be called. 859 * @param cb_arg Pointer to the parameters for the registered 860 * callback. 861 * 862 * @return 863 * - On success, zero. 864 * - On failure, a negative value. 865 */ 866 extern int 867 rte_cryptodev_callback_unregister(uint8_t dev_id, 868 enum rte_cryptodev_event_type event, 869 rte_cryptodev_cb_fn cb_fn, void *cb_arg); 870 871 struct rte_cryptodev_callback; 872 873 /** Structure to keep track of registered callbacks */ 874 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback); 875 876 /** 877 * Structure used to hold information about the callbacks to be called for a 878 * queue pair on enqueue/dequeue. 879 */ 880 struct rte_cryptodev_cb { 881 struct rte_cryptodev_cb *next; 882 /**< Pointer to next callback */ 883 rte_cryptodev_callback_fn fn; 884 /**< Pointer to callback function */ 885 void *arg; 886 /**< Pointer to argument */ 887 }; 888 889 /** 890 * @internal 891 * Structure used to hold information about the RCU for a queue pair. 892 */ 893 struct rte_cryptodev_cb_rcu { 894 struct rte_cryptodev_cb *next; 895 /**< Pointer to next callback */ 896 struct rte_rcu_qsbr *qsbr; 897 /**< RCU QSBR variable per queue pair */ 898 }; 899 900 void * 901 rte_cryptodev_get_sec_ctx(uint8_t dev_id); 902 903 /** Cryptodev symmetric crypto session 904 * Each session is derived from a fixed xform chain. Therefore each session 905 * has a fixed algo, key, op-type, digest_len etc. 
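 *
 * The usual session lifecycle (creation, initialization and, once all
 * operations using it have completed, teardown) is sketched below
 * (illustrative only; pool sizes, the xform contents and dev_id/socket_id
 * are assumptions, and rte_mempool_create() comes from <rte_mempool.h>):
 *
 * @code
 *	struct rte_mempool *sess_pool = rte_cryptodev_sym_session_pool_create(
 *		"sess_pool", 1024, rte_cryptodev_sym_get_header_session_size(),
 *		32, 0, socket_id);
 *	struct rte_mempool *sess_priv_pool = rte_mempool_create(
 *		"sess_priv_pool", 1024,
 *		rte_cryptodev_sym_get_private_session_size(dev_id),
 *		32, 0, NULL, NULL, NULL, NULL, socket_id, 0);
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_pool);
 *
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_pool);
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 * @endcode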
906 */ 907 struct rte_cryptodev_sym_session { 908 uint64_t opaque_data; 909 /**< Can be used for external metadata */ 910 uint16_t nb_drivers; 911 /**< number of elements in sess_data array */ 912 uint16_t user_data_sz; 913 /**< session user data will be placed after sess_data */ 914 __extension__ struct { 915 void *data; 916 uint16_t refcnt; 917 } sess_data[0]; 918 /**< Driver specific session material, variable size */ 919 }; 920 921 /** Cryptodev asymmetric crypto session */ 922 struct rte_cryptodev_asym_session { 923 __extension__ void *sess_private_data[0]; 924 /**< Private asymmetric session material */ 925 }; 926 927 /** 928 * Create a symmetric session mempool. 929 * 930 * @param name 931 * The unique mempool name. 932 * @param nb_elts 933 * The number of elements in the mempool. 934 * @param elt_size 935 * The size of the element. This value will be ignored if it is smaller than 936 * the minimum session header size required for the system. For the user who 937 * want to use the same mempool for sym session and session private data it 938 * can be the maximum value of all existing devices' private data and session 939 * header sizes. 940 * @param cache_size 941 * The number of per-lcore cache elements 942 * @param priv_size 943 * The private data size of each session. 944 * @param socket_id 945 * The *socket_id* argument is the socket identifier in the case of 946 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA 947 * constraint for the reserved zone. 948 * 949 * @return 950 * - On success return size of the session 951 * - On failure returns 0 952 */ 953 __rte_experimental 954 struct rte_mempool * 955 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, 956 uint32_t elt_size, uint32_t cache_size, uint16_t priv_size, 957 int socket_id); 958 959 /** 960 * Create symmetric crypto session header (generic with no private data) 961 * 962 * @param mempool Symmetric session mempool to allocate session 963 * objects from 964 * @return 965 * - On success return pointer to sym-session 966 * - On failure returns NULL 967 */ 968 struct rte_cryptodev_sym_session * 969 rte_cryptodev_sym_session_create(struct rte_mempool *mempool); 970 971 /** 972 * Create asymmetric crypto session header (generic with no private data) 973 * 974 * @param mempool mempool to allocate asymmetric session 975 * objects from 976 * @return 977 * - On success return pointer to asym-session 978 * - On failure returns NULL 979 */ 980 __rte_experimental 981 struct rte_cryptodev_asym_session * 982 rte_cryptodev_asym_session_create(struct rte_mempool *mempool); 983 984 /** 985 * Frees symmetric crypto session header, after checking that all 986 * the device private data has been freed, returning it 987 * to its original mempool. 988 * 989 * @param sess Session header to be freed. 990 * 991 * @return 992 * - 0 if successful. 993 * - -EINVAL if session is NULL. 994 * - -EBUSY if not all device private data has been freed. 995 */ 996 int 997 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess); 998 999 /** 1000 * Frees asymmetric crypto session header, after checking that all 1001 * the device private data has been freed, returning it 1002 * to its original mempool. 1003 * 1004 * @param sess Session header to be freed. 1005 * 1006 * @return 1007 * - 0 if successful. 1008 * - -EINVAL if session is NULL. 1009 * - -EBUSY if not all device private data has been freed. 
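 *
 * For reference, an asymmetric session follows a similar pattern
 * (illustrative sketch only; the mempool and xform are assumptions):
 *
 * @code
 *	struct rte_cryptodev_asym_session *asym_sess =
 *		rte_cryptodev_asym_session_create(asym_sess_pool);
 *
 *	rte_cryptodev_asym_session_init(dev_id, asym_sess, &asym_xform,
 *					asym_sess_pool);
 *	rte_cryptodev_asym_session_clear(dev_id, asym_sess);
 *	rte_cryptodev_asym_session_free(asym_sess);
 * @endcode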
1010 */ 1011 __rte_experimental 1012 int 1013 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess); 1014 1015 /** 1016 * Fill out private data for the device id, based on its device type. 1017 * 1018 * @param dev_id ID of device that we want the session to be used on 1019 * @param sess Session where the private data will be attached to 1020 * @param xforms Symmetric crypto transform operations to apply on flow 1021 * processed with this session 1022 * @param mempool Mempool where the private data is allocated. 1023 * 1024 * @return 1025 * - On success, zero. 1026 * - -EINVAL if input parameters are invalid. 1027 * - -ENOTSUP if crypto device does not support the crypto transform or 1028 * does not support symmetric operations. 1029 * - -ENOMEM if the private session could not be allocated. 1030 */ 1031 int 1032 rte_cryptodev_sym_session_init(uint8_t dev_id, 1033 struct rte_cryptodev_sym_session *sess, 1034 struct rte_crypto_sym_xform *xforms, 1035 struct rte_mempool *mempool); 1036 1037 /** 1038 * Initialize asymmetric session on a device with specific asymmetric xform 1039 * 1040 * @param dev_id ID of device that we want the session to be used on 1041 * @param sess Session to be set up on a device 1042 * @param xforms Asymmetric crypto transform operations to apply on flow 1043 * processed with this session 1044 * @param mempool Mempool to be used for internal allocation. 1045 * 1046 * @return 1047 * - On success, zero. 1048 * - -EINVAL if input parameters are invalid. 1049 * - -ENOTSUP if crypto device does not support the crypto transform. 1050 * - -ENOMEM if the private session could not be allocated. 1051 */ 1052 __rte_experimental 1053 int 1054 rte_cryptodev_asym_session_init(uint8_t dev_id, 1055 struct rte_cryptodev_asym_session *sess, 1056 struct rte_crypto_asym_xform *xforms, 1057 struct rte_mempool *mempool); 1058 1059 /** 1060 * Frees private data for the device id, based on its device type, 1061 * returning it to its mempool. It is the application's responsibility 1062 * to ensure that private session data is not cleared while there are 1063 * still in-flight operations using it. 1064 * 1065 * @param dev_id ID of device that uses the session. 1066 * @param sess Session containing the reference to the private data 1067 * 1068 * @return 1069 * - 0 if successful. 1070 * - -EINVAL if device is invalid or session is NULL. 1071 * - -ENOTSUP if crypto device does not support symmetric operations. 1072 */ 1073 int 1074 rte_cryptodev_sym_session_clear(uint8_t dev_id, 1075 struct rte_cryptodev_sym_session *sess); 1076 1077 /** 1078 * Frees resources held by asymmetric session during rte_cryptodev_session_init 1079 * 1080 * @param dev_id ID of device that uses the asymmetric session. 1081 * @param sess Asymmetric session setup on device using 1082 * rte_cryptodev_session_init 1083 * @return 1084 * - 0 if successful. 1085 * - -EINVAL if device is invalid or session is NULL. 1086 */ 1087 __rte_experimental 1088 int 1089 rte_cryptodev_asym_session_clear(uint8_t dev_id, 1090 struct rte_cryptodev_asym_session *sess); 1091 1092 /** 1093 * Get the size of the header session, for all registered drivers excluding 1094 * the user data size. 1095 * 1096 * @return 1097 * Size of the symmetric header session. 1098 */ 1099 unsigned int 1100 rte_cryptodev_sym_get_header_session_size(void); 1101 1102 /** 1103 * Get the size of the header session from created session. 
1104 * 1105 * @param sess 1106 * The sym cryptodev session pointer 1107 * 1108 * @return 1109 * - If sess is not NULL, return the size of the header session including 1110 * the private data size defined within sess. 1111 * - If sess is NULL, return 0. 1112 */ 1113 __rte_experimental 1114 unsigned int 1115 rte_cryptodev_sym_get_existing_header_session_size( 1116 struct rte_cryptodev_sym_session *sess); 1117 1118 /** 1119 * Get the size of the asymmetric session header, for all registered drivers. 1120 * 1121 * @return 1122 * Size of the asymmetric header session. 1123 */ 1124 __rte_experimental 1125 unsigned int 1126 rte_cryptodev_asym_get_header_session_size(void); 1127 1128 /** 1129 * Get the size of the private symmetric session data 1130 * for a device. 1131 * 1132 * @param dev_id The device identifier. 1133 * 1134 * @return 1135 * - Size of the private data, if successful 1136 * - 0 if device is invalid or does not have private 1137 * symmetric session 1138 */ 1139 unsigned int 1140 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id); 1141 1142 /** 1143 * Get the size of the private data for asymmetric session 1144 * on device 1145 * 1146 * @param dev_id The device identifier. 1147 * 1148 * @return 1149 * - Size of the asymmetric private data, if successful 1150 * - 0 if device is invalid or does not have private session 1151 */ 1152 __rte_experimental 1153 unsigned int 1154 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id); 1155 1156 /** 1157 * Validate if the crypto device index is valid attached crypto device. 1158 * 1159 * @param dev_id Crypto device index. 1160 * 1161 * @return 1162 * - If the device index is valid (1) or not (0). 1163 */ 1164 unsigned int 1165 rte_cryptodev_is_valid_dev(uint8_t dev_id); 1166 1167 /** 1168 * Provide driver identifier. 1169 * 1170 * @param name 1171 * The pointer to a driver name. 1172 * @return 1173 * The driver type identifier or -1 if no driver found 1174 */ 1175 int rte_cryptodev_driver_id_get(const char *name); 1176 1177 /** 1178 * Provide driver name. 1179 * 1180 * @param driver_id 1181 * The driver identifier. 1182 * @return 1183 * The driver name or null if no driver found 1184 */ 1185 const char *rte_cryptodev_driver_name_get(uint8_t driver_id); 1186 1187 /** 1188 * Store user data in a session. 1189 * 1190 * @param sess Session pointer allocated by 1191 * *rte_cryptodev_sym_session_create*. 1192 * @param data Pointer to the user data. 1193 * @param size Size of the user data. 1194 * 1195 * @return 1196 * - On success, zero. 1197 * - On failure, a negative value. 1198 */ 1199 __rte_experimental 1200 int 1201 rte_cryptodev_sym_session_set_user_data( 1202 struct rte_cryptodev_sym_session *sess, 1203 void *data, 1204 uint16_t size); 1205 1206 /** 1207 * Get user data stored in a session. 1208 * 1209 * @param sess Session pointer allocated by 1210 * *rte_cryptodev_sym_session_create*. 1211 * 1212 * @return 1213 * - On success return pointer to user data. 1214 * - On failure returns NULL. 1215 */ 1216 __rte_experimental 1217 void * 1218 rte_cryptodev_sym_session_get_user_data( 1219 struct rte_cryptodev_sym_session *sess); 1220 1221 /** 1222 * Perform actual crypto processing (encrypt/digest or auth/decrypt) 1223 * on user provided data. 1224 * 1225 * @param dev_id The device identifier. 
1226 * @param sess Cryptodev session structure 1227 * @param ofs Start and stop offsets for auth and cipher operations 1228 * @param vec Vectorized operation descriptor 1229 * 1230 * @return 1231 * - Returns number of successfully processed packets. 1232 */ 1233 __rte_experimental 1234 uint32_t 1235 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, 1236 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs, 1237 struct rte_crypto_sym_vec *vec); 1238 1239 /** 1240 * Get the size of the raw data-path context buffer. 1241 * 1242 * @param dev_id The device identifier. 1243 * 1244 * @return 1245 * - If the device supports raw data-path APIs, return the context size. 1246 * - If the device does not support the APIs, return -1. 1247 */ 1248 __rte_experimental 1249 int 1250 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id); 1251 1252 /** 1253 * Union of different crypto session types, including session-less xform 1254 * pointer. 1255 */ 1256 union rte_cryptodev_session_ctx { 1257 struct rte_cryptodev_sym_session *crypto_sess; 1258 struct rte_crypto_sym_xform *xform; 1259 struct rte_security_session *sec_sess; 1260 }; 1261 1262 /** 1263 * Enqueue a vectorized operation descriptor into the device queue but the 1264 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done() 1265 * is called. 1266 * 1267 * @param qp Driver specific queue pair data. 1268 * @param drv_ctx Driver specific context data. 1269 * @param vec Vectorized operation descriptor. 1270 * @param ofs Start and stop offsets for auth and cipher 1271 * operations. 1272 * @param user_data The array of user data for dequeue later. 1273 * @param enqueue_status Driver written value to specify the 1274 * enqueue status. Possible values: 1275 * - 1: The number of operations returned are 1276 * enqueued successfully. 1277 * - 0: The number of operations returned are 1278 * cached into the queue but are not processed 1279 * until rte_cryptodev_raw_enqueue_done() is 1280 * called. 1281 * - negative integer: Error occurred. 1282 * @return 1283 * - The number of operations in the descriptor successfully enqueued or 1284 * cached into the queue but not enqueued yet, depends on the 1285 * "enqueue_status" value. 1286 */ 1287 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)( 1288 void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec, 1289 union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status); 1290 1291 /** 1292 * Enqueue single raw data vector into the device queue but the driver may or 1293 * may not start processing until rte_cryptodev_raw_enqueue_done() is called. 1294 * 1295 * @param qp Driver specific queue pair data. 1296 * @param drv_ctx Driver specific context data. 1297 * @param data_vec The buffer data vector. 1298 * @param n_data_vecs Number of buffer data vectors. 1299 * @param ofs Start and stop offsets for auth and cipher 1300 * operations. 1301 * @param iv IV virtual and IOVA addresses 1302 * @param digest digest virtual and IOVA addresses 1303 * @param aad_or_auth_iv AAD or auth IV virtual and IOVA addresses, 1304 * depends on the algorithm used. 1305 * @param user_data The user data. 1306 * @return 1307 * - 1: The data vector is enqueued successfully. 1308 * - 0: The data vector is cached into the queue but is not processed 1309 * until rte_cryptodev_raw_enqueue_done() is called. 1310 * - negative integer: failure. 
 */
typedef int (*cryptodev_sym_raw_enqueue_t)(
	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data);

/**
 * Inform the cryptodev queue pair to start processing or finish dequeuing all
 * enqueued/dequeued operations.
 *
 * @param	qp		Driver specific queue pair data.
 * @param	drv_ctx		Driver specific context data.
 * @param	n		The total number of processed operations.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
	uint32_t n);

/**
 * Typedef of the function the user provides for the driver to get the
 * dequeue count. The function may return a fixed number or the number parsed
 * from the user data stored in the first processed operation.
 *
 * @param	user_data	Dequeued user data.
 * @return
 *  - The number of operations to be dequeued.
 **/
typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);

/**
 * Typedef of the function the user provides to deal with post-dequeue
 * operations, such as filling in the status.
 *
 * @param	user_data	Dequeued user data.
 * @param	index		Index number of the processed descriptor.
 * @param	is_op_success	Operation status provided by the driver.
 **/
typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
	uint32_t index, uint8_t is_op_success);

/**
 * Dequeue a burst of symmetric crypto processing.
 *
 * @param	qp			Driver specific queue pair data.
 * @param	drv_ctx			Driver specific context data.
 * @param	get_dequeue_count	User provided callback function to
 *					obtain dequeue operation count.
 * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
 *					value is used to pass the maximum
 *					number of operations to be dequeued.
 * @param	post_dequeue		User provided callback function to
 *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from the device queue. If
 *					*is_user_data_array* is set there
 *					should be enough room to store all
 *					user data.
 * @param	is_user_data_array	Set 1 if every dequeued user data will
 *					be written into out_user_data array.
 *					Set 0 if only the first user data will
 *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
 *					total successful operations count.
 * @param	dequeue_status		Driver written value to specify the
 *					dequeue status. Possible values:
 *					- 1: Successfully dequeued the number
 *					of operations returned. The user
 *					data previously set during enqueue
 *					is stored in the "out_user_data".
 *					- 0: The number of operations returned
 *					are completed and the user data is
 *					stored in the "out_user_data", but
 *					they are not freed from the queue
 *					until
 *					rte_cryptodev_raw_dequeue_done()
 *					is called.
 *					- negative integer: Error occurred.
 * @return
 *   - The number of operations dequeued or completed but not freed from the
 *     queue, depending on the "dequeue_status" value.
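 *
 * These driver callbacks are normally reached through the public
 * rte_cryptodev_raw_* wrappers declared further below; a minimal flow is
 * sketched here (illustrative only; the session, data vectors, offsets and
 * IV/digest pointers are assumptions, and error handling is omitted):
 *
 * @code
 *	struct rte_crypto_raw_dp_ctx *ctx =
 *		malloc(rte_cryptodev_get_raw_dp_ctx_size(dev_id));
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *		RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	rte_cryptodev_raw_enqueue(ctx, data_vec, n_data_vecs, ofs,
 *		&iv, &digest, &auth_iv, user_data);
 *	rte_cryptodev_raw_enqueue_done(ctx, 1);
 *
 *	while (rte_cryptodev_raw_dequeue(ctx, &deq_status, &op_status) == NULL)
 *		;
 *	rte_cryptodev_raw_dequeue_done(ctx, 1);
 * @endcode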
1396 */ 1397 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp, 1398 uint8_t *drv_ctx, 1399 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, 1400 uint32_t max_nb_to_dequeue, 1401 rte_cryptodev_raw_post_dequeue_t post_dequeue, 1402 void **out_user_data, uint8_t is_user_data_array, 1403 uint32_t *n_success, int *dequeue_status); 1404 1405 /** 1406 * Dequeue a symmetric crypto processing. 1407 * 1408 * @param qp Driver specific queue pair data. 1409 * @param drv_ctx Driver specific context data. 1410 * @param dequeue_status Driver written value to specify the 1411 * dequeue status. Possible values: 1412 * - 1: Successfully dequeued a operation. 1413 * The user data is returned. 1414 * - 0: The first operation in the queue 1415 * is completed and the user data 1416 * previously set during enqueue is 1417 * returned, but it is not freed from 1418 * the queue until 1419 * rte_cryptodev_raw_dequeue_done() is 1420 * called. 1421 * - negative integer: Error occurred. 1422 * @param op_status Driver written value to specify 1423 * operation status. 1424 * @return 1425 * - The user data pointer retrieved from device queue or NULL if no 1426 * operation is ready for dequeue. 1427 */ 1428 typedef void * (*cryptodev_sym_raw_dequeue_t)( 1429 void *qp, uint8_t *drv_ctx, int *dequeue_status, 1430 enum rte_crypto_op_status *op_status); 1431 1432 /** 1433 * Context data for raw data-path API crypto process. The buffer of this 1434 * structure is to be allocated by the user application with the size equal 1435 * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value. 1436 */ 1437 struct rte_crypto_raw_dp_ctx { 1438 void *qp_data; 1439 1440 cryptodev_sym_raw_enqueue_t enqueue; 1441 cryptodev_sym_raw_enqueue_burst_t enqueue_burst; 1442 cryptodev_sym_raw_operation_done_t enqueue_done; 1443 cryptodev_sym_raw_dequeue_t dequeue; 1444 cryptodev_sym_raw_dequeue_burst_t dequeue_burst; 1445 cryptodev_sym_raw_operation_done_t dequeue_done; 1446 1447 /* Driver specific context data */ 1448 __extension__ uint8_t drv_ctx_data[]; 1449 }; 1450 1451 /** 1452 * Configure raw data-path context data. 1453 * 1454 * NOTE: 1455 * After the context data is configured, the user should call 1456 * rte_cryptodev_raw_attach_session() before using it in 1457 * rte_cryptodev_raw_enqueue/dequeue function call. 1458 * 1459 * @param dev_id The device identifier. 1460 * @param qp_id The index of the queue pair from which to 1461 * retrieve processed packets. The value must be 1462 * in the range [0, nb_queue_pair - 1] previously 1463 * supplied to rte_cryptodev_configure(). 1464 * @param ctx The raw data-path context data. 1465 * @param sess_type session type. 1466 * @param session_ctx Session context data. 1467 * @param is_update Set 0 if it is to initialize the ctx. 1468 * Set 1 if ctx is initialized and only to update 1469 * session context data. 1470 * @return 1471 * - On success return 0. 1472 * - On failure return negative integer. 1473 */ 1474 __rte_experimental 1475 int 1476 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id, 1477 struct rte_crypto_raw_dp_ctx *ctx, 1478 enum rte_crypto_op_sess_type sess_type, 1479 union rte_cryptodev_session_ctx session_ctx, 1480 uint8_t is_update); 1481 1482 /** 1483 * Enqueue a vectorized operation descriptor into the device queue but the 1484 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done() 1485 * is called. 1486 * 1487 * @param ctx The initialized raw data-path context data. 
1488 * @param vec Vectorized operation descriptor. 1489 * @param ofs Start and stop offsets for auth and cipher 1490 * operations. 1491 * @param user_data The array of user data for dequeue later. 1492 * @param enqueue_status Driver written value to specify the 1493 * enqueue status. Possible values: 1494 * - 1: The number of operations returned are 1495 * enqueued successfully. 1496 * - 0: The number of operations returned are 1497 * cached into the queue but are not processed 1498 * until rte_cryptodev_raw_enqueue_done() is 1499 * called. 1500 * - negative integer: Error occurred. 1501 * @return 1502 * - The number of operations in the descriptor successfully enqueued or 1503 * cached into the queue but not enqueued yet, depends on the 1504 * "enqueue_status" value. 1505 */ 1506 __rte_experimental 1507 uint32_t 1508 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx, 1509 struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, 1510 void **user_data, int *enqueue_status); 1511 1512 /** 1513 * Enqueue single raw data vector into the device queue but the driver may or 1514 * may not start processing until rte_cryptodev_raw_enqueue_done() is called. 1515 * 1516 * @param ctx The initialized raw data-path context data. 1517 * @param data_vec The buffer data vector. 1518 * @param n_data_vecs Number of buffer data vectors. 1519 * @param ofs Start and stop offsets for auth and cipher 1520 * operations. 1521 * @param iv IV virtual and IOVA addresses 1522 * @param digest digest virtual and IOVA addresses 1523 * @param aad_or_auth_iv AAD or auth IV virtual and IOVA addresses, 1524 * depends on the algorithm used. 1525 * @param user_data The user data. 1526 * @return 1527 * - 1: The data vector is enqueued successfully. 1528 * - 0: The data vector is cached into the queue but is not processed 1529 * until rte_cryptodev_raw_enqueue_done() is called. 1530 * - negative integer: failure. 1531 */ 1532 __rte_experimental 1533 static __rte_always_inline int 1534 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx, 1535 struct rte_crypto_vec *data_vec, uint16_t n_data_vecs, 1536 union rte_crypto_sym_ofs ofs, 1537 struct rte_crypto_va_iova_ptr *iv, 1538 struct rte_crypto_va_iova_ptr *digest, 1539 struct rte_crypto_va_iova_ptr *aad_or_auth_iv, 1540 void *user_data) 1541 { 1542 return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec, 1543 n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data); 1544 } 1545 1546 /** 1547 * Start processing all enqueued operations from last 1548 * rte_cryptodev_configure_raw_dp_ctx() call. 1549 * 1550 * @param ctx The initialized raw data-path context data. 1551 * @param n The number of operations cached. 1552 * @return 1553 * - On success return 0. 1554 * - On failure return negative integer. 1555 */ 1556 __rte_experimental 1557 int 1558 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx, 1559 uint32_t n); 1560 1561 /** 1562 * Dequeue a burst of symmetric crypto processing. 1563 * 1564 * @param ctx The initialized raw data-path context 1565 * data. 1566 * @param get_dequeue_count User provided callback function to 1567 * obtain dequeue operation count. 1568 * @param max_nb_to_dequeue When get_dequeue_count is NULL this 1569 * value is used to pass the maximum 1570 * number of operations to be dequeued. 1571 * @param post_dequeue User provided callback function to 1572 * post-process a dequeued operation. 1573 * @param out_user_data User data pointer array to be retrieve 1574 * from device queue. 
In case of 1575 * *is_user_data_array* is set there 1576 * should be enough room to store all 1577 * user data. 1578 * @param is_user_data_array Set 1 if every dequeued user data will 1579 * be written into out_user_data array. 1580 * Set 0 if only the first user data will 1581 * be written into out_user_data array. 1582 * @param n_success Driver written value to specific the 1583 * total successful operations count. 1584 * @param dequeue_status Driver written value to specify the 1585 * dequeue status. Possible values: 1586 * - 1: Successfully dequeued the number 1587 * of operations returned. The user 1588 * data previously set during enqueue 1589 * is stored in the "out_user_data". 1590 * - 0: The number of operations returned 1591 * are completed and the user data is 1592 * stored in the "out_user_data", but 1593 * they are not freed from the queue 1594 * until 1595 * rte_cryptodev_raw_dequeue_done() 1596 * is called. 1597 * - negative integer: Error occurred. 1598 * @return 1599 * - The number of operations dequeued or completed but not freed from the 1600 * queue, depends on "dequeue_status" value. 1601 */ 1602 __rte_experimental 1603 uint32_t 1604 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx, 1605 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, 1606 uint32_t max_nb_to_dequeue, 1607 rte_cryptodev_raw_post_dequeue_t post_dequeue, 1608 void **out_user_data, uint8_t is_user_data_array, 1609 uint32_t *n_success, int *dequeue_status); 1610 1611 /** 1612 * Dequeue a symmetric crypto processing. 1613 * 1614 * @param ctx The initialized raw data-path context 1615 * data. 1616 * @param dequeue_status Driver written value to specify the 1617 * dequeue status. Possible values: 1618 * - 1: Successfully dequeued a operation. 1619 * The user data is returned. 1620 * - 0: The first operation in the queue 1621 * is completed and the user data 1622 * previously set during enqueue is 1623 * returned, but it is not freed from 1624 * the queue until 1625 * rte_cryptodev_raw_dequeue_done() is 1626 * called. 1627 * - negative integer: Error occurred. 1628 * @param op_status Driver written value to specify 1629 * operation status. 1630 * @return 1631 * - The user data pointer retrieved from device queue or NULL if no 1632 * operation is ready for dequeue. 1633 */ 1634 __rte_experimental 1635 static __rte_always_inline void * 1636 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx, 1637 int *dequeue_status, enum rte_crypto_op_status *op_status) 1638 { 1639 return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status, 1640 op_status); 1641 } 1642 1643 /** 1644 * Inform the queue pair dequeue operations is finished. 1645 * 1646 * @param ctx The initialized raw data-path context data. 1647 * @param n The number of operations. 1648 * @return 1649 * - On success return 0. 1650 * - On failure return negative integer. 1651 */ 1652 __rte_experimental 1653 int 1654 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx, 1655 uint32_t n); 1656 1657 /** 1658 * Add a user callback for a given crypto device and queue pair which will be 1659 * called on crypto ops enqueue. 1660 * 1661 * This API configures a function to be called for each burst of crypto ops 1662 * received on a given crypto device queue pair. The return value is a pointer 1663 * that can be used later to remove the callback using 1664 * rte_cryptodev_remove_enq_callback(). 
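 *
 * A short registration sketch (illustrative only; "count_enqueued_ops" is a
 * hypothetical callback matching rte_cryptodev_callback_fn and "counter" is
 * an application variable):
 *
 * @code
 *	struct rte_cryptodev_cb *cb = rte_cryptodev_add_enq_callback(dev_id,
 *		qp_id, count_enqueued_ops, &counter);
 *
 *	if (cb == NULL)
 *		printf("cannot add enqueue callback: %d\n", rte_errno);
 *	else
 *		rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 * @endcode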
/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops enqueue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_enq_callback().
 *
 * Callbacks registered by the application will not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leakage.
 * The application is expected to call this API after rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param dev_id The identifier of the device.
 * @param qp_id  The index of the queue pair on which ops are
 *               to be enqueued for processing. The value
 *               must be in the range [0, nb_queue_pairs - 1]
 *               previously supplied to
 *               *rte_cryptodev_configure*.
 * @param cb_fn  The callback function.
 * @param cb_arg A generic pointer parameter which will be passed
 *               to each invocation of the callback function on
 *               this crypto device and queue pair.
 *
 * @return
 *   - NULL on error and rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
                               uint16_t qp_id,
                               rte_cryptodev_callback_fn cb_fn,
                               void *cb_arg);

/**
 * Remove a user callback function for a given crypto device and queue pair.
 *
 * This function is used to remove enqueue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_enq_callback().
 *
 * @param dev_id The identifier of the device.
 * @param qp_id  The index of the queue pair on which ops are
 *               to be enqueued. The value must be in the
 *               range [0, nb_queue_pairs - 1] previously
 *               supplied to *rte_cryptodev_configure*.
 * @param cb     Pointer to user supplied callback created via
 *               rte_cryptodev_add_enq_callback().
 *
 * @return
 *   - 0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
__rte_experimental
int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
                                      uint16_t qp_id,
                                      struct rte_cryptodev_cb *cb);
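/*
 * Illustrative sketch only (not part of the API): an enqueue callback that
 * counts submitted ops. The function must match the rte_cryptodev_callback_fn
 * prototype declared earlier in this header (parameter types shown here are
 * assumed); "count_cb", "enq_counter" and "handle_error" are hypothetical
 * names.
 *
 *      static uint16_t
 *      count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *                      uint16_t nb_ops, void *user_param)
 *      {
 *              uint64_t *counter = user_param;
 *
 *              RTE_SET_USED(dev_id);
 *              RTE_SET_USED(qp_id);
 *              RTE_SET_USED(ops);
 *              *counter += nb_ops;
 *              return nb_ops;  // number of ops passed on to the PMD
 *      }
 *
 *      // Registration, after rte_cryptodev_configure() and queue pair setup:
 *      static uint64_t enq_counter;
 *      struct rte_cryptodev_cb *cb;
 *
 *      cb = rte_cryptodev_add_enq_callback(dev_id, qp_id, count_cb,
 *                      &enq_counter);
 *      if (cb == NULL)
 *              handle_error(rte_errno);
 *      ...
 *      rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 */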
/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops dequeue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_deq_callback().
 *
 * Callbacks registered by the application will not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leakage.
 * The application is expected to call this API after rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param dev_id The identifier of the device.
 * @param qp_id  The index of the queue pair on which ops are
 *               to be dequeued. The value must be in the
 *               range [0, nb_queue_pairs - 1] previously
 *               supplied to *rte_cryptodev_configure*.
 * @param cb_fn  The callback function.
 * @param cb_arg A generic pointer parameter which will be passed
 *               to each invocation of the callback function on
 *               this crypto device and queue pair.
 *
 * @return
 *   - NULL on error and rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
                               uint16_t qp_id,
                               rte_cryptodev_callback_fn cb_fn,
                               void *cb_arg);

/**
 * Remove a user callback function for a given crypto device and queue pair.
 *
 * This function is used to remove dequeue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_deq_callback().
 *
 * @param dev_id The identifier of the device.
 * @param qp_id  The index of the queue pair on which ops are
 *               to be dequeued. The value must be in the
 *               range [0, nb_queue_pairs - 1] previously
 *               supplied to *rte_cryptodev_configure*.
 * @param cb     Pointer to user supplied callback created via
 *               rte_cryptodev_add_deq_callback().
 *
 * @return
 *   - 0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
__rte_experimental
int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
                                      uint16_t qp_id,
                                      struct rte_cryptodev_cb *cb);
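/*
 * Illustrative sketch only (not part of the API): a dequeue callback that
 * counts operations which did not complete successfully. The prototype must
 * match rte_cryptodev_callback_fn (parameter types shown here are assumed);
 * "count_errors_cb" and "err_counter" are hypothetical names.
 *
 *      static uint16_t
 *      count_errors_cb(uint16_t dev_id, uint16_t qp_id,
 *                      struct rte_crypto_op **ops, uint16_t nb_ops,
 *                      void *user_param)
 *      {
 *              uint64_t *err_counter = user_param;
 *              uint16_t i;
 *
 *              RTE_SET_USED(dev_id);
 *              RTE_SET_USED(qp_id);
 *              for (i = 0; i < nb_ops; i++)
 *                      if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *                              (*err_counter)++;
 *              return nb_ops;  // all ops are still handed to the application
 *      }
 *
 *      // Registered with rte_cryptodev_add_deq_callback(dev_id, qp_id,
 *      // count_errors_cb, &err_counter) and removed with
 *      // rte_cryptodev_remove_deq_callback() when no longer needed.
 */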
#include <rte_cryptodev_core.h>
/**
 * Dequeue a burst of processed crypto operations from a queue on the crypto
 * device. The dequeued operations are stored in *rte_crypto_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_cryptodev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_crypto_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_cryptodev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_cryptodev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @param dev_id The symmetric crypto device identifier.
 * @param qp_id  The index of the queue pair from which to
 *               retrieve processed packets. The value must be
 *               in the range [0, nb_queue_pairs - 1] previously
 *               supplied to rte_cryptodev_configure().
 * @param ops    The address of an array of pointers to
 *               *rte_crypto_op* structures that must be
 *               large enough to store *nb_ops* pointers in it.
 * @param nb_ops The maximum number of operations to dequeue.
 *
 * @return
 *   - The number of operations actually dequeued, which is the number
 *     of pointers to *rte_crypto_op* structures effectively supplied to the
 *     *ops* array.
 */
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
        struct rte_crypto_op **ops, uint16_t nb_ops)
{
        const struct rte_crypto_fp_ops *fp_ops;
        void *qp;

        rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);

        fp_ops = &rte_crypto_fp_ops[dev_id];
        qp = fp_ops->qp.data[qp_id];

        nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);

#ifdef RTE_CRYPTO_CALLBACKS
        if (unlikely(fp_ops->qp.deq_cb != NULL)) {
                struct rte_cryptodev_cb_rcu *list;
                struct rte_cryptodev_cb *cb;

                /* __ATOMIC_RELEASE memory order was used when the
                 * callback was inserted into the list.
                 * Since there is a clear dependency between loading
                 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
                 * not required.
                 */
                list = &fp_ops->qp.deq_cb[qp_id];
                rte_rcu_qsbr_thread_online(list->qsbr, 0);
                cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);

                while (cb != NULL) {
                        nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
                                        cb->arg);
                        cb = cb->next;
                }

                rte_rcu_qsbr_thread_offline(list->qsbr, 0);
        }
#endif
        return nb_ops;
}
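/*
 * Illustrative sketch only (not part of the API): a typical polling loop that
 * submits prepared operations and retrieves completions with the burst APIs.
 * "ops", "nb_prepared" and "process_completed_op" are hypothetical
 * application-side names; BURST_SZ is an arbitrary burst size.
 *
 *      #define BURST_SZ 32
 *      struct rte_crypto_op *deq_ops[BURST_SZ];
 *      uint16_t nb_enq, nb_deq, i;
 *
 *      // Enqueue as many prepared ops as the device queue accepts;
 *      // ops not accepted (nb_enq < nb_prepared) must be retried later.
 *      nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_prepared);
 *
 *      // Retrieve processed ops; keep polling while full bursts come back.
 *      do {
 *              nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *                              deq_ops, BURST_SZ);
 *              for (i = 0; i < nb_deq; i++)
 *                      process_completed_op(deq_ops[i]);
 *      } while (nb_deq == BURST_SZ);
 */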
/**
 * Enqueue a burst of operations for processing on a crypto device.
 *
 * The rte_cryptodev_enqueue_burst() function is invoked to place
 * crypto operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_crypto_op* structures.
 *
 * The rte_cryptodev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all packets have been enqueued.
 *
 * @param dev_id The identifier of the device.
 * @param qp_id  The index of the queue pair on which packets are
 *               to be enqueued for processing. The value
 *               must be in the range [0, nb_queue_pairs - 1]
 *               previously supplied to
 *               *rte_cryptodev_configure*.
 * @param ops    The address of an array of *nb_ops* pointers
 *               to *rte_crypto_op* structures which contain
 *               the crypto operations to be processed.
 * @param nb_ops The number of operations to process.
 *
 * @return
 *   The number of operations actually enqueued on the crypto device. The
 *   return value can be less than the value of the *nb_ops* parameter when
 *   the crypto device's queue is full or if invalid parameters are specified
 *   in a *rte_crypto_op*.
 */
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
        struct rte_crypto_op **ops, uint16_t nb_ops)
{
        const struct rte_crypto_fp_ops *fp_ops;
        void *qp;

        fp_ops = &rte_crypto_fp_ops[dev_id];
        qp = fp_ops->qp.data[qp_id];
#ifdef RTE_CRYPTO_CALLBACKS
        if (unlikely(fp_ops->qp.enq_cb != NULL)) {
                struct rte_cryptodev_cb_rcu *list;
                struct rte_cryptodev_cb *cb;

                /* __ATOMIC_RELEASE memory order was used when the
                 * callback was inserted into the list.
                 * Since there is a clear dependency between loading
                 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
                 * not required.
                 */
                list = &fp_ops->qp.enq_cb[qp_id];
                rte_rcu_qsbr_thread_online(list->qsbr, 0);
                cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);

                while (cb != NULL) {
                        nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
                                        cb->arg);
                        cb = cb->next;
                }

                rte_rcu_qsbr_thread_offline(list->qsbr, 0);
        }
#endif

        rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
        return fp_ops->enqueue_burst(qp, ops, nb_ops);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTODEV_H_ */