1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2015-2020 Intel Corporation. 3 */ 4 5 #ifndef _RTE_CRYPTODEV_H_ 6 #define _RTE_CRYPTODEV_H_ 7 8 /** 9 * @file rte_cryptodev.h 10 * 11 * RTE Cryptographic Device APIs 12 * 13 * Defines RTE Crypto Device APIs for the provisioning of cipher and 14 * authentication operations. 15 */ 16 17 #ifdef __cplusplus 18 extern "C" { 19 #endif 20 21 #include <rte_compat.h> 22 #include "rte_kvargs.h" 23 #include "rte_crypto.h" 24 #include <rte_common.h> 25 #include <rte_rcu_qsbr.h> 26 27 #include "rte_cryptodev_trace_fp.h" 28 29 extern const char **rte_cyptodev_names; 30 31 /* Logging Macros */ 32 33 #define CDEV_LOG_ERR(...) \ 34 RTE_LOG(ERR, CRYPTODEV, \ 35 RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ 36 __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,))) 37 38 #define CDEV_LOG_INFO(...) \ 39 RTE_LOG(INFO, CRYPTODEV, \ 40 RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ 41 RTE_FMT_TAIL(__VA_ARGS__,))) 42 43 #define CDEV_LOG_DEBUG(...) \ 44 RTE_LOG(DEBUG, CRYPTODEV, \ 45 RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ 46 __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,))) 47 48 #define CDEV_PMD_TRACE(...) \ 49 RTE_LOG(DEBUG, CRYPTODEV, \ 50 RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ 51 dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,))) 52 53 /** 54 * A macro that points to an offset from the start 55 * of the crypto operation structure (rte_crypto_op) 56 * 57 * The returned pointer is cast to type t. 58 * 59 * @param c 60 * The crypto operation. 61 * @param o 62 * The offset from the start of the crypto operation. 63 * @param t 64 * The type to cast the result into. 65 */ 66 #define rte_crypto_op_ctod_offset(c, t, o) \ 67 ((t)((char *)(c) + (o))) 68 69 /** 70 * A macro that returns the physical address that points 71 * to an offset from the start of the crypto operation 72 * (rte_crypto_op) 73 * 74 * @param c 75 * The crypto operation. 
76 * @param o 77 * The offset from the start of the crypto operation 78 * to calculate address from. 79 */ 80 #define rte_crypto_op_ctophys_offset(c, o) \ 81 (rte_iova_t)((c)->phys_addr + (o)) 82 83 /** 84 * Crypto parameters range description 85 */ 86 struct rte_crypto_param_range { 87 uint16_t min; /**< minimum size */ 88 uint16_t max; /**< maximum size */ 89 uint16_t increment; 90 /**< if a range of sizes are supported, 91 * this parameter is used to indicate 92 * increments in byte size that are supported 93 * between the minimum and maximum 94 */ 95 }; 96 97 /** 98 * Data-unit supported lengths of cipher algorithms. 99 * A bit can represent any set of data-unit sizes 100 * (single size, multiple size, range, etc). 101 */ 102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES RTE_BIT32(0) 103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES RTE_BIT32(1) 104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES RTE_BIT32(2) 105 106 /** 107 * Symmetric Crypto Capability 108 */ 109 struct rte_cryptodev_symmetric_capability { 110 enum rte_crypto_sym_xform_type xform_type; 111 /**< Transform type : Authentication / Cipher / AEAD */ 112 union { 113 struct { 114 enum rte_crypto_auth_algorithm algo; 115 /**< authentication algorithm */ 116 uint16_t block_size; 117 /**< algorithm block size */ 118 struct rte_crypto_param_range key_size; 119 /**< auth key size range */ 120 struct rte_crypto_param_range digest_size; 121 /**< digest size range */ 122 struct rte_crypto_param_range aad_size; 123 /**< Additional authentication data size range */ 124 struct rte_crypto_param_range iv_size; 125 /**< Initialisation vector data size range */ 126 } auth; 127 /**< Symmetric Authentication transform capabilities */ 128 struct { 129 enum rte_crypto_cipher_algorithm algo; 130 /**< cipher algorithm */ 131 uint16_t block_size; 132 /**< algorithm block size */ 133 struct rte_crypto_param_range key_size; 134 /**< cipher key size range */ 135 struct rte_crypto_param_range iv_size; 136 /**< 
Initialisation vector data size range */ 137 uint32_t dataunit_set; 138 /**< 139 * Supported data-unit lengths: 140 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits 141 * or 0 for lengths defined in the algorithm standard. 142 */ 143 } cipher; 144 /**< Symmetric Cipher transform capabilities */ 145 struct { 146 enum rte_crypto_aead_algorithm algo; 147 /**< AEAD algorithm */ 148 uint16_t block_size; 149 /**< algorithm block size */ 150 struct rte_crypto_param_range key_size; 151 /**< AEAD key size range */ 152 struct rte_crypto_param_range digest_size; 153 /**< digest size range */ 154 struct rte_crypto_param_range aad_size; 155 /**< Additional authentication data size range */ 156 struct rte_crypto_param_range iv_size; 157 /**< Initialisation vector data size range */ 158 } aead; 159 }; 160 }; 161 162 /** 163 * Asymmetric Xform Crypto Capability 164 */ 165 struct rte_cryptodev_asymmetric_xform_capability { 166 enum rte_crypto_asym_xform_type xform_type; 167 /**< Transform type: RSA/MODEXP/DH/DSA/MODINV */ 168 169 uint32_t op_types; 170 /**< 171 * Bitmask for supported rte_crypto_asym_op_type or 172 * rte_crypto_asym_ke_type. Which enum is used is determined 173 * by the rte_crypto_asym_xform_type. For key exchange algorithms 174 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others 175 * it is rte_crypto_asym_op_type. 176 */ 177 178 __extension__ 179 union { 180 struct rte_crypto_param_range modlen; 181 /**< Range of modulus length supported by modulus based xform. 
182 * Value 0 mean implementation default 183 */ 184 }; 185 }; 186 187 /** 188 * Asymmetric Crypto Capability 189 */ 190 struct rte_cryptodev_asymmetric_capability { 191 struct rte_cryptodev_asymmetric_xform_capability xform_capa; 192 }; 193 194 195 /** Structure used to capture a capability of a crypto device */ 196 struct rte_cryptodev_capabilities { 197 enum rte_crypto_op_type op; 198 /**< Operation type */ 199 200 union { 201 struct rte_cryptodev_symmetric_capability sym; 202 /**< Symmetric operation capability parameters */ 203 struct rte_cryptodev_asymmetric_capability asym; 204 /**< Asymmetric operation capability parameters */ 205 }; 206 }; 207 208 /** Structure used to describe crypto algorithms */ 209 struct rte_cryptodev_sym_capability_idx { 210 enum rte_crypto_sym_xform_type type; 211 union { 212 enum rte_crypto_cipher_algorithm cipher; 213 enum rte_crypto_auth_algorithm auth; 214 enum rte_crypto_aead_algorithm aead; 215 } algo; 216 }; 217 218 /** 219 * Structure used to describe asymmetric crypto xforms 220 * Each xform maps to one asym algorithm. 221 */ 222 struct rte_cryptodev_asym_capability_idx { 223 enum rte_crypto_asym_xform_type type; 224 /**< Asymmetric xform (algo) type */ 225 }; 226 227 /** 228 * Provide capabilities available for defined device and algorithm 229 * 230 * @param dev_id The identifier of the device. 231 * @param idx Description of crypto algorithms. 232 * 233 * @return 234 * - Return description of the symmetric crypto capability if exist. 235 * - Return NULL if the capability not exist. 236 */ 237 const struct rte_cryptodev_symmetric_capability * 238 rte_cryptodev_sym_capability_get(uint8_t dev_id, 239 const struct rte_cryptodev_sym_capability_idx *idx); 240 241 /** 242 * Provide capabilities available for defined device and xform 243 * 244 * @param dev_id The identifier of the device. 245 * @param idx Description of asym crypto xform. 246 * 247 * @return 248 * - Return description of the asymmetric crypto capability if exist. 
249 * - Return NULL if the capability not exist. 250 */ 251 __rte_experimental 252 const struct rte_cryptodev_asymmetric_xform_capability * 253 rte_cryptodev_asym_capability_get(uint8_t dev_id, 254 const struct rte_cryptodev_asym_capability_idx *idx); 255 256 /** 257 * Check if key size and initial vector are supported 258 * in crypto cipher capability 259 * 260 * @param capability Description of the symmetric crypto capability. 261 * @param key_size Cipher key size. 262 * @param iv_size Cipher initial vector size. 263 * 264 * @return 265 * - Return 0 if the parameters are in range of the capability. 266 * - Return -1 if the parameters are out of range of the capability. 267 */ 268 int 269 rte_cryptodev_sym_capability_check_cipher( 270 const struct rte_cryptodev_symmetric_capability *capability, 271 uint16_t key_size, uint16_t iv_size); 272 273 /** 274 * Check if key size and initial vector are supported 275 * in crypto auth capability 276 * 277 * @param capability Description of the symmetric crypto capability. 278 * @param key_size Auth key size. 279 * @param digest_size Auth digest size. 280 * @param iv_size Auth initial vector size. 281 * 282 * @return 283 * - Return 0 if the parameters are in range of the capability. 284 * - Return -1 if the parameters are out of range of the capability. 285 */ 286 int 287 rte_cryptodev_sym_capability_check_auth( 288 const struct rte_cryptodev_symmetric_capability *capability, 289 uint16_t key_size, uint16_t digest_size, uint16_t iv_size); 290 291 /** 292 * Check if key, digest, AAD and initial vector sizes are supported 293 * in crypto AEAD capability 294 * 295 * @param capability Description of the symmetric crypto capability. 296 * @param key_size AEAD key size. 297 * @param digest_size AEAD digest size. 298 * @param aad_size AEAD AAD size. 299 * @param iv_size AEAD IV size. 300 * 301 * @return 302 * - Return 0 if the parameters are in range of the capability. 
303 * - Return -1 if the parameters are out of range of the capability. 304 */ 305 int 306 rte_cryptodev_sym_capability_check_aead( 307 const struct rte_cryptodev_symmetric_capability *capability, 308 uint16_t key_size, uint16_t digest_size, uint16_t aad_size, 309 uint16_t iv_size); 310 311 /** 312 * Check if op type is supported 313 * 314 * @param capability Description of the asymmetric crypto capability. 315 * @param op_type op type 316 * 317 * @return 318 * - Return 1 if the op type is supported 319 * - Return 0 if unsupported 320 */ 321 __rte_experimental 322 int 323 rte_cryptodev_asym_xform_capability_check_optype( 324 const struct rte_cryptodev_asymmetric_xform_capability *capability, 325 enum rte_crypto_asym_op_type op_type); 326 327 /** 328 * Check if modulus length is in supported range 329 * 330 * @param capability Description of the asymmetric crypto capability. 331 * @param modlen modulus length. 332 * 333 * @return 334 * - Return 0 if the parameters are in range of the capability. 335 * - Return -1 if the parameters are out of range of the capability. 
336 */ 337 __rte_experimental 338 int 339 rte_cryptodev_asym_xform_capability_check_modlen( 340 const struct rte_cryptodev_asymmetric_xform_capability *capability, 341 uint16_t modlen); 342 343 /** 344 * Provide the cipher algorithm enum, given an algorithm string 345 * 346 * @param algo_enum A pointer to the cipher algorithm 347 * enum to be filled 348 * @param algo_string Cipher algo string 349 * 350 * @return 351 * - Return -1 if string is not valid 352 * - Return 0 if the string is valid 353 */ 354 int 355 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum, 356 const char *algo_string); 357 358 /** 359 * Provide the authentication algorithm enum, given an algorithm string 360 * 361 * @param algo_enum A pointer to the authentication algorithm 362 * enum to be filled 363 * @param algo_string Authentication algo string 364 * 365 * @return 366 * - Return -1 if string is not valid 367 * - Return 0 if the string is valid 368 */ 369 int 370 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum, 371 const char *algo_string); 372 373 /** 374 * Provide the AEAD algorithm enum, given an algorithm string 375 * 376 * @param algo_enum A pointer to the AEAD algorithm 377 * enum to be filled 378 * @param algo_string AEAD algorithm string 379 * 380 * @return 381 * - Return -1 if string is not valid 382 * - Return 0 if the string is valid 383 */ 384 int 385 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, 386 const char *algo_string); 387 388 /** 389 * Provide the Asymmetric xform enum, given an xform string 390 * 391 * @param xform_enum A pointer to the xform type 392 * enum to be filled 393 * @param xform_string xform string 394 * 395 * @return 396 * - Return -1 if string is not valid 397 * - Return 0 if the string is valid 398 */ 399 __rte_experimental 400 int 401 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, 402 const char *xform_string); 403 404 /** 405 * 
Provide the cipher algorithm string, given an algorithm enum. 406 * 407 * @param algo_enum cipher algorithm enum 408 * 409 * @return 410 * - Return NULL if enum is not valid 411 * - Return algo_string corresponding to enum 412 */ 413 __rte_experimental 414 const char * 415 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum); 416 417 /** 418 * Provide the authentication algorithm string, given an algorithm enum. 419 * 420 * @param algo_enum auth algorithm enum 421 * 422 * @return 423 * - Return NULL if enum is not valid 424 * - Return algo_string corresponding to enum 425 */ 426 __rte_experimental 427 const char * 428 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum); 429 430 /** 431 * Provide the AEAD algorithm string, given an algorithm enum. 432 * 433 * @param algo_enum AEAD algorithm enum 434 * 435 * @return 436 * - Return NULL if enum is not valid 437 * - Return algo_string corresponding to enum 438 */ 439 __rte_experimental 440 const char * 441 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum); 442 443 /** 444 * Provide the Asymmetric xform string, given an xform enum. 445 * 446 * @param xform_enum xform type enum 447 * 448 * @return 449 * - Return NULL, if enum is not valid. 450 * - Return xform string, for valid enum. 
451 */ 452 __rte_experimental 453 const char * 454 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum); 455 456 457 /** Macro used at end of crypto PMD list */ 458 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \ 459 { RTE_CRYPTO_OP_TYPE_UNDEFINED } 460 461 462 /** 463 * Crypto device supported feature flags 464 * 465 * Note: 466 * New features flags should be added to the end of the list 467 * 468 * Keep these flags synchronised with rte_cryptodev_get_feature_name() 469 */ 470 #define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0) 471 /**< Symmetric crypto operations are supported */ 472 #define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1) 473 /**< Asymmetric crypto operations are supported */ 474 #define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2) 475 /**< Chaining symmetric crypto operations are supported */ 476 #define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3) 477 /**< Utilises CPU SIMD SSE instructions */ 478 #define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4) 479 /**< Utilises CPU SIMD AVX instructions */ 480 #define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5) 481 /**< Utilises CPU SIMD AVX2 instructions */ 482 #define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6) 483 /**< Utilises CPU AES-NI instructions */ 484 #define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7) 485 /**< Operations are off-loaded to an 486 * external hardware accelerator 487 */ 488 #define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8) 489 /**< Utilises CPU SIMD AVX512 instructions */ 490 #define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9) 491 /**< In-place Scatter-gather (SGL) buffers, with multiple segments, 492 * are supported 493 */ 494 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10) 495 /**< Out-of-place Scatter-gather (SGL) buffers are 496 * supported in input and output 497 */ 498 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11) 499 /**< Out-of-place Scatter-gather (SGL) buffers are supported 500 * in input, combined with linear buffers (LB), with a 501 * single 
segment in output 502 */ 503 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12) 504 /**< Out-of-place Scatter-gather (SGL) buffers are supported 505 * in output, combined with linear buffers (LB) in input 506 */ 507 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13) 508 /**< Out-of-place linear buffers (LB) are supported in input and output */ 509 #define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14) 510 /**< Utilises CPU NEON instructions */ 511 #define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15) 512 /**< Utilises ARM CPU Cryptographic Extensions */ 513 #define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16) 514 /**< Support Security Protocol Processing */ 515 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP (1ULL << 17) 516 /**< Support RSA Private Key OP with exponent */ 517 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT (1ULL << 18) 518 /**< Support RSA Private Key OP with CRT (quintuple) Keys */ 519 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED (1ULL << 19) 520 /**< Support encrypted-digest operations where digest is appended to data */ 521 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20) 522 /**< Support asymmetric session-less operations */ 523 #define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21) 524 /**< Support symmetric cpu-crypto processing */ 525 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS (1ULL << 22) 526 /**< Support symmetric session-less operations */ 527 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23) 528 /**< Support operations on data which is not byte aligned */ 529 #define RTE_CRYPTODEV_FF_SYM_RAW_DP (1ULL << 24) 530 /**< Support accelerator specific symmetric raw data-path APIs */ 531 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS (1ULL << 25) 532 /**< Support operations on multiple data-units message */ 533 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY (1ULL << 26) 534 /**< Support wrapped key in cipher xform */ 535 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM (1ULL << 27) 536 /**< Support inner checksum computation/verification */ 537 538 /** 
539 * Get the name of a crypto device feature flag 540 * 541 * @param flag The mask describing the flag. 542 * 543 * @return 544 * The name of this flag, or NULL if it's not a valid feature flag. 545 */ 546 const char * 547 rte_cryptodev_get_feature_name(uint64_t flag); 548 549 /** Crypto device information */ 550 /* Structure rte_cryptodev_info 8< */ 551 struct rte_cryptodev_info { 552 const char *driver_name; /**< Driver name. */ 553 uint8_t driver_id; /**< Driver identifier */ 554 struct rte_device *device; /**< Generic device information. */ 555 556 uint64_t feature_flags; 557 /**< Feature flags exposes HW/SW features for the given device */ 558 559 const struct rte_cryptodev_capabilities *capabilities; 560 /**< Array of devices supported capabilities */ 561 562 unsigned max_nb_queue_pairs; 563 /**< Maximum number of queues pairs supported by device. */ 564 565 uint16_t min_mbuf_headroom_req; 566 /**< Minimum mbuf headroom required by device */ 567 568 uint16_t min_mbuf_tailroom_req; 569 /**< Minimum mbuf tailroom required by device */ 570 571 struct { 572 unsigned max_nb_sessions; 573 /**< Maximum number of sessions supported by device. 574 * If 0, the device does not have any limitation in 575 * number of sessions that can be used. 576 */ 577 } sym; 578 }; 579 /* >8 End of structure rte_cryptodev_info. */ 580 581 #define RTE_CRYPTODEV_DETACHED (0) 582 #define RTE_CRYPTODEV_ATTACHED (1) 583 584 /** Definitions of Crypto device event types */ 585 enum rte_cryptodev_event_type { 586 RTE_CRYPTODEV_EVENT_UNKNOWN, /**< unknown event type */ 587 RTE_CRYPTODEV_EVENT_ERROR, /**< error interrupt event */ 588 RTE_CRYPTODEV_EVENT_MAX /**< max value of this enum */ 589 }; 590 591 /** Crypto device queue pair configuration structure. 
*/ 592 /* Structure rte_cryptodev_qp_conf 8<*/ 593 struct rte_cryptodev_qp_conf { 594 uint32_t nb_descriptors; /**< Number of descriptors per queue pair */ 595 struct rte_mempool *mp_session; 596 /**< The mempool for creating session in sessionless mode */ 597 }; 598 /* >8 End of structure rte_cryptodev_qp_conf. */ 599 600 /** 601 * Function type used for processing crypto ops when enqueue/dequeue burst is 602 * called. 603 * 604 * The callback function is called on enqueue/dequeue burst immediately. 605 * 606 * @param dev_id The identifier of the device. 607 * @param qp_id The index of the queue pair on which ops are 608 * enqueued/dequeued. The value must be in the 609 * range [0, nb_queue_pairs - 1] previously 610 * supplied to *rte_cryptodev_configure*. 611 * @param ops The address of an array of *nb_ops* pointers 612 * to *rte_crypto_op* structures which contain 613 * the crypto operations to be processed. 614 * @param nb_ops The number of operations to process. 615 * @param user_param The arbitrary user parameter passed in by the 616 * application when the callback was originally 617 * registered. 618 * @return The number of ops to be enqueued to the 619 * crypto device. 620 */ 621 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id, 622 struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param); 623 624 /** 625 * Typedef for application callback function to be registered by application 626 * software for notification of device events 627 * 628 * @param dev_id Crypto device identifier 629 * @param event Crypto device event to register for notification of. 630 * @param cb_arg User specified parameter to be passed to the 631 user's callback function. 
632 */ 633 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id, 634 enum rte_cryptodev_event_type event, void *cb_arg); 635 636 637 /** Crypto Device statistics */ 638 struct rte_cryptodev_stats { 639 uint64_t enqueued_count; 640 /**< Count of all operations enqueued */ 641 uint64_t dequeued_count; 642 /**< Count of all operations dequeued */ 643 644 uint64_t enqueue_err_count; 645 /**< Total error count on operations enqueued */ 646 uint64_t dequeue_err_count; 647 /**< Total error count on operations dequeued */ 648 }; 649 650 #define RTE_CRYPTODEV_NAME_MAX_LEN (64) 651 /**< Max length of name of crypto PMD */ 652 653 /** 654 * Get the device identifier for the named crypto device. 655 * 656 * @param name device name to select the device structure. 657 * 658 * @return 659 * - Returns crypto device identifier on success. 660 * - Return -1 on failure to find named crypto device. 661 */ 662 int 663 rte_cryptodev_get_dev_id(const char *name); 664 665 /** 666 * Get the crypto device name given a device identifier. 667 * 668 * @param dev_id 669 * The identifier of the device 670 * 671 * @return 672 * - Returns crypto device name. 673 * - Returns NULL if crypto device is not present. 674 */ 675 const char * 676 rte_cryptodev_name_get(uint8_t dev_id); 677 678 /** 679 * Get the total number of crypto devices that have been successfully 680 * initialised. 681 * 682 * @return 683 * - The total number of usable crypto devices. 684 */ 685 uint8_t 686 rte_cryptodev_count(void); 687 688 /** 689 * Get number of crypto device defined type. 690 * 691 * @param driver_id driver identifier. 692 * 693 * @return 694 * Returns number of crypto device. 695 */ 696 uint8_t 697 rte_cryptodev_device_count_by_driver(uint8_t driver_id); 698 699 /** 700 * Get number and identifiers of attached crypto devices that 701 * use the same crypto driver. 702 * 703 * @param driver_name driver name. 704 * @param devices output devices identifiers. 705 * @param nb_devices maximal number of devices. 
706 * 707 * @return 708 * Returns number of attached crypto devices. 709 */ 710 uint8_t 711 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, 712 uint8_t nb_devices); 713 /** 714 * Return the NUMA socket to which a device is connected 715 * 716 * @param dev_id 717 * The identifier of the device 718 * @return 719 * The NUMA socket id to which the device is connected or 720 * a default of zero if the socket could not be determined. 721 * -1 if the dev_id value is out of range. 722 */ 723 int 724 rte_cryptodev_socket_id(uint8_t dev_id); 725 726 /** Crypto device configuration structure */ 727 /* Structure rte_cryptodev_config 8< */ 728 struct rte_cryptodev_config { 729 int socket_id; /**< Socket to allocate resources on */ 730 uint16_t nb_queue_pairs; 731 /**< Number of queue pairs to configure on device */ 732 uint64_t ff_disable; 733 /**< Feature flags to be disabled. Only the following features are 734 * allowed to be disabled, 735 * - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO 736 * - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO 737 * - RTE_CRYPTODEV_FF_SECURITY 738 */ 739 }; 740 /* >8 End of structure rte_cryptodev_config. */ 741 742 /** 743 * Configure a device. 744 * 745 * This function must be invoked first before any other function in the 746 * API. This function can also be re-invoked when a device is in the 747 * stopped state. 748 * 749 * @param dev_id The identifier of the device to configure. 750 * @param config The crypto device configuration structure. 751 * 752 * @return 753 * - 0: Success, device configured. 754 * - <0: Error code returned by the driver configuration function. 755 */ 756 int 757 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config); 758 759 /** 760 * Start a device. 761 * 762 * The device start step is the last one and consists of setting the configured 763 * offload features and in starting the transmit and the receive units of the 764 * device. 
765 * On success, all basic functions exported by the API (link status, 766 * receive/transmit, and so on) can be invoked. 767 * 768 * @param dev_id 769 * The identifier of the device. 770 * @return 771 * - 0: Success, device started. 772 * - <0: Error code of the driver device start function. 773 */ 774 int 775 rte_cryptodev_start(uint8_t dev_id); 776 777 /** 778 * Stop a device. The device can be restarted with a call to 779 * rte_cryptodev_start() 780 * 781 * @param dev_id The identifier of the device. 782 */ 783 void 784 rte_cryptodev_stop(uint8_t dev_id); 785 786 /** 787 * Close a device. The device cannot be restarted! 788 * 789 * @param dev_id The identifier of the device. 790 * 791 * @return 792 * - 0 on successfully closing device 793 * - <0 on failure to close device 794 */ 795 int 796 rte_cryptodev_close(uint8_t dev_id); 797 798 /** 799 * Allocate and set up a receive queue pair for a device. 800 * 801 * 802 * @param dev_id The identifier of the device. 803 * @param queue_pair_id The index of the queue pairs to set up. The 804 * value must be in the range [0, nb_queue_pair 805 * - 1] previously supplied to 806 * rte_cryptodev_configure(). 807 * @param qp_conf The pointer to the configuration data to be 808 * used for the queue pair. 809 * @param socket_id The *socket_id* argument is the socket 810 * identifier in case of NUMA. The value can be 811 * *SOCKET_ID_ANY* if there is no NUMA constraint 812 * for the DMA memory allocated for the receive 813 * queue pair. 814 * 815 * @return 816 * - 0: Success, queue pair correctly set up. 817 * - <0: Queue pair configuration failed 818 */ 819 int 820 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, 821 const struct rte_cryptodev_qp_conf *qp_conf, int socket_id); 822 823 /** 824 * Get the status of queue pairs setup on a specific crypto device 825 * 826 * @param dev_id Crypto device identifier. 827 * @param queue_pair_id The index of the queue pairs to set up. 
The 828 * value must be in the range [0, nb_queue_pair 829 * - 1] previously supplied to 830 * rte_cryptodev_configure(). 831 * @return 832 * - 0: qp was not configured 833 * - 1: qp was configured 834 * - -EINVAL: device was not configured 835 */ 836 __rte_experimental 837 int 838 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id); 839 840 /** 841 * Get the number of queue pairs on a specific crypto device 842 * 843 * @param dev_id Crypto device identifier. 844 * @return 845 * - The number of configured queue pairs. 846 */ 847 uint16_t 848 rte_cryptodev_queue_pair_count(uint8_t dev_id); 849 850 851 /** 852 * Retrieve the general I/O statistics of a device. 853 * 854 * @param dev_id The identifier of the device. 855 * @param stats A pointer to a structure of type 856 * *rte_cryptodev_stats* to be filled with the 857 * values of device counters. 858 * @return 859 * - Zero if successful. 860 * - Non-zero otherwise. 861 */ 862 int 863 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats); 864 865 /** 866 * Reset the general I/O statistics of a device. 867 * 868 * @param dev_id The identifier of the device. 869 */ 870 void 871 rte_cryptodev_stats_reset(uint8_t dev_id); 872 873 /** 874 * Retrieve the contextual information of a device. 875 * 876 * @param dev_id The identifier of the device. 877 * @param dev_info A pointer to a structure of type 878 * *rte_cryptodev_info* to be filled with the 879 * contextual information of the device. 880 * 881 * @note The capabilities field of dev_info is set to point to the first 882 * element of an array of struct rte_cryptodev_capabilities. The element after 883 * the last valid element has it's op field set to 884 * RTE_CRYPTO_OP_TYPE_UNDEFINED. 885 */ 886 void 887 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info); 888 889 890 /** 891 * Register a callback function for specific device id. 892 * 893 * @param dev_id Device id. 894 * @param event Event interested. 
895 * @param cb_fn User supplied callback function to be called. 896 * @param cb_arg Pointer to the parameters for the registered 897 * callback. 898 * 899 * @return 900 * - On success, zero. 901 * - On failure, a negative value. 902 */ 903 int 904 rte_cryptodev_callback_register(uint8_t dev_id, 905 enum rte_cryptodev_event_type event, 906 rte_cryptodev_cb_fn cb_fn, void *cb_arg); 907 908 /** 909 * Unregister a callback function for specific device id. 910 * 911 * @param dev_id The device identifier. 912 * @param event Event interested. 913 * @param cb_fn User supplied callback function to be called. 914 * @param cb_arg Pointer to the parameters for the registered 915 * callback. 916 * 917 * @return 918 * - On success, zero. 919 * - On failure, a negative value. 920 */ 921 int 922 rte_cryptodev_callback_unregister(uint8_t dev_id, 923 enum rte_cryptodev_event_type event, 924 rte_cryptodev_cb_fn cb_fn, void *cb_arg); 925 926 /** 927 * @warning 928 * @b EXPERIMENTAL: this API may change without prior notice. 929 * 930 * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR 931 * events. 932 * 933 * @param dev_id The device identifier. 934 * @param qp_id Queue pair index to be queried. 935 * 936 * @return 937 * - 1 if requested queue has a pending event. 938 * - 0 if no pending event is found. 939 * - a negative value on failure 940 */ 941 __rte_experimental 942 int 943 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id); 944 945 struct rte_cryptodev_callback; 946 947 /** Structure to keep track of registered callbacks */ 948 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback); 949 950 /** 951 * Structure used to hold information about the callbacks to be called for a 952 * queue pair on enqueue/dequeue. 
953 */ 954 struct rte_cryptodev_cb { 955 struct rte_cryptodev_cb *next; 956 /**< Pointer to next callback */ 957 rte_cryptodev_callback_fn fn; 958 /**< Pointer to callback function */ 959 void *arg; 960 /**< Pointer to argument */ 961 }; 962 963 /** 964 * @internal 965 * Structure used to hold information about the RCU for a queue pair. 966 */ 967 struct rte_cryptodev_cb_rcu { 968 struct rte_cryptodev_cb *next; 969 /**< Pointer to next callback */ 970 struct rte_rcu_qsbr *qsbr; 971 /**< RCU QSBR variable per queue pair */ 972 }; 973 974 /** 975 * Get the security context for the cryptodev. 976 * 977 * @param dev_id 978 * The device identifier. 979 * @return 980 * - NULL on error. 981 * - Pointer to security context on success. 982 */ 983 void * 984 rte_cryptodev_get_sec_ctx(uint8_t dev_id); 985 986 /** 987 * Create a symmetric session mempool. 988 * 989 * @param name 990 * The unique mempool name. 991 * @param nb_elts 992 * The number of elements in the mempool. 993 * @param elt_size 994 * The size of the element. This should be the size of the cryptodev PMD 995 * session private data obtained through 996 * rte_cryptodev_sym_get_private_session_size() function call. 997 * For the user who wants to use the same mempool for heterogeneous PMDs 998 * this value should be the maximum value of their private session sizes. 999 * Please note the created mempool will have bigger elt size than this 1000 * value as necessary session header and the possible padding are filled 1001 * into each elt. 1002 * @param cache_size 1003 * The number of per-lcore cache elements 1004 * @param priv_size 1005 * The private data size of each session. 1006 * @param socket_id 1007 * The *socket_id* argument is the socket identifier in the case of 1008 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA 1009 * constraint for the reserved zone. 
1010 * 1011 * @return 1012 * - On success returns the created session mempool pointer 1013 * - On failure returns NULL 1014 */ 1015 __rte_experimental 1016 struct rte_mempool * 1017 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, 1018 uint32_t elt_size, uint32_t cache_size, uint16_t priv_size, 1019 int socket_id); 1020 1021 1022 /** 1023 * Create an asymmetric session mempool. 1024 * 1025 * @param name 1026 * The unique mempool name. 1027 * @param nb_elts 1028 * The number of elements in the mempool. 1029 * @param cache_size 1030 * The number of per-lcore cache elements 1031 * @param user_data_size 1032 * The size of user data to be placed after session private data. 1033 * @param socket_id 1034 * The *socket_id* argument is the socket identifier in the case of 1035 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA 1036 * constraint for the reserved zone. 1037 * 1038 * @return 1039 * - On success return mempool 1040 * - On failure returns NULL 1041 */ 1042 __rte_experimental 1043 struct rte_mempool * 1044 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, 1045 uint32_t cache_size, uint16_t user_data_size, int socket_id); 1046 1047 /** 1048 * Create symmetric crypto session and fill out private data for the device id, 1049 * based on its device type. 1050 * 1051 * @param dev_id ID of device that we want the session to be used on 1052 * @param xforms Symmetric crypto transform operations to apply on flow 1053 * processed with this session 1054 * @param mp Mempool to allocate symmetric session objects from 1055 * 1056 * @return 1057 * - On success return pointer to sym-session. 1058 * - On failure returns NULL and rte_errno is set to the error code: 1059 * - EINVAL on invalid arguments. 1060 * - ENOMEM on memory error for session allocation. 1061 * - ENOTSUP if device doesn't support session configuration. 
1062 */ 1063 void * 1064 rte_cryptodev_sym_session_create(uint8_t dev_id, 1065 struct rte_crypto_sym_xform *xforms, 1066 struct rte_mempool *mp); 1067 /** 1068 * Create and initialise an asymmetric crypto session structure. 1069 * Calls the PMD to configure the private session data. 1070 * 1071 * @param dev_id ID of device that we want the session to be used on 1072 * @param xforms Asymmetric crypto transform operations to apply on flow 1073 * processed with this session 1074 * @param mp mempool to allocate asymmetric session 1075 * objects from 1076 * @param session void ** for session to be used 1077 * 1078 * @return 1079 * - 0 on success. 1080 * - -EINVAL on invalid arguments. 1081 * - -ENOMEM on memory error for session allocation. 1082 * - -ENOTSUP if device doesn't support session configuration. 1083 */ 1084 __rte_experimental 1085 int 1086 rte_cryptodev_asym_session_create(uint8_t dev_id, 1087 struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp, 1088 void **session); 1089 1090 /** 1091 * Frees session for the device id and returning it to its mempool. 1092 * It is the application's responsibility to ensure that the session 1093 * is not still in-flight operations using it. 1094 * 1095 * @param dev_id ID of device that uses the session. 1096 * @param sess Session header to be freed. 1097 * 1098 * @return 1099 * - 0 if successful. 1100 * - -EINVAL if session is NULL or the mismatched device ids. 1101 */ 1102 int 1103 rte_cryptodev_sym_session_free(uint8_t dev_id, 1104 void *sess); 1105 1106 /** 1107 * Clears and frees asymmetric crypto session header and private data, 1108 * returning it to its original mempool. 1109 * 1110 * @param dev_id ID of device that uses the asymmetric session. 1111 * @param sess Session header to be freed. 1112 * 1113 * @return 1114 * - 0 if successful. 1115 * - -EINVAL if device is invalid or session is NULL. 
1116 */ 1117 __rte_experimental 1118 int 1119 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess); 1120 1121 /** 1122 * Get the size of the asymmetric session header. 1123 * 1124 * @return 1125 * Size of the asymmetric header session. 1126 */ 1127 __rte_experimental 1128 unsigned int 1129 rte_cryptodev_asym_get_header_session_size(void); 1130 1131 /** 1132 * Get the size of the private symmetric session data 1133 * for a device. 1134 * 1135 * @param dev_id The device identifier. 1136 * 1137 * @return 1138 * - Size of the private data, if successful 1139 * - 0 if device is invalid or does not have private 1140 * symmetric session 1141 */ 1142 unsigned int 1143 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id); 1144 1145 /** 1146 * Get the size of the private data for asymmetric session 1147 * on device 1148 * 1149 * @param dev_id The device identifier. 1150 * 1151 * @return 1152 * - Size of the asymmetric private data, if successful 1153 * - 0 if device is invalid or does not have private session 1154 */ 1155 __rte_experimental 1156 unsigned int 1157 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id); 1158 1159 /** 1160 * Validate if the crypto device index is valid attached crypto device. 1161 * 1162 * @param dev_id Crypto device index. 1163 * 1164 * @return 1165 * - If the device index is valid (1) or not (0). 1166 */ 1167 unsigned int 1168 rte_cryptodev_is_valid_dev(uint8_t dev_id); 1169 1170 /** 1171 * Provide driver identifier. 1172 * 1173 * @param name 1174 * The pointer to a driver name. 1175 * @return 1176 * The driver type identifier or -1 if no driver found 1177 */ 1178 int rte_cryptodev_driver_id_get(const char *name); 1179 1180 /** 1181 * Provide driver name. 1182 * 1183 * @param driver_id 1184 * The driver identifier. 1185 * @return 1186 * The driver name or null if no driver found 1187 */ 1188 const char *rte_cryptodev_driver_name_get(uint8_t driver_id); 1189 1190 /** 1191 * Store user data in a session. 
 *
 * @param sess		Session pointer allocated by
 *			*rte_cryptodev_sym_session_create*.
 * @param data		Pointer to the user data.
 * @param size		Size of the user data.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
__rte_experimental
int
rte_cryptodev_sym_session_set_user_data(void *sess,
					void *data,
					uint16_t size);

/** Offset, in 64-bit words, of the opaque data within a session handle. */
#define CRYPTO_SESS_OPAQUE_DATA_OFF 0
/**
 * Get opaque data from session handle
 *
 * Reads the 64-bit opaque value stored at word offset
 * CRYPTO_SESS_OPAQUE_DATA_OFF (i.e. the first 64-bit word) of the
 * session handle.
 *
 * @param sess	Session handle; must not be NULL.
 * @return
 *   The 64-bit opaque value previously stored in the session.
 */
static inline uint64_t
rte_cryptodev_sym_session_opaque_data_get(void *sess)
{
	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
}

/**
 * Set opaque data in session handle
 *
 * Writes the 64-bit opaque value at word offset
 * CRYPTO_SESS_OPAQUE_DATA_OFF (i.e. the first 64-bit word) of the
 * session handle.
 *
 * @param sess		Session handle; must not be NULL.
 * @param opaque	Opaque value to store in the session.
 */
static inline void
rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
{
	uint64_t *data;
	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
	*data = opaque;
}

/**
 * Get user data stored in a session.
 *
 * @param sess		Session pointer allocated by
 *			*rte_cryptodev_sym_session_create*.
 *
 * @return
 *  - On success return pointer to user data.
 *  - On failure returns NULL.
 */
__rte_experimental
void *
rte_cryptodev_sym_session_get_user_data(void *sess);

/**
 * Store user data in an asymmetric session.
 *
 * @param sess		Session pointer allocated by
 *			*rte_cryptodev_asym_session_create*.
 * @param data		Pointer to the user data.
 * @param size		Size of the user data.
 *
 * @return
 *  - On success, zero.
 *  - -EINVAL if the session pointer is invalid.
 *  - -ENOMEM if the available user data size is smaller than the size parameter.
 */
__rte_experimental
int
rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);

/**
 * Get user data stored in an asymmetric session.
1262 * 1263 * @param sess Session pointer allocated by 1264 * *rte_cryptodev_asym_session_create*. 1265 * 1266 * @return 1267 * - On success return pointer to user data. 1268 * - On failure returns NULL. 1269 */ 1270 __rte_experimental 1271 void * 1272 rte_cryptodev_asym_session_get_user_data(void *sess); 1273 1274 /** 1275 * Perform actual crypto processing (encrypt/digest or auth/decrypt) 1276 * on user provided data. 1277 * 1278 * @param dev_id The device identifier. 1279 * @param sess Cryptodev session structure 1280 * @param ofs Start and stop offsets for auth and cipher operations 1281 * @param vec Vectorized operation descriptor 1282 * 1283 * @return 1284 * - Returns number of successfully processed packets. 1285 */ 1286 __rte_experimental 1287 uint32_t 1288 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, 1289 void *sess, union rte_crypto_sym_ofs ofs, 1290 struct rte_crypto_sym_vec *vec); 1291 1292 /** 1293 * Get the size of the raw data-path context buffer. 1294 * 1295 * @param dev_id The device identifier. 1296 * 1297 * @return 1298 * - If the device supports raw data-path APIs, return the context size. 1299 * - If the device does not support the APIs, return -1. 1300 */ 1301 __rte_experimental 1302 int 1303 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id); 1304 1305 /** 1306 * Set session event meta data 1307 * 1308 * @param dev_id The device identifier. 1309 * @param sess Crypto or security session. 1310 * @param op_type Operation type. 1311 * @param sess_type Session type. 1312 * @param ev_mdata Pointer to the event crypto meta data 1313 * (aka *union rte_event_crypto_metadata*) 1314 * @param size Size of ev_mdata. 1315 * 1316 * @return 1317 * - On success, zero. 1318 * - On failure, a negative value. 
 */
__rte_experimental
int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
	enum rte_crypto_op_type op_type,
	enum rte_crypto_op_sess_type sess_type,
	void *ev_mdata, uint16_t size);

/**
 * Union of different crypto session types, including session-less xform
 * pointer.
 */
union rte_cryptodev_session_ctx {
	void *crypto_sess;
	/**< Crypto session handle */
	struct rte_crypto_sym_xform *xform;
	/**< Session-less symmetric transform chain */
	struct rte_security_session *sec_sess;
	/**< Security session handle */
};

/**
 * Enqueue a vectorized operation descriptor into the device queue but the
 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
 * is called.
 *
 * @param	qp		Driver specific queue pair data.
 * @param	drv_ctx		Driver specific context data.
 * @param	vec		Vectorized operation descriptor.
 * @param	ofs		Start and stop offsets for auth and cipher
 *				operations.
 * @param	user_data	The array of user data for dequeue later.
 * @param	enqueue_status	Driver written value to specify the
 *				enqueue status. Possible values:
 *				- 1: The number of operations returned are
 *				enqueued successfully.
 *				- 0: The number of operations returned are
 *				cached into the queue but are not processed
 *				until rte_cryptodev_raw_enqueue_done() is
 *				called.
 *				- negative integer: Error occurred.
 * @return
 *   - The number of operations in the descriptor successfully enqueued or
 *   cached into the queue but not enqueued yet, depends on the
 *   "enqueue_status" value.
 */
typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);

/**
 * Enqueue single raw data vector into the device queue but the driver may or
 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1368 * 1369 * @param qp Driver specific queue pair data. 1370 * @param drv_ctx Driver specific context data. 1371 * @param data_vec The buffer data vector. 1372 * @param n_data_vecs Number of buffer data vectors. 1373 * @param ofs Start and stop offsets for auth and cipher 1374 * operations. 1375 * @param iv IV virtual and IOVA addresses 1376 * @param digest digest virtual and IOVA addresses 1377 * @param aad_or_auth_iv AAD or auth IV virtual and IOVA addresses, 1378 * depends on the algorithm used. 1379 * @param user_data The user data. 1380 * @return 1381 * - 1: The data vector is enqueued successfully. 1382 * - 0: The data vector is cached into the queue but is not processed 1383 * until rte_cryptodev_raw_enqueue_done() is called. 1384 * - negative integer: failure. 1385 */ 1386 typedef int (*cryptodev_sym_raw_enqueue_t)( 1387 void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec, 1388 uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs, 1389 struct rte_crypto_va_iova_ptr *iv, 1390 struct rte_crypto_va_iova_ptr *digest, 1391 struct rte_crypto_va_iova_ptr *aad_or_auth_iv, 1392 void *user_data); 1393 1394 /** 1395 * Inform the cryptodev queue pair to start processing or finish dequeuing all 1396 * enqueued/dequeued operations. 1397 * 1398 * @param qp Driver specific queue pair data. 1399 * @param drv_ctx Driver specific context data. 1400 * @param n The total number of processed operations. 1401 * @return 1402 * - On success return 0. 1403 * - On failure return negative integer. 1404 */ 1405 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx, 1406 uint32_t n); 1407 1408 /** 1409 * Typedef that the user provided for the driver to get the dequeue count. 1410 * The function may return a fixed number or the number parsed from the user 1411 * data stored in the first processed operation. 1412 * 1413 * @param user_data Dequeued user data. 1414 * @return 1415 * - The number of operations to be dequeued. 
1416 */ 1417 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data); 1418 1419 /** 1420 * Typedef that the user provided to deal with post dequeue operation, such 1421 * as filling status. 1422 * 1423 * @param user_data Dequeued user data. 1424 * @param index Index number of the processed descriptor. 1425 * @param is_op_success Operation status provided by the driver. 1426 */ 1427 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data, 1428 uint32_t index, uint8_t is_op_success); 1429 1430 /** 1431 * Dequeue a burst of symmetric crypto processing. 1432 * 1433 * @param qp Driver specific queue pair data. 1434 * @param drv_ctx Driver specific context data. 1435 * @param get_dequeue_count User provided callback function to 1436 * obtain dequeue operation count. 1437 * @param max_nb_to_dequeue When get_dequeue_count is NULL this 1438 * value is used to pass the maximum 1439 * number of operations to be dequeued. 1440 * @param post_dequeue User provided callback function to 1441 * post-process a dequeued operation. 1442 * @param out_user_data User data pointer array to be retrieve 1443 * from device queue. In case of 1444 * *is_user_data_array* is set there 1445 * should be enough room to store all 1446 * user data. 1447 * @param is_user_data_array Set 1 if every dequeued user data will 1448 * be written into out_user_data array. 1449 * Set 0 if only the first user data will 1450 * be written into out_user_data array. 1451 * @param n_success Driver written value to specific the 1452 * total successful operations count. 1453 * @param dequeue_status Driver written value to specify the 1454 * dequeue status. Possible values: 1455 * - 1: Successfully dequeued the number 1456 * of operations returned. The user 1457 * data previously set during enqueue 1458 * is stored in the "out_user_data". 
1459 * - 0: The number of operations returned 1460 * are completed and the user data is 1461 * stored in the "out_user_data", but 1462 * they are not freed from the queue 1463 * until 1464 * rte_cryptodev_raw_dequeue_done() 1465 * is called. 1466 * - negative integer: Error occurred. 1467 * @return 1468 * - The number of operations dequeued or completed but not freed from the 1469 * queue, depends on "dequeue_status" value. 1470 */ 1471 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp, 1472 uint8_t *drv_ctx, 1473 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, 1474 uint32_t max_nb_to_dequeue, 1475 rte_cryptodev_raw_post_dequeue_t post_dequeue, 1476 void **out_user_data, uint8_t is_user_data_array, 1477 uint32_t *n_success, int *dequeue_status); 1478 1479 /** 1480 * Dequeue a symmetric crypto processing. 1481 * 1482 * @param qp Driver specific queue pair data. 1483 * @param drv_ctx Driver specific context data. 1484 * @param dequeue_status Driver written value to specify the 1485 * dequeue status. Possible values: 1486 * - 1: Successfully dequeued a operation. 1487 * The user data is returned. 1488 * - 0: The first operation in the queue 1489 * is completed and the user data 1490 * previously set during enqueue is 1491 * returned, but it is not freed from 1492 * the queue until 1493 * rte_cryptodev_raw_dequeue_done() is 1494 * called. 1495 * - negative integer: Error occurred. 1496 * @param op_status Driver written value to specify 1497 * operation status. 1498 * @return 1499 * - The user data pointer retrieved from device queue or NULL if no 1500 * operation is ready for dequeue. 1501 */ 1502 typedef void * (*cryptodev_sym_raw_dequeue_t)( 1503 void *qp, uint8_t *drv_ctx, int *dequeue_status, 1504 enum rte_crypto_op_status *op_status); 1505 1506 /** 1507 * Context data for raw data-path API crypto process. 
The buffer of this
 * structure is to be allocated by the user application with the size equal
 * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value.
 */
struct rte_crypto_raw_dp_ctx {
	void *qp_data;
	/**< Driver specific queue pair data, passed as the first argument
	 * to every driver function pointer below.
	 */

	cryptodev_sym_raw_enqueue_t enqueue;
	/**< Driver function: enqueue one raw data vector */
	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
	/**< Driver function: enqueue a vectorized operation descriptor */
	cryptodev_sym_raw_operation_done_t enqueue_done;
	/**< Driver function: signal that cached enqueues may be processed */
	cryptodev_sym_raw_dequeue_t dequeue;
	/**< Driver function: dequeue a single operation */
	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
	/**< Driver function: dequeue a burst of operations */
	cryptodev_sym_raw_operation_done_t dequeue_done;
	/**< Driver function: signal that dequeued operations may be freed */

	/* Driver specific context data */
	__extension__ uint8_t drv_ctx_data[];
};

/**
 * Configure raw data-path context data.
 *
 * @param	dev_id		The device identifier.
 * @param	qp_id		The index of the queue pair from which to
 *				retrieve processed packets. The value must be
 *				in the range [0, nb_queue_pair - 1] previously
 *				supplied to rte_cryptodev_configure().
 * @param	ctx		The raw data-path context data.
 * @param	sess_type	Session type.
 * @param	session_ctx	Session context data.
 * @param	is_update	Set 0 if it is to initialize the ctx.
 *				Set 1 if ctx is initialized and only to update
 *				session context data.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 *   - -EINVAL if input parameters are invalid.
 *   - -ENOTSUP if crypto device does not support raw DP operations with the
 *   provided session.
 */
__rte_experimental
int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update);

/**
 * Enqueue a vectorized operation descriptor into the device queue but the
 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
 * is called.
1558 * 1559 * @param ctx The initialized raw data-path context data. 1560 * @param vec Vectorized operation descriptor. 1561 * @param ofs Start and stop offsets for auth and cipher 1562 * operations. 1563 * @param user_data The array of user data for dequeue later. 1564 * @param enqueue_status Driver written value to specify the 1565 * enqueue status. Possible values: 1566 * - 1: The number of operations returned are 1567 * enqueued successfully. 1568 * - 0: The number of operations returned are 1569 * cached into the queue but are not processed 1570 * until rte_cryptodev_raw_enqueue_done() is 1571 * called. 1572 * - negative integer: Error occurred. 1573 * @return 1574 * - The number of operations in the descriptor successfully enqueued or 1575 * cached into the queue but not enqueued yet, depends on the 1576 * "enqueue_status" value. 1577 */ 1578 __rte_experimental 1579 uint32_t 1580 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx, 1581 struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, 1582 void **user_data, int *enqueue_status); 1583 1584 /** 1585 * Enqueue single raw data vector into the device queue but the driver may or 1586 * may not start processing until rte_cryptodev_raw_enqueue_done() is called. 1587 * 1588 * @param ctx The initialized raw data-path context data. 1589 * @param data_vec The buffer data vector. 1590 * @param n_data_vecs Number of buffer data vectors. 1591 * @param ofs Start and stop offsets for auth and cipher 1592 * operations. 1593 * @param iv IV virtual and IOVA addresses 1594 * @param digest digest virtual and IOVA addresses 1595 * @param aad_or_auth_iv AAD or auth IV virtual and IOVA addresses, 1596 * depends on the algorithm used. 1597 * @param user_data The user data. 1598 * @return 1599 * - 1: The data vector is enqueued successfully. 1600 * - 0: The data vector is cached into the queue but is not processed 1601 * until rte_cryptodev_raw_enqueue_done() is called. 1602 * - negative integer: failure. 
 */
__rte_experimental
static __rte_always_inline int
rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	/* Thin fast-path dispatch to the driver-provided enqueue handler
	 * stored in the context.
	 */
	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
}

/**
 * Start processing all enqueued operations from last
 * rte_cryptodev_configure_raw_dp_ctx() call.
 *
 * @param	ctx	The initialized raw data-path context data.
 * @param	n	The number of operations cached.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n);

/**
 * Dequeue a burst of symmetric crypto processing.
 *
 * @param	ctx			The initialized raw data-path context
 *					data.
 * @param	get_dequeue_count	User provided callback function to
 *					obtain dequeue operation count.
 * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
 *					value is used to pass the maximum
 *					number of operations to be dequeued.
 * @param	post_dequeue		User provided callback function to
 *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from device queue. In case of
 *					*is_user_data_array* is set there
 *					should be enough room to store all
 *					user data.
 * @param	is_user_data_array	Set 1 if every dequeued user data will
 *					be written into out_user_data array.
 *					Set 0 if only the first user data will
 *					be written into out_user_data array.
1654 * @param n_success Driver written value to specific the 1655 * total successful operations count. 1656 * @param dequeue_status Driver written value to specify the 1657 * dequeue status. Possible values: 1658 * - 1: Successfully dequeued the number 1659 * of operations returned. The user 1660 * data previously set during enqueue 1661 * is stored in the "out_user_data". 1662 * - 0: The number of operations returned 1663 * are completed and the user data is 1664 * stored in the "out_user_data", but 1665 * they are not freed from the queue 1666 * until 1667 * rte_cryptodev_raw_dequeue_done() 1668 * is called. 1669 * - negative integer: Error occurred. 1670 * @return 1671 * - The number of operations dequeued or completed but not freed from the 1672 * queue, depends on "dequeue_status" value. 1673 */ 1674 __rte_experimental 1675 uint32_t 1676 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx, 1677 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, 1678 uint32_t max_nb_to_dequeue, 1679 rte_cryptodev_raw_post_dequeue_t post_dequeue, 1680 void **out_user_data, uint8_t is_user_data_array, 1681 uint32_t *n_success, int *dequeue_status); 1682 1683 /** 1684 * Dequeue a symmetric crypto processing. 1685 * 1686 * @param ctx The initialized raw data-path context 1687 * data. 1688 * @param dequeue_status Driver written value to specify the 1689 * dequeue status. Possible values: 1690 * - 1: Successfully dequeued a operation. 1691 * The user data is returned. 1692 * - 0: The first operation in the queue 1693 * is completed and the user data 1694 * previously set during enqueue is 1695 * returned, but it is not freed from 1696 * the queue until 1697 * rte_cryptodev_raw_dequeue_done() is 1698 * called. 1699 * - negative integer: Error occurred. 1700 * @param op_status Driver written value to specify 1701 * operation status. 1702 * @return 1703 * - The user data pointer retrieved from device queue or NULL if no 1704 * operation is ready for dequeue. 
 */
__rte_experimental
static __rte_always_inline void *
rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
	int *dequeue_status, enum rte_crypto_op_status *op_status)
{
	/* Thin fast-path dispatch to the driver-provided dequeue handler
	 * stored in the context.
	 */
	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
			op_status);
}

/**
 * Inform the queue pair that dequeue operations are finished.
 *
 * @param	ctx	The initialized raw data-path context data.
 * @param	n	The number of operations.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n);

/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops enqueue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_enq_callback().
 *
 * Callbacks registered by application would not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is user responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leakage.
 * Application is expected to call add API after rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair & they are called
 * in the order they were added. The API does not restrict on maximum number
 * of callbacks.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be enqueued for processing. The value
 *				must be in the range [0, nb_queue_pairs - 1]
 *				previously supplied to
 *				*rte_cryptodev_configure*.
1754 * @param cb_fn The callback function 1755 * @param cb_arg A generic pointer parameter which will be passed 1756 * to each invocation of the callback function on 1757 * this crypto device and queue pair. 1758 * 1759 * @return 1760 * - NULL on error & rte_errno will contain the error code. 1761 * - On success, a pointer value which can later be used to remove the 1762 * callback. 1763 */ 1764 1765 __rte_experimental 1766 struct rte_cryptodev_cb * 1767 rte_cryptodev_add_enq_callback(uint8_t dev_id, 1768 uint16_t qp_id, 1769 rte_cryptodev_callback_fn cb_fn, 1770 void *cb_arg); 1771 1772 /** 1773 * Remove a user callback function for given crypto device and queue pair. 1774 * 1775 * This function is used to remove enqueue callbacks that were added to a 1776 * crypto device queue pair using rte_cryptodev_add_enq_callback(). 1777 * 1778 * 1779 * 1780 * @param dev_id The identifier of the device. 1781 * @param qp_id The index of the queue pair on which ops are 1782 * to be enqueued. The value must be in the 1783 * range [0, nb_queue_pairs - 1] previously 1784 * supplied to *rte_cryptodev_configure*. 1785 * @param cb Pointer to user supplied callback created via 1786 * rte_cryptodev_add_enq_callback(). 1787 * 1788 * @return 1789 * - 0: Success. Callback was removed. 1790 * - <0: The dev_id or the qp_id is out of range, or the callback 1791 * is NULL or not found for the crypto device queue pair. 1792 */ 1793 1794 __rte_experimental 1795 int rte_cryptodev_remove_enq_callback(uint8_t dev_id, 1796 uint16_t qp_id, 1797 struct rte_cryptodev_cb *cb); 1798 1799 /** 1800 * Add a user callback for a given crypto device and queue pair which will be 1801 * called on crypto ops dequeue. 1802 * 1803 * This API configures a function to be called for each burst of crypto ops 1804 * received on a given crypto device queue pair. The return value is a pointer 1805 * that can be used later to remove the callback using 1806 * rte_cryptodev_remove_deq_callback(). 
1807 * 1808 * Callbacks registered by application would not survive 1809 * rte_cryptodev_configure() as it reinitializes the callback list. 1810 * It is user responsibility to remove all installed callbacks before 1811 * calling rte_cryptodev_configure() to avoid possible memory leakage. 1812 * Application is expected to call add API after rte_cryptodev_configure(). 1813 * 1814 * Multiple functions can be registered per queue pair & they are called 1815 * in the order they were added. The API does not restrict on maximum number 1816 * of callbacks. 1817 * 1818 * @param dev_id The identifier of the device. 1819 * @param qp_id The index of the queue pair on which ops are 1820 * to be dequeued. The value must be in the 1821 * range [0, nb_queue_pairs - 1] previously 1822 * supplied to *rte_cryptodev_configure*. 1823 * @param cb_fn The callback function 1824 * @param cb_arg A generic pointer parameter which will be passed 1825 * to each invocation of the callback function on 1826 * this crypto device and queue pair. 1827 * 1828 * @return 1829 * - NULL on error & rte_errno will contain the error code. 1830 * - On success, a pointer value which can later be used to remove the 1831 * callback. 1832 */ 1833 1834 __rte_experimental 1835 struct rte_cryptodev_cb * 1836 rte_cryptodev_add_deq_callback(uint8_t dev_id, 1837 uint16_t qp_id, 1838 rte_cryptodev_callback_fn cb_fn, 1839 void *cb_arg); 1840 1841 /** 1842 * Remove a user callback function for given crypto device and queue pair. 1843 * 1844 * This function is used to remove dequeue callbacks that were added to a 1845 * crypto device queue pair using rte_cryptodev_add_deq_callback(). 1846 * 1847 * 1848 * 1849 * @param dev_id The identifier of the device. 1850 * @param qp_id The index of the queue pair on which ops are 1851 * to be dequeued. The value must be in the 1852 * range [0, nb_queue_pairs - 1] previously 1853 * supplied to *rte_cryptodev_configure*. 
1854 * @param cb Pointer to user supplied callback created via 1855 * rte_cryptodev_add_deq_callback(). 1856 * 1857 * @return 1858 * - 0: Success. Callback was removed. 1859 * - <0: The dev_id or the qp_id is out of range, or the callback 1860 * is NULL or not found for the crypto device queue pair. 1861 */ 1862 __rte_experimental 1863 int rte_cryptodev_remove_deq_callback(uint8_t dev_id, 1864 uint16_t qp_id, 1865 struct rte_cryptodev_cb *cb); 1866 1867 #include <rte_cryptodev_core.h> 1868 /** 1869 * 1870 * Dequeue a burst of processed crypto operations from a queue on the crypto 1871 * device. The dequeued operation are stored in *rte_crypto_op* structures 1872 * whose pointers are supplied in the *ops* array. 1873 * 1874 * The rte_cryptodev_dequeue_burst() function returns the number of ops 1875 * actually dequeued, which is the number of *rte_crypto_op* data structures 1876 * effectively supplied into the *ops* array. 1877 * 1878 * A return value equal to *nb_ops* indicates that the queue contained 1879 * at least *nb_ops* operations, and this is likely to signify that other 1880 * processed operations remain in the devices output queue. Applications 1881 * implementing a "retrieve as many processed operations as possible" policy 1882 * can check this specific case and keep invoking the 1883 * rte_cryptodev_dequeue_burst() function until a value less than 1884 * *nb_ops* is returned. 1885 * 1886 * The rte_cryptodev_dequeue_burst() function does not provide any error 1887 * notification to avoid the corresponding overhead. 1888 * 1889 * @param dev_id The symmetric crypto device identifier 1890 * @param qp_id The index of the queue pair from which to 1891 * retrieve processed packets. The value must be 1892 * in the range [0, nb_queue_pair - 1] previously 1893 * supplied to rte_cryptodev_configure(). 1894 * @param ops The address of an array of pointers to 1895 * *rte_crypto_op* structures that must be 1896 * large enough to store *nb_ops* pointers in it. 
1897 * @param nb_ops The maximum number of operations to dequeue. 1898 * 1899 * @return 1900 * - The number of operations actually dequeued, which is the number 1901 * of pointers to *rte_crypto_op* structures effectively supplied to the 1902 * *ops* array. 1903 */ 1904 static inline uint16_t 1905 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, 1906 struct rte_crypto_op **ops, uint16_t nb_ops) 1907 { 1908 const struct rte_crypto_fp_ops *fp_ops; 1909 void *qp; 1910 1911 rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops); 1912 1913 fp_ops = &rte_crypto_fp_ops[dev_id]; 1914 qp = fp_ops->qp.data[qp_id]; 1915 1916 nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops); 1917 1918 #ifdef RTE_CRYPTO_CALLBACKS 1919 if (unlikely(fp_ops->qp.deq_cb != NULL)) { 1920 struct rte_cryptodev_cb_rcu *list; 1921 struct rte_cryptodev_cb *cb; 1922 1923 /* __ATOMIC_RELEASE memory order was used when the 1924 * call back was inserted into the list. 1925 * Since there is a clear dependency between loading 1926 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is 1927 * not required. 1928 */ 1929 list = &fp_ops->qp.deq_cb[qp_id]; 1930 rte_rcu_qsbr_thread_online(list->qsbr, 0); 1931 cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED); 1932 1933 while (cb != NULL) { 1934 nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops, 1935 cb->arg); 1936 cb = cb->next; 1937 }; 1938 1939 rte_rcu_qsbr_thread_offline(list->qsbr, 0); 1940 } 1941 #endif 1942 return nb_ops; 1943 } 1944 1945 /** 1946 * Enqueue a burst of operations for processing on a crypto device. 1947 * 1948 * The rte_cryptodev_enqueue_burst() function is invoked to place 1949 * crypto operations on the queue *qp_id* of the device designated by 1950 * its *dev_id*. 1951 * 1952 * The *nb_ops* parameter is the number of operations to process which are 1953 * supplied in the *ops* array of *rte_crypto_op* structures. 
1954 * 1955 * The rte_cryptodev_enqueue_burst() function returns the number of 1956 * operations it actually enqueued for processing. A return value equal to 1957 * *nb_ops* means that all packets have been enqueued. 1958 * 1959 * @param dev_id The identifier of the device. 1960 * @param qp_id The index of the queue pair which packets are 1961 * to be enqueued for processing. The value 1962 * must be in the range [0, nb_queue_pairs - 1] 1963 * previously supplied to 1964 * *rte_cryptodev_configure*. 1965 * @param ops The address of an array of *nb_ops* pointers 1966 * to *rte_crypto_op* structures which contain 1967 * the crypto operations to be processed. 1968 * @param nb_ops The number of operations to process. 1969 * 1970 * @return 1971 * The number of operations actually enqueued on the crypto device. The return 1972 * value can be less than the value of the *nb_ops* parameter when the 1973 * crypto devices queue is full or if invalid parameters are specified in 1974 * a *rte_crypto_op*. 1975 */ 1976 static inline uint16_t 1977 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, 1978 struct rte_crypto_op **ops, uint16_t nb_ops) 1979 { 1980 const struct rte_crypto_fp_ops *fp_ops; 1981 void *qp; 1982 1983 fp_ops = &rte_crypto_fp_ops[dev_id]; 1984 qp = fp_ops->qp.data[qp_id]; 1985 #ifdef RTE_CRYPTO_CALLBACKS 1986 if (unlikely(fp_ops->qp.enq_cb != NULL)) { 1987 struct rte_cryptodev_cb_rcu *list; 1988 struct rte_cryptodev_cb *cb; 1989 1990 /* __ATOMIC_RELEASE memory order was used when the 1991 * call back was inserted into the list. 1992 * Since there is a clear dependency between loading 1993 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is 1994 * not required. 
1995 */ 1996 list = &fp_ops->qp.enq_cb[qp_id]; 1997 rte_rcu_qsbr_thread_online(list->qsbr, 0); 1998 cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED); 1999 2000 while (cb != NULL) { 2001 nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops, 2002 cb->arg); 2003 cb = cb->next; 2004 }; 2005 2006 rte_rcu_qsbr_thread_offline(list->qsbr, 0); 2007 } 2008 #endif 2009 2010 rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops); 2011 return fp_ops->enqueue_burst(qp, ops, nb_ops); 2012 } 2013 2014 2015 2016 #ifdef __cplusplus 2017 } 2018 #endif 2019 2020 #endif /* _RTE_CRYPTODEV_H_ */ 2021