/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation.
 */

#ifndef _RTE_CRYPTODEV_H_
#define _RTE_CRYPTODEV_H_

/**
 * @file rte_cryptodev.h
 *
 * RTE Cryptographic Device APIs
 *
 * Defines RTE Crypto Device APIs for the provisioning of cipher and
 * authentication operations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include "rte_kvargs.h"
#include "rte_crypto.h"
#include <rte_common.h>
#include <rte_rcu_qsbr.h>

#include "rte_cryptodev_trace_fp.h"

extern const char **rte_cyptodev_names;

/* Logging Macros */

#define CDEV_LOG_ERR(...) \
    RTE_LOG(ERR, CRYPTODEV, \
        RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
            __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_LOG_INFO(...) \
    RTE_LOG(INFO, CRYPTODEV, \
        RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
            RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_LOG_DEBUG(...) \
    RTE_LOG(DEBUG, CRYPTODEV, \
        RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
            __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_PMD_TRACE(...) \
    RTE_LOG(DEBUG, CRYPTODEV, \
        RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
            dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))

/**
 * A macro that points to an offset from the start
 * of the crypto operation structure (rte_crypto_op)
 *
 * The returned pointer is cast to type t.
 *
 * @param c
 *   The crypto operation.
 * @param t
 *   The type to cast the result into.
 * @param o
 *   The offset from the start of the crypto operation.
 */
#define rte_crypto_op_ctod_offset(c, t, o) \
    ((t)((char *)(c) + (o)))

/**
 * A macro that returns the physical address that points
 * to an offset from the start of the crypto operation
 * (rte_crypto_op)
 *
 * @param c
 *   The crypto operation.
 * @param o
 *   The offset from the start of the crypto operation
 *   to calculate address from.
 */
#define rte_crypto_op_ctophys_offset(c, o) \
    (rte_iova_t)((c)->phys_addr + (o))

/**
 * Crypto parameters range description
 */
struct rte_crypto_param_range {
    uint16_t min;       /**< minimum size */
    uint16_t max;       /**< maximum size */
    uint16_t increment;
    /**< if a range of sizes is supported,
     * this parameter is used to indicate
     * increments in byte size that are supported
     * between the minimum and maximum
     */
};
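/*
 * Illustrative usage sketch (not part of the API): reading a per-operation IV
 * that the application placed at a known offset after the crypto operation,
 * using the offset macros above. IV_OFFSET, op, app_iv and app_iv_len are
 * application-side assumptions, not definitions from this header.
 *
 *    #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 *            sizeof(struct rte_crypto_sym_op))
 *
 *    uint8_t *iv_va = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *    rte_iova_t iv_pa = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
 *    memcpy(iv_va, app_iv, app_iv_len);   // iv_va/iv_pa now address the IV
 */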
/**
 * Data-unit supported lengths of cipher algorithms.
 * A bit can represent any set of data-unit sizes
 * (single size, multiple size, range, etc).
 */
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES   RTE_BIT32(0)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES  RTE_BIT32(1)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES RTE_BIT32(2)

/**
 * Symmetric Crypto Capability
 */
struct rte_cryptodev_symmetric_capability {
    enum rte_crypto_sym_xform_type xform_type;
    /**< Transform type : Authentication / Cipher / AEAD */
    RTE_STD_C11
    union {
        struct {
            enum rte_crypto_auth_algorithm algo;
            /**< authentication algorithm */
            uint16_t block_size;
            /**< algorithm block size */
            struct rte_crypto_param_range key_size;
            /**< auth key size range */
            struct rte_crypto_param_range digest_size;
            /**< digest size range */
            struct rte_crypto_param_range aad_size;
            /**< Additional authentication data size range */
            struct rte_crypto_param_range iv_size;
            /**< Initialisation vector data size range */
        } auth;
        /**< Symmetric Authentication transform capabilities */
        struct {
            enum rte_crypto_cipher_algorithm algo;
            /**< cipher algorithm */
            uint16_t block_size;
            /**< algorithm block size */
            struct rte_crypto_param_range key_size;
            /**< cipher key size range */
            struct rte_crypto_param_range iv_size;
            /**< Initialisation vector data size range */
            uint32_t dataunit_set;
            /**<
             * Supported data-unit lengths:
             * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
             * or 0 for lengths defined in the algorithm standard.
             */
        } cipher;
        /**< Symmetric Cipher transform capabilities */
        struct {
            enum rte_crypto_aead_algorithm algo;
            /**< AEAD algorithm */
            uint16_t block_size;
            /**< algorithm block size */
            struct rte_crypto_param_range key_size;
            /**< AEAD key size range */
            struct rte_crypto_param_range digest_size;
            /**< digest size range */
            struct rte_crypto_param_range aad_size;
            /**< Additional authentication data size range */
            struct rte_crypto_param_range iv_size;
            /**< Initialisation vector data size range */
        } aead;
    };
};

/**
 * Asymmetric Xform Crypto Capability
 */
struct rte_cryptodev_asymmetric_xform_capability {
    enum rte_crypto_asym_xform_type xform_type;
    /**< Transform type: RSA/MODEXP/DH/DSA/MODINV */

    uint32_t op_types;
    /**< bitmask for supported rte_crypto_asym_op_type */

    __extension__
    union {
        struct rte_crypto_param_range modlen;
        /**< Range of modulus length supported by modulus based xform.
         * Value 0 means implementation default
         */
    };
};

/**
 * Asymmetric Crypto Capability
 */
struct rte_cryptodev_asymmetric_capability {
    struct rte_cryptodev_asymmetric_xform_capability xform_capa;
};


/** Structure used to capture a capability of a crypto device */
struct rte_cryptodev_capabilities {
    enum rte_crypto_op_type op;
    /**< Operation type */

    RTE_STD_C11
    union {
        struct rte_cryptodev_symmetric_capability sym;
        /**< Symmetric operation capability parameters */
        struct rte_cryptodev_asymmetric_capability asym;
        /**< Asymmetric operation capability parameters */
    };
};

/** Structure used to describe crypto algorithms */
struct rte_cryptodev_sym_capability_idx {
    enum rte_crypto_sym_xform_type type;
    union {
        enum rte_crypto_cipher_algorithm cipher;
        enum rte_crypto_auth_algorithm auth;
        enum rte_crypto_aead_algorithm aead;
    } algo;
};

/**
 * Structure used to describe asymmetric crypto xforms
 * Each xform maps to one asym algorithm.
 */
struct rte_cryptodev_asym_capability_idx {
    enum rte_crypto_asym_xform_type type;
    /**< Asymmetric xform (algo) type */
};

/**
 * Provide capabilities available for defined device and algorithm
 *
 * @param dev_id   The identifier of the device.
 * @param idx      Description of crypto algorithms.
 *
 * @return
 *   - Return description of the symmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
 */
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
        const struct rte_cryptodev_sym_capability_idx *idx);

/**
 * Provide capabilities available for defined device and xform
 *
 * @param dev_id   The identifier of the device.
 * @param idx      Description of asym crypto xform.
 *
 * @return
 *   - Return description of the asymmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
 */
__rte_experimental
const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
        const struct rte_cryptodev_asym_capability_idx *idx);

/**
 * Check if key size and initial vector are supported
 * in crypto cipher capability
 *
 * @param capability   Description of the symmetric crypto capability.
 * @param key_size     Cipher key size.
 * @param iv_size      Cipher initial vector size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_cipher(
        const struct rte_cryptodev_symmetric_capability *capability,
        uint16_t key_size, uint16_t iv_size);
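/*
 * Illustrative usage sketch (not part of the API): query a device for an
 * AES-CBC cipher capability and validate the key/IV sizes the application
 * intends to use. "dev_id" and "handle_unsupported" are hypothetical.
 *
 *    struct rte_cryptodev_sym_capability_idx idx = {
 *        .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *        .algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *    };
 *    const struct rte_cryptodev_symmetric_capability *cap =
 *        rte_cryptodev_sym_capability_get(dev_id, &idx);
 *    if (cap == NULL ||
 *        rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *        handle_unsupported();   // 16-byte key and 16-byte IV refused
 */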
/**
 * Check if key size and initial vector are supported
 * in crypto auth capability
 *
 * @param capability    Description of the symmetric crypto capability.
 * @param key_size      Auth key size.
 * @param digest_size   Auth digest size.
 * @param iv_size       Auth initial vector size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_auth(
        const struct rte_cryptodev_symmetric_capability *capability,
        uint16_t key_size, uint16_t digest_size, uint16_t iv_size);

/**
 * Check if key, digest, AAD and initial vector sizes are supported
 * in crypto AEAD capability
 *
 * @param capability    Description of the symmetric crypto capability.
 * @param key_size      AEAD key size.
 * @param digest_size   AEAD digest size.
 * @param aad_size      AEAD AAD size.
 * @param iv_size       AEAD IV size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_aead(
        const struct rte_cryptodev_symmetric_capability *capability,
        uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
        uint16_t iv_size);

/**
 * Check if op type is supported
 *
 * @param capability    Description of the asymmetric crypto capability.
 * @param op_type       op type
 *
 * @return
 *   - Return 1 if the op type is supported
 *   - Return 0 if unsupported
 */
__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_optype(
        const struct rte_cryptodev_asymmetric_xform_capability *capability,
        enum rte_crypto_asym_op_type op_type);

/**
 * Check if modulus length is in supported range
 *
 * @param capability    Description of the asymmetric crypto capability.
 * @param modlen        modulus length.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_modlen(
        const struct rte_cryptodev_asymmetric_xform_capability *capability,
        uint16_t modlen);

/**
 * Provide the cipher algorithm enum, given an algorithm string
 *
 * @param algo_enum     A pointer to the cipher algorithm
 *                      enum to be filled
 * @param algo_string   Cipher algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
        const char *algo_string);

/**
 * Provide the authentication algorithm enum, given an algorithm string
 *
 * @param algo_enum     A pointer to the authentication algorithm
 *                      enum to be filled
 * @param algo_string   Authentication algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
        const char *algo_string);

/**
 * Provide the AEAD algorithm enum, given an algorithm string
 *
 * @param algo_enum     A pointer to the AEAD algorithm
 *                      enum to be filled
 * @param algo_string   AEAD algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
        const char *algo_string);

/**
 * Provide the Asymmetric xform enum, given an xform string
 *
 * @param xform_enum     A pointer to the xform type
 *                       enum to be filled
 * @param xform_string   xform string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
__rte_experimental
int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
        const char *xform_string);


/** Macro used at end of crypto PMD list */
#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
    { RTE_CRYPTO_OP_TYPE_UNDEFINED }


/**
 * Crypto device supported feature flags
 *
 * Note:
 * New features flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_cryptodev_get_feature_name()
 */
#define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO       (1ULL << 0)
/**< Symmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO      (1ULL << 1)
/**< Asymmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
/**< Chaining of symmetric crypto operations is supported */
#define RTE_CRYPTODEV_FF_CPU_SSE                (1ULL << 3)
/**< Utilises CPU SIMD SSE instructions */
#define RTE_CRYPTODEV_FF_CPU_AVX                (1ULL << 4)
/**< Utilises CPU SIMD AVX instructions */
#define RTE_CRYPTODEV_FF_CPU_AVX2               (1ULL << 5)
/**< Utilises CPU SIMD AVX2 instructions */
#define RTE_CRYPTODEV_FF_CPU_AESNI              (1ULL << 6)
/**< Utilises CPU AES-NI instructions */
#define RTE_CRYPTODEV_FF_HW_ACCELERATED         (1ULL << 7)
/**< Operations are off-loaded to an
 * external hardware accelerator
 */
#define RTE_CRYPTODEV_FF_CPU_AVX512             (1ULL << 8)
/**< Utilises CPU SIMD AVX512 instructions */
#define RTE_CRYPTODEV_FF_IN_PLACE_SGL           (1ULL << 9)
/**< In-place Scatter-gather (SGL) buffers, with multiple segments,
 * are supported
 */
#define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT     (1ULL << 10)
/**< Out-of-place Scatter-gather (SGL) buffers are
 * supported in input and output
 */
#define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT      (1ULL << 11)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment in output
 */
#define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT      (1ULL << 12)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT       (1ULL << 13)
/**< Out-of-place linear buffers (LB) are supported in input and output */
#define RTE_CRYPTODEV_FF_CPU_NEON               (1ULL << 14)
/**< Utilises CPU NEON instructions */
#define RTE_CRYPTODEV_FF_CPU_ARM_CE             (1ULL << 15)
/**< Utilises ARM CPU Cryptographic Extensions */
#define RTE_CRYPTODEV_FF_SECURITY               (1ULL << 16)
/**< Support Security Protocol Processing */
#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP    (1ULL << 17)
/**< Support RSA Private Key OP with exponent */
#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT     (1ULL << 18)
/**< Support RSA Private Key OP with CRT (quintuple) Keys */
#define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED       (1ULL << 19)
/**< Support encrypted-digest operations where digest is appended to data */
#define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS       (1ULL << 20)
/**< Support asymmetric session-less operations */
#define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO         (1ULL << 21)
/**< Support symmetric cpu-crypto processing */
#define RTE_CRYPTODEV_FF_SYM_SESSIONLESS        (1ULL << 22)
/**< Support symmetric session-less operations */
#define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA  (1ULL << 23)
/**< Support operations on data which is not byte aligned */
#define RTE_CRYPTODEV_FF_SYM_RAW_DP             (1ULL << 24)
/**< Support accelerator specific symmetric raw data-path APIs */
#define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS (1ULL << 25)
/**< Support operations on multiple data-units message */
#define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY     (1ULL << 26)
/**< Support wrapped key in cipher xform */
#define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM    (1ULL << 27)
/**< Support inner checksum computation/verification */

/**
 * Get the name of a crypto device feature flag
 *
 * @param flag   The mask describing the flag.
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
extern const char *
rte_cryptodev_get_feature_name(uint64_t flag);

/** Crypto device information */
struct rte_cryptodev_info {
    const char *driver_name;    /**< Driver name. */
    uint8_t driver_id;          /**< Driver identifier */
    struct rte_device *device;  /**< Generic device information. */

    uint64_t feature_flags;
    /**< Feature flags exposes HW/SW features for the given device */

    const struct rte_cryptodev_capabilities *capabilities;
    /**< Array of devices supported capabilities */

    unsigned max_nb_queue_pairs;
    /**< Maximum number of queue pairs supported by device. */

    uint16_t min_mbuf_headroom_req;
    /**< Minimum mbuf headroom required by device */

    uint16_t min_mbuf_tailroom_req;
    /**< Minimum mbuf tailroom required by device */

    struct {
        unsigned max_nb_sessions;
        /**< Maximum number of sessions supported by device.
         * If 0, the device does not have any limitation in
         * number of sessions that can be used.
         */
    } sym;
};

#define RTE_CRYPTODEV_DETACHED  (0)
#define RTE_CRYPTODEV_ATTACHED  (1)

/** Definitions of Crypto device event types */
enum rte_cryptodev_event_type {
    RTE_CRYPTODEV_EVENT_UNKNOWN,    /**< unknown event type */
    RTE_CRYPTODEV_EVENT_ERROR,      /**< error interrupt event */
    RTE_CRYPTODEV_EVENT_MAX         /**< max value of this enum */
};

/** Crypto device queue pair configuration structure. */
struct rte_cryptodev_qp_conf {
    uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
    struct rte_mempool *mp_session;
    /**< The mempool for creating session in sessionless mode */
    struct rte_mempool *mp_session_private;
    /**< The mempool for creating sess private data in sessionless mode */
};

/**
 * Function type used for processing crypto ops when enqueue/dequeue burst is
 * called.
 *
 * The callback function is called on enqueue/dequeue burst immediately.
 *
 * @param dev_id       The identifier of the device.
 * @param qp_id        The index of the queue pair on which ops are
 *                     enqueued/dequeued. The value must be in the
 *                     range [0, nb_queue_pairs - 1] previously
 *                     supplied to *rte_cryptodev_configure*.
 * @param ops          The address of an array of *nb_ops* pointers
 *                     to *rte_crypto_op* structures which contain
 *                     the crypto operations to be processed.
 * @param nb_ops       The number of operations to process.
 * @param user_param   The arbitrary user parameter passed in by the
 *                     application when the callback was originally
 *                     registered.
 * @return             The number of ops to be enqueued to the
 *                     crypto device.
 */
typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
        struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);

/**
 * Typedef for application callback function to be registered by application
 * software for notification of device events
 *
 * @param dev_id   Crypto device identifier
 * @param event    Crypto device event to register for notification of.
 * @param cb_arg   User specified parameter to be passed to the user's
 *                 callback function.
 */
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
        enum rte_cryptodev_event_type event, void *cb_arg);


/** Crypto Device statistics */
struct rte_cryptodev_stats {
    uint64_t enqueued_count;
    /**< Count of all operations enqueued */
    uint64_t dequeued_count;
    /**< Count of all operations dequeued */

    uint64_t enqueue_err_count;
    /**< Total error count on operations enqueued */
    uint64_t dequeue_err_count;
    /**< Total error count on operations dequeued */
};

#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
/**< Max length of name of crypto PMD */

/**
 * Get the device identifier for the named crypto device.
 *
 * @param name   device name to select the device structure.
 *
 * @return
 *   - Returns crypto device identifier on success.
 *   - Return -1 on failure to find named crypto device.
 */
extern int
rte_cryptodev_get_dev_id(const char *name);

/**
 * Get the crypto device name given a device identifier.
 *
 * @param dev_id
 *   The identifier of the device
 *
 * @return
 *   - Returns crypto device name.
 *   - Returns NULL if crypto device is not present.
 */
extern const char *
rte_cryptodev_name_get(uint8_t dev_id);

/**
 * Get the total number of crypto devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable crypto devices.
 */
extern uint8_t
rte_cryptodev_count(void);

/**
 * Get number of crypto devices of a defined type (driver).
 *
 * @param driver_id   driver identifier.
 *
 * @return
 *   Returns number of crypto devices.
 */
extern uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id);

/**
 * Get number and identifiers of attached crypto devices that
 * use the same crypto driver.
 *
 * @param driver_name   driver name.
 * @param devices       output devices identifiers.
 * @param nb_devices    maximal number of devices.
 *
 * @return
 *   Returns number of attached crypto devices.
 */
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
        uint8_t nb_devices);
/**
 * Return the NUMA socket to which a device is connected
 *
 * @param dev_id
 *   The identifier of the device
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 is returned if the dev_id value is out of range.
 */
extern int
rte_cryptodev_socket_id(uint8_t dev_id);

/** Crypto device configuration structure */
struct rte_cryptodev_config {
    int socket_id;          /**< Socket to allocate resources on */
    uint16_t nb_queue_pairs;
    /**< Number of queue pairs to configure on device */
    uint64_t ff_disable;
    /**< Feature flags to be disabled.
     * Only the following features are allowed to be disabled,
     *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
     *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
     *  - RTE_CRYPTODEV_FF_SECURITY
     */
};

/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param dev_id   The identifier of the device to configure.
 * @param config   The crypto device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
extern int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);

/**
 * Start a device.
 *
 * The device start step is the last one and consists of setting the configured
 * offload features and starting the transmit and the receive units of the
 * device.
 * On success, all basic functions exported by the API (link status,
 * receive/transmit, and so on) can be invoked.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
extern int
rte_cryptodev_start(uint8_t dev_id);

/**
 * Stop a device. The device can be restarted with a call to
 * rte_cryptodev_start()
 *
 * @param dev_id   The identifier of the device.
 */
extern void
rte_cryptodev_stop(uint8_t dev_id);

/**
 * Close a device. The device cannot be restarted!
 *
 * @param dev_id   The identifier of the device.
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 */
extern int
rte_cryptodev_close(uint8_t dev_id);

/**
 * Allocate and set up a receive queue pair for a device.
 *
 * @param dev_id          The identifier of the device.
 * @param queue_pair_id   The index of the queue pairs to set up. The
 *                        value must be in the range [0, nb_queue_pair
 *                        - 1] previously supplied to
 *                        rte_cryptodev_configure().
 * @param qp_conf         The pointer to the configuration data to be
 *                        used for the queue pair.
 * @param socket_id       The *socket_id* argument is the socket
 *                        identifier in case of NUMA. The value can be
 *                        *SOCKET_ID_ANY* if there is no NUMA constraint
 *                        for the DMA memory allocated for the receive
 *                        queue pair.
 *
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
extern int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);

/**
 * Get the status of queue pairs setup on a specific crypto device
 *
 * @param dev_id          Crypto device identifier.
 * @param queue_pair_id   The index of the queue pairs to set up. The
 *                        value must be in the range [0, nb_queue_pair
 *                        - 1] previously supplied to
 *                        rte_cryptodev_configure().
 * @return
 *   - 0: qp was not configured
 *   - 1: qp was configured
 *   - -EINVAL: device was not configured
 */
__rte_experimental
int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
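/*
 * Illustrative setup sketch (not part of the API): bring up a device with a
 * single queue pair. NB_DESC, sess_mp, sess_priv_mp and handle_error() are
 * application-side assumptions; a real application sizes them from
 * rte_cryptodev_info_get() and the session size helpers declared below.
 *
 *    struct rte_cryptodev_config conf = {
 *        .socket_id = rte_cryptodev_socket_id(dev_id),
 *        .nb_queue_pairs = 1,
 *        .ff_disable = 0,
 *    };
 *    struct rte_cryptodev_qp_conf qp_conf = {
 *        .nb_descriptors = NB_DESC,
 *        .mp_session = sess_mp,
 *        .mp_session_private = sess_priv_mp,
 *    };
 *    if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *        rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *                conf.socket_id) < 0 ||
 *        rte_cryptodev_start(dev_id) < 0)
 *        handle_error();   // hypothetical error handler
 */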
/**
 * Get the number of queue pairs on a specific crypto device
 *
 * @param dev_id   Crypto device identifier.
 * @return
 *   - The number of configured queue pairs.
 */
extern uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id);


/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id   The identifier of the device.
 * @param stats    A pointer to a structure of type
 *                 *rte_cryptodev_stats* to be filled with the
 *                 values of device counters.
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
extern int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);

/**
 * Reset the general I/O statistics of a device.
 *
 * @param dev_id   The identifier of the device.
 */
extern void
rte_cryptodev_stats_reset(uint8_t dev_id);

/**
 * Retrieve the contextual information of a device.
 *
 * @param dev_id     The identifier of the device.
 * @param dev_info   A pointer to a structure of type
 *                   *rte_cryptodev_info* to be filled with the
 *                   contextual information of the device.
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_cryptodev_capabilities. The element after
 * the last valid element has its op field set to
 * RTE_CRYPTO_OP_TYPE_UNDEFINED.
 */
extern void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);


/**
 * Register a callback function for specific device id.
 *
 * @param dev_id   Device id.
 * @param event    Event of interest.
 * @param cb_fn    User supplied callback function to be called.
 * @param cb_arg   Pointer to the parameters for the registered
 *                 callback.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
extern int
rte_cryptodev_callback_register(uint8_t dev_id,
        enum rte_cryptodev_event_type event,
        rte_cryptodev_cb_fn cb_fn, void *cb_arg);

/**
 * Unregister a callback function for specific device id.
 *
 * @param dev_id   The device identifier.
 * @param event    Event of interest.
 * @param cb_fn    User supplied callback function to be called.
 * @param cb_arg   Pointer to the parameters for the registered
 *                 callback.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
extern int
rte_cryptodev_callback_unregister(uint8_t dev_id,
        enum rte_cryptodev_event_type event,
        rte_cryptodev_cb_fn cb_fn, void *cb_arg);

struct rte_cryptodev_callback;

/** Structure to keep track of registered callbacks */
RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);

/**
 * Structure used to hold information about the callbacks to be called for a
 * queue pair on enqueue/dequeue.
 */
struct rte_cryptodev_cb {
    struct rte_cryptodev_cb *next;
    /**< Pointer to next callback */
    rte_cryptodev_callback_fn fn;
    /**< Pointer to callback function */
    void *arg;
    /**< Pointer to argument */
};

/**
 * @internal
 * Structure used to hold information about the RCU for a queue pair.
 */
struct rte_cryptodev_cb_rcu {
    struct rte_cryptodev_cb *next;
    /**< Pointer to next callback */
    struct rte_rcu_qsbr *qsbr;
    /**< RCU QSBR variable per queue pair */
};

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id);

/** Cryptodev symmetric crypto session
 * Each session is derived from a fixed xform chain. Therefore each session
 * has a fixed algo, key, op-type, digest_len etc.
 */
struct rte_cryptodev_sym_session {
    uint64_t opaque_data;
    /**< Can be used for external metadata */
    uint16_t nb_drivers;
    /**< number of elements in sess_data array */
    uint16_t user_data_sz;
    /**< session user data will be placed after sess_data */
    __extension__ struct {
        void *data;
        uint16_t refcnt;
    } sess_data[0];
    /**< Driver specific session material, variable size */
};

/**
 * Create a symmetric session mempool.
 *
 * @param name
 *   The unique mempool name.
 * @param nb_elts
 *   The number of elements in the mempool.
 * @param elt_size
 *   The size of the element. This value will be ignored if it is smaller than
 *   the minimum session header size required for the system. For the user who
 *   wants to use the same mempool for sym session and session private data it
 *   can be the maximum value of all existing devices' private data and session
 *   header sizes.
 * @param cache_size
 *   The number of per-lcore cache elements
 * @param priv_size
 *   The private data size of each session.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 *
 * @return
 *  - On success returns the created session mempool
 *  - On failure returns NULL
 */
__rte_experimental
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
    uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
    int socket_id);

/**
 * Create an asymmetric session mempool.
 *
 * @param name
 *   The unique mempool name.
 * @param nb_elts
 *   The number of elements in the mempool.
 * @param cache_size
 *   The number of per-lcore cache elements
 * @param user_data_size
 *   The size of user data to be placed after session private data.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 *
 * @return
 *  - On success return mempool
 *  - On failure returns NULL
 */
__rte_experimental
struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
    uint32_t cache_size, uint16_t user_data_size, int socket_id);

/**
 * Create symmetric crypto session header (generic with no private data)
 *
 * @param mempool   Symmetric session mempool to allocate session
 *                  objects from
 * @return
 *  - On success return pointer to sym-session
 *  - On failure returns NULL
 */
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
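/*
 * Illustrative sketch (not part of the API): create the two mempools used by
 * symmetric sessions and initialise a session for one device. NB_SESS,
 * CACHE_SZ, the "xform" chain and handle_error() are application-defined;
 * rte_cryptodev_sym_session_init() is declared further below.
 *
 *    unsigned int priv_sz =
 *        rte_cryptodev_sym_get_private_session_size(dev_id);
 *    struct rte_mempool *sess_mp = rte_cryptodev_sym_session_pool_create(
 *        "sess_mp", NB_SESS, 0, CACHE_SZ, 0, rte_socket_id());
 *    struct rte_mempool *sess_priv_mp = rte_mempool_create(
 *        "sess_priv_mp", NB_SESS, priv_sz, CACHE_SZ, 0,
 *        NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 *
 *    struct rte_cryptodev_sym_session *sess =
 *        rte_cryptodev_sym_session_create(sess_mp);
 *    if (sess == NULL ||
 *        rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *                sess_priv_mp) < 0)
 *        handle_error();   // hypothetical error handler
 */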
/**
 * Create and initialise an asymmetric crypto session structure.
 * Calls the PMD to configure the private session data.
 *
 * @param dev_id    ID of device that we want the session to be used on
 * @param xforms    Asymmetric crypto transform operations to apply on flow
 *                  processed with this session
 * @param mp        mempool to allocate asymmetric session
 *                  objects from
 * @param session   void ** for session to be used
 *
 * @return
 *  - 0 on success.
 *  - -EINVAL on invalid arguments.
 *  - -ENOMEM on memory error for session allocation.
 *  - -ENOTSUP if device doesn't support session configuration.
 */
__rte_experimental
int
rte_cryptodev_asym_session_create(uint8_t dev_id,
        struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
        void **session);

/**
 * Frees symmetric crypto session header, after checking that all
 * the device private data has been freed, returning it
 * to its original mempool.
 *
 * @param sess   Session header to be freed.
 *
 * @return
 *  - 0 if successful.
 *  - -EINVAL if session is NULL.
 *  - -EBUSY if not all device private data has been freed.
 */
int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);

/**
 * Clears and frees asymmetric crypto session header and private data,
 * returning it to its original mempool.
 *
 * @param dev_id   ID of device that uses the asymmetric session.
 * @param sess     Session header to be freed.
 *
 * @return
 *  - 0 if successful.
 *  - -EINVAL if device is invalid or session is NULL.
 */
__rte_experimental
int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);

/**
 * Fill out private data for the device id, based on its device type.
 *
 * @param dev_id    ID of device that we want the session to be used on
 * @param sess      Session where the private data will be attached to
 * @param xforms    Symmetric crypto transform operations to apply on flow
 *                  processed with this session
 * @param mempool   Mempool where the private data is allocated.
 *
 * @return
 *  - On success, zero.
 *  - -EINVAL if input parameters are invalid.
 *  - -ENOTSUP if crypto device does not support the crypto transform or
 *    does not support symmetric operations.
 *  - -ENOMEM if the private session could not be allocated.
 */
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
            struct rte_cryptodev_sym_session *sess,
            struct rte_crypto_sym_xform *xforms,
            struct rte_mempool *mempool);

/**
 * Frees private data for the device id, based on its device type,
 * returning it to its mempool. It is the application's responsibility
 * to ensure that private session data is not cleared while there are
 * still in-flight operations using it.
 *
 * @param dev_id   ID of device that uses the session.
 * @param sess     Session containing the reference to the private data
 *
 * @return
 *  - 0 if successful.
 *  - -EINVAL if device is invalid or session is NULL.
 *  - -ENOTSUP if crypto device does not support symmetric operations.
 */
int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
            struct rte_cryptodev_sym_session *sess);

/**
 * Get the size of the header session, for all registered drivers excluding
 * the user data size.
 *
 * @return
 *   Size of the symmetric header session.
 */
unsigned int
rte_cryptodev_sym_get_header_session_size(void);

/**
 * Get the size of the header session from created session.
 *
 * @param sess
 *   The sym cryptodev session pointer
 *
 * @return
 *   - If sess is not NULL, return the size of the header session including
 *     the private data size defined within sess.
 *   - If sess is NULL, return 0.
 */
__rte_experimental
unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
        struct rte_cryptodev_sym_session *sess);

/**
 * Get the size of the asymmetric session header.
 *
 * @return
 *   Size of the asymmetric header session.
 */
__rte_experimental
unsigned int
rte_cryptodev_asym_get_header_session_size(void);

/**
 * Get the size of the private symmetric session data
 * for a device.
 *
 * @param dev_id   The device identifier.
 *
 * @return
 *   - Size of the private data, if successful
 *   - 0 if device is invalid or does not have private
 *     symmetric session
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);

/**
 * Get the size of the private data for asymmetric session
 * on device
 *
 * @param dev_id   The device identifier.
 *
 * @return
 *   - Size of the asymmetric private data, if successful
 *   - 0 if device is invalid or does not have private session
 */
__rte_experimental
unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);

/**
 * Validate if the crypto device index is a valid attached crypto device.
 *
 * @param dev_id   Crypto device index.
 *
 * @return
 *   - If the device index is valid (1) or not (0).
 */
unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id);

/**
 * Provide driver identifier.
 *
 * @param name
 *   The pointer to a driver name.
 * @return
 *   The driver type identifier or -1 if no driver found
 */
int rte_cryptodev_driver_id_get(const char *name);

/**
 * Provide driver name.
 *
 * @param driver_id
 *   The driver identifier.
 * @return
 *   The driver name or NULL if no driver found
 */
const char *rte_cryptodev_driver_name_get(uint8_t driver_id);

/**
 * Store user data in a session.
 *
 * @param sess   Session pointer allocated by
 *               *rte_cryptodev_sym_session_create*.
 * @param data   Pointer to the user data.
 * @param size   Size of the user data.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
__rte_experimental
int
rte_cryptodev_sym_session_set_user_data(
                    struct rte_cryptodev_sym_session *sess,
                    void *data,
                    uint16_t size);

/**
 * Get user data stored in a session.
 *
 * @param sess   Session pointer allocated by
 *               *rte_cryptodev_sym_session_create*.
 *
 * @return
 *  - On success return pointer to user data.
 *  - On failure returns NULL.
 */
__rte_experimental
void *
rte_cryptodev_sym_session_get_user_data(
                    struct rte_cryptodev_sym_session *sess);
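/*
 * Illustrative sketch (not part of the API): stash an application context in
 * a symmetric session and recover it later, e.g. when post-processing a
 * dequeued operation. "struct app_ctx" is hypothetical; space for it must
 * have been reserved via the session mempool's user data size.
 *
 *    struct app_ctx ctx = { .flow_id = 42 };
 *    if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *            sizeof(ctx)) == 0) {
 *        struct app_ctx *p =
 *            rte_cryptodev_sym_session_get_user_data(sess);
 *        // p->flow_id == 42 when the set call succeeded
 *    }
 */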
/**
 * Store user data in an asymmetric session.
 *
 * @param sess   Session pointer allocated by
 *               *rte_cryptodev_asym_session_create*.
 * @param data   Pointer to the user data.
 * @param size   Size of the user data.
 *
 * @return
 *  - On success, zero.
 *  - -EINVAL if the session pointer is invalid.
 *  - -ENOMEM if the available user data size is smaller than the size parameter.
 */
__rte_experimental
int
rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);

/**
 * Get user data stored in an asymmetric session.
 *
 * @param sess   Session pointer allocated by
 *               *rte_cryptodev_asym_session_create*.
 *
 * @return
 *  - On success return pointer to user data.
 *  - On failure returns NULL.
 */
__rte_experimental
void *
rte_cryptodev_asym_session_get_user_data(void *sess);

/**
 * Perform actual crypto processing (encrypt/digest or auth/decrypt)
 * on user provided data.
 *
 * @param dev_id   The device identifier.
 * @param sess     Cryptodev session structure
 * @param ofs      Start and stop offsets for auth and cipher operations
 * @param vec      Vectorized operation descriptor
 *
 * @return
 *  - Returns number of successfully processed packets.
 */
__rte_experimental
uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
    struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
    struct rte_crypto_sym_vec *vec);

/**
 * Get the size of the raw data-path context buffer.
 *
 * @param dev_id   The device identifier.
 *
 * @return
 *   - If the device supports raw data-path APIs, return the context size.
 *   - If the device does not support the APIs, return -1.
 */
__rte_experimental
int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);

/**
 * Set session event meta data
 *
 * @param dev_id      The device identifier.
 * @param sess        Crypto or security session.
 * @param op_type     Operation type.
 * @param sess_type   Session type.
 * @param ev_mdata    Pointer to the event crypto meta data
 *                    (aka *union rte_event_crypto_metadata*)
 * @param size        Size of ev_mdata.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
__rte_experimental
int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
    enum rte_crypto_op_type op_type,
    enum rte_crypto_op_sess_type sess_type,
    void *ev_mdata, uint16_t size);

/**
 * Union of different crypto session types, including session-less xform
 * pointer.
 */
union rte_cryptodev_session_ctx {
    struct rte_cryptodev_sym_session *crypto_sess;
    struct rte_crypto_sym_xform *xform;
    struct rte_security_session *sec_sess;
};

/**
 * Enqueue a vectorized operation descriptor into the device queue but the
 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
 * is called.
 *
 * @param qp               Driver specific queue pair data.
 * @param drv_ctx          Driver specific context data.
 * @param vec              Vectorized operation descriptor.
 * @param ofs              Start and stop offsets for auth and cipher
 *                         operations.
 * @param user_data        The array of user data for dequeue later.
 * @param enqueue_status   Driver written value to specify the
 *                         enqueue status. Possible values:
 *                         - 1: The number of operations returned are
 *                              enqueued successfully.
 *                         - 0: The number of operations returned are
 *                              cached into the queue but are not processed
 *                              until rte_cryptodev_raw_enqueue_done() is
 *                              called.
 *                         - negative integer: Error occurred.
 * @return
 *   - The number of operations in the descriptor successfully enqueued or
 *     cached into the queue but not enqueued yet, depends on the
 *     "enqueue_status" value.
 */
typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
    void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
    union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);

/**
 * Enqueue single raw data vector into the device queue but the driver may or
 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
 *
 * @param qp               Driver specific queue pair data.
 * @param drv_ctx          Driver specific context data.
 * @param data_vec         The buffer data vector.
 * @param n_data_vecs      Number of buffer data vectors.
 * @param ofs              Start and stop offsets for auth and cipher
 *                         operations.
 * @param iv               IV virtual and IOVA addresses
 * @param digest           digest virtual and IOVA addresses
 * @param aad_or_auth_iv   AAD or auth IV virtual and IOVA addresses,
 *                         depends on the algorithm used.
 * @param user_data        The user data.
 * @return
 *   - 1: The data vector is enqueued successfully.
 *   - 0: The data vector is cached into the queue but is not processed
 *        until rte_cryptodev_raw_enqueue_done() is called.
 *   - negative integer: failure.
 */
typedef int (*cryptodev_sym_raw_enqueue_t)(
    void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
    uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
    struct rte_crypto_va_iova_ptr *iv,
    struct rte_crypto_va_iova_ptr *digest,
    struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
    void *user_data);

/**
 * Inform the cryptodev queue pair to start processing or finish dequeuing all
 * enqueued/dequeued operations.
 *
 * @param qp        Driver specific queue pair data.
 * @param drv_ctx   Driver specific context data.
 * @param n         The total number of processed operations.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
    uint32_t n);

/**
 * Typedef of the function provided by the user for the driver to get the
 * dequeue count. The function may return a fixed number or the number parsed
 * from the user data stored in the first processed operation.
 *
 * @param user_data   Dequeued user data.
 * @return
 *  - The number of operations to be dequeued.
 **/
typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);

/**
 * Typedef of the function provided by the user to deal with post dequeue
 * operation, such as filling status.
 *
 * @param user_data       Dequeued user data.
 * @param index           Index number of the processed descriptor.
 * @param is_op_success   Operation status provided by the driver.
 **/
typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
    uint32_t index, uint8_t is_op_success);

/**
 * Dequeue a burst of symmetric crypto processing.
 *
 * @param qp                  Driver specific queue pair data.
 * @param drv_ctx             Driver specific context data.
 * @param get_dequeue_count   User provided callback function to
 *                            obtain dequeue operation count.
 * @param max_nb_to_dequeue   When get_dequeue_count is NULL this
 *                            value is used to pass the maximum
 *                            number of operations to be dequeued.
 * @param post_dequeue        User provided callback function to
 *                            post-process a dequeued operation.
 * @param out_user_data       User data pointer array to be retrieved
 *                            from device queue.
 *                            If *is_user_data_array* is set there
 *                            should be enough room to store all
 *                            user data.
 * @param is_user_data_array  Set 1 if every dequeued user data will
 *                            be written into out_user_data array.
 *                            Set 0 if only the first user data will
 *                            be written into out_user_data array.
 * @param n_success           Driver written value to specify the
 *                            total successful operations count.
 * @param dequeue_status      Driver written value to specify the
 *                            dequeue status. Possible values:
 *                            - 1: Successfully dequeued the number
 *                                 of operations returned. The user
 *                                 data previously set during enqueue
 *                                 is stored in the "out_user_data".
 *                            - 0: The number of operations returned
 *                                 are completed and the user data is
 *                                 stored in the "out_user_data", but
 *                                 they are not freed from the queue
 *                                 until
 *                                 rte_cryptodev_raw_dequeue_done()
 *                                 is called.
 *                            - negative integer: Error occurred.
 * @return
 *   - The number of operations dequeued or completed but not freed from the
 *     queue, depends on "dequeue_status" value.
 */
typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
    uint8_t *drv_ctx,
    rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
    uint32_t max_nb_to_dequeue,
    rte_cryptodev_raw_post_dequeue_t post_dequeue,
    void **out_user_data, uint8_t is_user_data_array,
    uint32_t *n_success, int *dequeue_status);

/**
 * Dequeue a symmetric crypto processing.
 *
 * @param qp               Driver specific queue pair data.
 * @param drv_ctx          Driver specific context data.
 * @param dequeue_status   Driver written value to specify the
 *                         dequeue status. Possible values:
 *                         - 1: Successfully dequeued an operation.
 *                              The user data is returned.
 *                         - 0: The first operation in the queue
 *                              is completed and the user data
 *                              previously set during enqueue is
 *                              returned, but it is not freed from
 *                              the queue until
 *                              rte_cryptodev_raw_dequeue_done() is
 *                              called.
 *                         - negative integer: Error occurred.
 * @param op_status        Driver written value to specify
 *                         operation status.
 * @return
 *   - The user data pointer retrieved from device queue or NULL if no
 *     operation is ready for dequeue.
 */
typedef void * (*cryptodev_sym_raw_dequeue_t)(
    void *qp, uint8_t *drv_ctx, int *dequeue_status,
    enum rte_crypto_op_status *op_status);

/**
 * Context data for raw data-path API crypto process. The buffer of this
 * structure is to be allocated by the user application with the size equal
 * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value.
 */
struct rte_crypto_raw_dp_ctx {
    void *qp_data;

    cryptodev_sym_raw_enqueue_t enqueue;
    cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
    cryptodev_sym_raw_operation_done_t enqueue_done;
    cryptodev_sym_raw_dequeue_t dequeue;
    cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
    cryptodev_sym_raw_operation_done_t dequeue_done;

    /* Driver specific context data */
    __extension__ uint8_t drv_ctx_data[];
};

/**
 * Configure raw data-path context data.
 *
 * NOTE:
 * After the context data is configured, the user should call
 * rte_cryptodev_raw_attach_session() before using it in
 * rte_cryptodev_raw_enqueue/dequeue function call.
 *
 * @param dev_id           The device identifier.
 * @param qp_id            The index of the queue pair from which to
 *                         retrieve processed packets. The value must be
 *                         in the range [0, nb_queue_pair - 1] previously
 *                         supplied to rte_cryptodev_configure().
 * @param ctx              The raw data-path context data.
 * @param sess_type        session type.
 * @param session_ctx      Session context data.
 * @param is_update        Set 0 if it is to initialize the ctx.
 *                         Set 1 if ctx is initialized and only to update
 *                         session context data.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
    struct rte_crypto_raw_dp_ctx *ctx,
    enum rte_crypto_op_sess_type sess_type,
    union rte_cryptodev_session_ctx session_ctx,
    uint8_t is_update);

/**
 * Enqueue a vectorized operation descriptor into the device queue but the
 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
 * is called.
 *
 * @param ctx              The initialized raw data-path context data.
 * @param vec              Vectorized operation descriptor.
 * @param ofs              Start and stop offsets for auth and cipher
 *                         operations.
 * @param user_data        The array of user data for dequeue later.
 * @param enqueue_status   Driver written value to specify the
 *                         enqueue status. Possible values:
 *                         - 1: The number of operations returned are
 *                              enqueued successfully.
 *                         - 0: The number of operations returned are
 *                              cached into the queue but are not processed
 *                              until rte_cryptodev_raw_enqueue_done() is
 *                              called.
 *                         - negative integer: Error occurred.
 * @return
 *   - The number of operations in the descriptor successfully enqueued or
 *     cached into the queue but not enqueued yet, depends on the
 *     "enqueue_status" value.
 */
__rte_experimental
uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
    struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
    void **user_data, int *enqueue_status);

/**
 * Enqueue single raw data vector into the device queue but the driver may or
 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
 *
 * @param ctx              The initialized raw data-path context data.
 * @param data_vec         The buffer data vector.
 * @param n_data_vecs      Number of buffer data vectors.
 * @param ofs              Start and stop offsets for auth and cipher
 *                         operations.
 * @param iv               IV virtual and IOVA addresses
 * @param digest           digest virtual and IOVA addresses
 * @param aad_or_auth_iv   AAD or auth IV virtual and IOVA addresses,
 *                         depends on the algorithm used.
 * @param user_data        The user data.
 * @return
 *   - 1: The data vector is enqueued successfully.
 *   - 0: The data vector is cached into the queue but is not processed
 *        until rte_cryptodev_raw_enqueue_done() is called.
 *   - negative integer: failure.
 */
__rte_experimental
static __rte_always_inline int
rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
    struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
    union rte_crypto_sym_ofs ofs,
    struct rte_crypto_va_iova_ptr *iv,
    struct rte_crypto_va_iova_ptr *digest,
    struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
    void *user_data)
{
    return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
        n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
}

/**
 * Start processing all enqueued operations from last
 * rte_cryptodev_configure_raw_dp_ctx() call.
 *
 * @param ctx   The initialized raw data-path context data.
 * @param n     The number of operations cached.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
        uint32_t n);

/**
 * Dequeue a burst of symmetric crypto processing.
 *
 * @param ctx                 The initialized raw data-path context
 *                            data.
 * @param get_dequeue_count   User provided callback function to
 *                            obtain dequeue operation count.
 * @param max_nb_to_dequeue   When get_dequeue_count is NULL this
 *                            value is used to pass the maximum
 *                            number of operations to be dequeued.
 * @param post_dequeue        User provided callback function to
 *                            post-process a dequeued operation.
 * @param out_user_data       User data pointer array to be retrieved
 *                            from device queue. If
 *                            *is_user_data_array* is set there
 *                            should be enough room to store all
 *                            user data.
 * @param is_user_data_array  Set 1 if every dequeued user data will
 *                            be written into out_user_data array.
 *                            Set 0 if only the first user data will
 *                            be written into out_user_data array.
 * @param n_success           Driver written value to specify the
 *                            total successful operations count.
 * @param dequeue_status      Driver written value to specify the
 *                            dequeue status. Possible values:
 *                            - 1: Successfully dequeued the number
 *                                 of operations returned. The user
 *                                 data previously set during enqueue
 *                                 is stored in the "out_user_data".
 *                            - 0: The number of operations returned
 *                                 are completed and the user data is
 *                                 stored in the "out_user_data", but
 *                                 they are not freed from the queue
 *                                 until
 *                                 rte_cryptodev_raw_dequeue_done()
 *                                 is called.
 *                            - negative integer: Error occurred.
 * @return
 *   - The number of operations dequeued or completed but not freed from the
 *     queue, depends on "dequeue_status" value.
 */
__rte_experimental
uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
    rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
    uint32_t max_nb_to_dequeue,
    rte_cryptodev_raw_post_dequeue_t post_dequeue,
    void **out_user_data, uint8_t is_user_data_array,
    uint32_t *n_success, int *dequeue_status);
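/*
 * Illustrative sketch (not part of the API): a minimal raw data-path cycle on
 * an already configured and started queue pair, using a session-based
 * context. "sess", "vec", "ofs", "udata" and "post_deq_cb" are assumed to
 * have been prepared by the application.
 *
 *    struct rte_crypto_raw_dp_ctx *ctx =
 *        malloc(rte_cryptodev_get_raw_dp_ctx_size(dev_id));
 *    union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *    uint32_t n, n_success;
 *    int st;
 *
 *    rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *        RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *    n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, udata, &st);
 *    if (st == 0)
 *        rte_cryptodev_raw_enqueue_done(ctx, n);   // kick cached ops
 *    ...
 *    n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, n, post_deq_cb,
 *        udata, 1, &n_success, &st);
 *    if (st == 0)
 *        rte_cryptodev_raw_dequeue_done(ctx, n);   // release from queue
 */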
/**
 * Dequeue a symmetric crypto processing.
 *
 * @param ctx              The initialized raw data-path context
 *                         data.
 * @param dequeue_status   Driver written value to specify the
 *                         dequeue status. Possible values:
 *                         - 1: Successfully dequeued an operation.
 *                              The user data is returned.
 *                         - 0: The first operation in the queue
 *                              is completed and the user data
 *                              previously set during enqueue is
 *                              returned, but it is not freed from
 *                              the queue until
 *                              rte_cryptodev_raw_dequeue_done() is
 *                              called.
 *                         - negative integer: Error occurred.
 * @param op_status        Driver written value to specify
 *                         operation status.
 * @return
 *   - The user data pointer retrieved from device queue or NULL if no
 *     operation is ready for dequeue.
 */
__rte_experimental
static __rte_always_inline void *
rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
    int *dequeue_status, enum rte_crypto_op_status *op_status)
{
    return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
            op_status);
}

/**
 * Inform the queue pair that dequeue operations are finished.
 *
 * @param ctx   The initialized raw data-path context data.
 * @param n     The number of operations.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
        uint32_t n);

/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops enqueue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_enq_callback().
 *
 * Callbacks registered by application would not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leakage.
 * Application is expected to call add API after rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair & they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param dev_id   The identifier of the device.
 * @param qp_id    The index of the queue pair on which ops are
 *                 to be enqueued for processing. The value
 *                 must be in the range [0, nb_queue_pairs - 1]
 *                 previously supplied to
 *                 *rte_cryptodev_configure*.
 * @param cb_fn    The callback function
 * @param cb_arg   A generic pointer parameter which will be passed
 *                 to each invocation of the callback function on
 *                 this crypto device and queue pair.
 *
 * @return
 *   - NULL on error & rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
                   uint16_t qp_id,
                   rte_cryptodev_callback_fn cb_fn,
                   void *cb_arg);

/**
 * Remove a user callback function for given crypto device and queue pair.
 *
 * This function is used to remove enqueue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_enq_callback().
 *
 * @param dev_id   The identifier of the device.
 * @param qp_id    The index of the queue pair on which ops are
 *                 to be enqueued.
/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops enqueue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * enqueued on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_enq_callback().
 *
 * Callbacks registered by the application do not survive
 * rte_cryptodev_configure(), as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leaks.
 * The application is expected to call this API after rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks. See the usage sketch after these declarations.
 *
 * @param dev_id	The identifier of the device.
 * @param qp_id		The index of the queue pair on which ops are
 *			to be enqueued for processing. The value
 *			must be in the range [0, nb_queue_pairs - 1]
 *			previously supplied to
 *			*rte_cryptodev_configure*.
 * @param cb_fn		The callback function.
 * @param cb_arg	A generic pointer parameter which will be passed
 *			to each invocation of the callback function on
 *			this crypto device and queue pair.
 *
 * @return
 *   - NULL on error; rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg);

/**
 * Remove a user callback function for a given crypto device and queue pair.
 *
 * This function is used to remove enqueue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_enq_callback().
 *
 * @param dev_id	The identifier of the device.
 * @param qp_id		The index of the queue pair on which ops are
 *			to be enqueued. The value must be in the
 *			range [0, nb_queue_pairs - 1] previously
 *			supplied to *rte_cryptodev_configure*.
 * @param cb		Pointer to user supplied callback created via
 *			rte_cryptodev_add_enq_callback().
 *
 * @return
 *   - 0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *     is NULL or not found for the crypto device queue pair.
 */
__rte_experimental
int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				      uint16_t qp_id,
				      struct rte_cryptodev_cb *cb);

/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops dequeue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * dequeued from a given crypto device queue pair. The return value is a
 * pointer that can be used later to remove the callback using
 * rte_cryptodev_remove_deq_callback().
 *
 * Callbacks registered by the application do not survive
 * rte_cryptodev_configure(), as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leaks.
 * The application is expected to call this API after rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param dev_id	The identifier of the device.
 * @param qp_id		The index of the queue pair from which ops are
 *			to be dequeued. The value must be in the
 *			range [0, nb_queue_pairs - 1] previously
 *			supplied to *rte_cryptodev_configure*.
 * @param cb_fn		The callback function.
 * @param cb_arg	A generic pointer parameter which will be passed
 *			to each invocation of the callback function on
 *			this crypto device and queue pair.
 *
 * @return
 *   - NULL on error; rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg);

/**
 * Remove a user callback function for a given crypto device and queue pair.
 *
 * This function is used to remove dequeue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_deq_callback().
 *
 * @param dev_id	The identifier of the device.
 * @param qp_id		The index of the queue pair from which ops are
 *			to be dequeued. The value must be in the
 *			range [0, nb_queue_pairs - 1] previously
 *			supplied to *rte_cryptodev_configure*.
 * @param cb		Pointer to user supplied callback created via
 *			rte_cryptodev_add_deq_callback().
 *
 * @return
 *   - 0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *     is NULL or not found for the crypto device queue pair.
 */
__rte_experimental
int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				      uint16_t qp_id,
				      struct rte_cryptodev_cb *cb);
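
/*
 * Illustrative usage sketch, not part of the API: count the operations seen
 * by a dequeue-stage callback. The helper names are hypothetical and the
 * callback parameter list is written to mirror the
 * cb->fn(dev_id, qp_id, ops, nb_ops, arg) invocation performed by the inline
 * burst functions below; the authoritative prototype is the
 * rte_cryptodev_callback_fn typedef declared earlier in this file.
 *
 *	static uint16_t
 *	example_count_deq_cb(uint16_t dev_id, uint16_t qp_id,
 *		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*counter += nb_ops;
 *		return nb_ops;	// pass the burst through unchanged
 *	}
 *
 *	// Register after rte_cryptodev_configure(); the callback must be
 *	// removed before the device is reconfigured.
 *	static struct rte_cryptodev_cb *
 *	example_register_count_cb(uint8_t dev_id, uint16_t qp_id,
 *		uint64_t *counter)
 *	{
 *		return rte_cryptodev_add_deq_callback(dev_id, qp_id,
 *			example_count_deq_cb, counter);
 *	}
 */
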
#include <rte_cryptodev_core.h>
/**
 * Dequeue a burst of processed crypto operations from a queue on the crypto
 * device. The dequeued operations are stored in *rte_crypto_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_cryptodev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_crypto_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_cryptodev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_cryptodev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @param dev_id	The symmetric crypto device identifier.
 * @param qp_id		The index of the queue pair from which to
 *			retrieve processed packets. The value must be
 *			in the range [0, nb_queue_pairs - 1] previously
 *			supplied to rte_cryptodev_configure().
 * @param ops		The address of an array of pointers to
 *			*rte_crypto_op* structures that must be
 *			large enough to store *nb_ops* pointers in it.
 * @param nb_ops	The maximum number of operations to dequeue.
 *
 * @return
 *   - The number of operations actually dequeued, which is the number
 *     of pointers to *rte_crypto_op* structures effectively supplied to the
 *     *ops* array.
 */
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	const struct rte_crypto_fp_ops *fp_ops;
	void *qp;

	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);

	fp_ops = &rte_crypto_fp_ops[dev_id];
	qp = fp_ops->qp.data[qp_id];

	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);

#ifdef RTE_CRYPTO_CALLBACKS
	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
		struct rte_cryptodev_cb_rcu *list;
		struct rte_cryptodev_cb *cb;

		/* __ATOMIC_RELEASE memory order was used when the
		 * callback was inserted into the list.
		 * Since there is a clear dependency between loading
		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
		 * not required.
		 */
		list = &fp_ops->qp.deq_cb[qp_id];
		rte_rcu_qsbr_thread_online(list->qsbr, 0);
		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);

		while (cb != NULL) {
			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
					cb->arg);
			cb = cb->next;
		}

		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
	}
#endif
	return nb_ops;
}
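
/*
 * Illustrative usage sketch, not part of the API: a minimal round trip using
 * the burst dequeue function above and the burst enqueue function declared
 * below. The helper name is hypothetical; "ops" is assumed to hold nb_ops
 * fully populated operations attached to valid sessions, and failed
 * operations are only detected, not recovered.
 *
 *	static void
 *	example_burst_round_trip(uint8_t dev_id, uint16_t qp_id,
 *		struct rte_crypto_op **ops, uint16_t nb_ops)
 *	{
 *		struct rte_crypto_op *deq_ops[32];
 *		uint16_t enq, done = 0;
 *
 *		// Fewer than nb_ops may be accepted if the queue is full.
 *		enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *
 *		// Busy-poll until everything that was accepted comes back.
 *		while (done < enq) {
 *			uint16_t deq, i;
 *
 *			deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *				deq_ops, RTE_DIM(deq_ops));
 *			for (i = 0; i < deq; i++) {
 *				if (deq_ops[i]->status !=
 *						RTE_CRYPTO_OP_STATUS_SUCCESS) {
 *					// handle the failed operation
 *				}
 *			}
 *			done += deq;
 *		}
 *	}
 */
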
/**
 * Enqueue a burst of operations for processing on a crypto device.
 *
 * The rte_cryptodev_enqueue_burst() function is invoked to place
 * crypto operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_crypto_op* structures.
 *
 * The rte_cryptodev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all packets have been enqueued.
 *
 * @param dev_id	The identifier of the device.
 * @param qp_id		The index of the queue pair on which packets are
 *			to be enqueued for processing. The value
 *			must be in the range [0, nb_queue_pairs - 1]
 *			previously supplied to
 *			*rte_cryptodev_configure*.
 * @param ops		The address of an array of *nb_ops* pointers
 *			to *rte_crypto_op* structures which contain
 *			the crypto operations to be processed.
 * @param nb_ops	The number of operations to process.
 *
 * @return
 *   The number of operations actually enqueued on the crypto device. The
 *   return value can be less than the value of the *nb_ops* parameter when
 *   the crypto device's queue is full or if invalid parameters are specified
 *   in a *rte_crypto_op*.
 */
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	const struct rte_crypto_fp_ops *fp_ops;
	void *qp;

	fp_ops = &rte_crypto_fp_ops[dev_id];
	qp = fp_ops->qp.data[qp_id];
#ifdef RTE_CRYPTO_CALLBACKS
	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
		struct rte_cryptodev_cb_rcu *list;
		struct rte_cryptodev_cb *cb;

		/* __ATOMIC_RELEASE memory order was used when the
		 * callback was inserted into the list.
		 * Since there is a clear dependency between loading
		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
		 * not required.
		 */
		list = &fp_ops->qp.enq_cb[qp_id];
		rte_rcu_qsbr_thread_online(list->qsbr, 0);
		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);

		while (cb != NULL) {
			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
					cb->arg);
			cb = cb->next;
		}

		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
	}
#endif

	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
	return fp_ops->enqueue_burst(qp, ops, nb_ops);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTODEV_H_ */