/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation.
 */

#ifndef _RTE_CRYPTODEV_H_
#define _RTE_CRYPTODEV_H_

/**
 * @file rte_cryptodev.h
 *
 * RTE Cryptographic Device APIs
 *
 * Defines RTE Crypto Device APIs for the provisioning of cipher and
 * authentication operations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include "rte_kvargs.h"
#include "rte_crypto.h"
#include "rte_dev.h"
#include <rte_common.h>
#include <rte_config.h>
#include <rte_rcu_qsbr.h>

#include "rte_cryptodev_trace_fp.h"

extern const char **rte_cyptodev_names;

/* Logging Macros */

#define CDEV_LOG_ERR(...) \
    RTE_LOG(ERR, CRYPTODEV, \
        RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
            __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_LOG_INFO(...) \
    RTE_LOG(INFO, CRYPTODEV, \
        RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
            RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_LOG_DEBUG(...) \
    RTE_LOG(DEBUG, CRYPTODEV, \
        RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
            __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define CDEV_PMD_TRACE(...) \
    RTE_LOG(DEBUG, CRYPTODEV, \
        RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
            dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))

/**
 * A macro that returns a pointer to an offset from the start
 * of the crypto operation structure (rte_crypto_op).
 *
 * The returned pointer is cast to type t.
 *
 * @param c
 *   The crypto operation.
 * @param o
 *   The offset from the start of the crypto operation.
 * @param t
 *   The type to cast the result into.
 */
#define rte_crypto_op_ctod_offset(c, t, o) \
    ((t)((char *)(c) + (o)))

/**
 * A macro that returns the physical address of an offset from the
 * start of the crypto operation (rte_crypto_op).
 *
 * @param c
 *   The crypto operation.
 * @param o
 *   The offset from the start of the crypto operation
 *   to calculate address from.
 */
#define rte_crypto_op_ctophys_offset(c, o) \
    (rte_iova_t)((c)->phys_addr + (o))

/**
 * Crypto parameters range description
 */
struct rte_crypto_param_range {
    uint16_t min;       /**< minimum size */
    uint16_t max;       /**< maximum size */
    uint16_t increment;
    /**< if a range of sizes is supported,
     * this parameter is used to indicate
     * increments in byte size that are supported
     * between the minimum and maximum
     */
};
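
/*
 * Illustrative sketch (not part of this header): how an application can test
 * whether a given size falls inside a rte_crypto_param_range. The helper name
 * is hypothetical; it roughly mirrors the checks performed by the
 * rte_cryptodev_sym_capability_check_*() functions declared further below.
 *
 *     static int
 *     param_range_check(uint16_t size, const struct rte_crypto_param_range *r)
 *     {
 *         // increment == 0 means only the single value min (== max) is valid
 *         if (r->increment == 0)
 *             return size == r->min;
 *         // otherwise sizes min, min + increment, ..., max are valid
 *         return size >= r->min && size <= r->max &&
 *             (size - r->min) % r->increment == 0;
 *     }
 */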
/**
 * Data-unit supported lengths of cipher algorithms.
 * A bit can represent any set of data-unit sizes
 * (single size, multiple size, range, etc).
 */
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES   RTE_BIT32(0)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES  RTE_BIT32(1)

/**
 * Symmetric Crypto Capability
 */
struct rte_cryptodev_symmetric_capability {
    enum rte_crypto_sym_xform_type xform_type;
    /**< Transform type: Authentication / Cipher / AEAD */
    RTE_STD_C11
    union {
        struct {
            enum rte_crypto_auth_algorithm algo;
            /**< authentication algorithm */
            uint16_t block_size;
            /**< algorithm block size */
            struct rte_crypto_param_range key_size;
            /**< auth key size range */
            struct rte_crypto_param_range digest_size;
            /**< digest size range */
            struct rte_crypto_param_range aad_size;
            /**< Additional authentication data size range */
            struct rte_crypto_param_range iv_size;
            /**< Initialisation vector data size range */
        } auth;
        /**< Symmetric Authentication transform capabilities */
        struct {
            enum rte_crypto_cipher_algorithm algo;
            /**< cipher algorithm */
            uint16_t block_size;
            /**< algorithm block size */
            struct rte_crypto_param_range key_size;
            /**< cipher key size range */
            struct rte_crypto_param_range iv_size;
            /**< Initialisation vector data size range */
            uint32_t dataunit_set;
            /**<
             * Supported data-unit lengths:
             * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
             * or 0 for lengths defined in the algorithm standard.
             */
        } cipher;
        /**< Symmetric Cipher transform capabilities */
        struct {
            enum rte_crypto_aead_algorithm algo;
            /**< AEAD algorithm */
            uint16_t block_size;
            /**< algorithm block size */
            struct rte_crypto_param_range key_size;
            /**< AEAD key size range */
            struct rte_crypto_param_range digest_size;
            /**< digest size range */
            struct rte_crypto_param_range aad_size;
            /**< Additional authentication data size range */
            struct rte_crypto_param_range iv_size;
            /**< Initialisation vector data size range */
        } aead;
    };
};
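
/*
 * Illustrative sketch (not part of this header): what a populated AEAD entry
 * of this structure may look like, e.g. inside a PMD capability table. The
 * sizes below are examples only and do not describe any particular device.
 *
 *     static const struct rte_cryptodev_symmetric_capability aes_gcm_cap = {
 *         .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *         .aead = {
 *             .algo = RTE_CRYPTO_AEAD_AES_GCM,
 *             .block_size = 16,
 *             .key_size = { .min = 16, .max = 32, .increment = 8 },
 *             .digest_size = { .min = 8, .max = 16, .increment = 4 },
 *             .aad_size = { .min = 0, .max = 240, .increment = 1 },
 *             .iv_size = { .min = 12, .max = 12, .increment = 0 },
 *         },
 *     };
 */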
/**
 * Asymmetric Xform Crypto Capability
 */
struct rte_cryptodev_asymmetric_xform_capability {
    enum rte_crypto_asym_xform_type xform_type;
    /**< Transform type: RSA/MODEXP/DH/DSA/MODINV */

    uint32_t op_types;
    /**< bitmask for supported rte_crypto_asym_op_type */

    __extension__
    union {
        struct rte_crypto_param_range modlen;
        /**< Range of modulus length supported by modulus based xform.
         * Value 0 means implementation default.
         */
    };
};

/**
 * Asymmetric Crypto Capability
 */
struct rte_cryptodev_asymmetric_capability {
    struct rte_cryptodev_asymmetric_xform_capability xform_capa;
};


/** Structure used to capture a capability of a crypto device */
struct rte_cryptodev_capabilities {
    enum rte_crypto_op_type op;
    /**< Operation type */

    RTE_STD_C11
    union {
        struct rte_cryptodev_symmetric_capability sym;
        /**< Symmetric operation capability parameters */
        struct rte_cryptodev_asymmetric_capability asym;
        /**< Asymmetric operation capability parameters */
    };
};

/** Structure used to describe crypto algorithms */
struct rte_cryptodev_sym_capability_idx {
    enum rte_crypto_sym_xform_type type;
    union {
        enum rte_crypto_cipher_algorithm cipher;
        enum rte_crypto_auth_algorithm auth;
        enum rte_crypto_aead_algorithm aead;
    } algo;
};

/**
 * Structure used to describe asymmetric crypto xforms.
 * Each xform maps to one asymmetric algorithm.
 */
struct rte_cryptodev_asym_capability_idx {
    enum rte_crypto_asym_xform_type type;
    /**< Asymmetric xform (algo) type */
};

/**
 * Provide capabilities available for defined device and algorithm
 *
 * @param dev_id   The identifier of the device.
 * @param idx      Description of crypto algorithms.
 *
 * @return
 *   - Return description of the symmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
 */
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
        const struct rte_cryptodev_sym_capability_idx *idx);

/**
 * Provide capabilities available for defined device and xform
 *
 * @param dev_id   The identifier of the device.
 * @param idx      Description of asym crypto xform.
 *
 * @return
 *   - Return description of the asymmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
 */
__rte_experimental
const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
        const struct rte_cryptodev_asym_capability_idx *idx);

/**
 * Check if key size and initial vector are supported
 * in crypto cipher capability
 *
 * @param capability   Description of the symmetric crypto capability.
 * @param key_size     Cipher key size.
 * @param iv_size      Cipher initial vector size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_cipher(
        const struct rte_cryptodev_symmetric_capability *capability,
        uint16_t key_size, uint16_t iv_size);
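
/*
 * Illustrative sketch (not part of this header): querying a device for
 * AES-CBC support and validating key/IV sizes before building a session.
 * dev_id is assumed to be a valid device index; the sizes are examples only.
 *
 *     const struct rte_cryptodev_symmetric_capability *cap;
 *     struct rte_cryptodev_sym_capability_idx idx = {
 *         .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *         .algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *     };
 *
 *     cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *     if (cap == NULL)
 *         return -ENOTSUP;    // algorithm not supported at all
 *     if (rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) < 0)
 *         return -ENOTSUP;    // 128-bit key / 16-byte IV not supported
 */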
/**
 * Check if key size and initial vector are supported
 * in crypto auth capability
 *
 * @param capability   Description of the symmetric crypto capability.
 * @param key_size     Auth key size.
 * @param digest_size  Auth digest size.
 * @param iv_size      Auth initial vector size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_auth(
        const struct rte_cryptodev_symmetric_capability *capability,
        uint16_t key_size, uint16_t digest_size, uint16_t iv_size);

/**
 * Check if key, digest, AAD and initial vector sizes are supported
 * in crypto AEAD capability
 *
 * @param capability   Description of the symmetric crypto capability.
 * @param key_size     AEAD key size.
 * @param digest_size  AEAD digest size.
 * @param aad_size     AEAD AAD size.
 * @param iv_size      AEAD IV size.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
int
rte_cryptodev_sym_capability_check_aead(
        const struct rte_cryptodev_symmetric_capability *capability,
        uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
        uint16_t iv_size);

/**
 * Check if op type is supported
 *
 * @param capability   Description of the asymmetric crypto capability.
 * @param op_type      Op type.
 *
 * @return
 *   - Return 1 if the op type is supported
 *   - Return 0 if unsupported
 */
__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_optype(
        const struct rte_cryptodev_asymmetric_xform_capability *capability,
        enum rte_crypto_asym_op_type op_type);

/**
 * Check if modulus length is in supported range
 *
 * @param capability   Description of the asymmetric crypto capability.
 * @param modlen       Modulus length.
 *
 * @return
 *   - Return 0 if the parameters are in range of the capability.
 *   - Return -1 if the parameters are out of range of the capability.
 */
__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_modlen(
        const struct rte_cryptodev_asymmetric_xform_capability *capability,
        uint16_t modlen);

/**
 * Provide the cipher algorithm enum, given an algorithm string
 *
 * @param algo_enum    A pointer to the cipher algorithm
 *                     enum to be filled
 * @param algo_string  Cipher algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
        const char *algo_string);

/**
 * Provide the authentication algorithm enum, given an algorithm string
 *
 * @param algo_enum    A pointer to the authentication algorithm
 *                     enum to be filled
 * @param algo_string  Authentication algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
        const char *algo_string);

/**
 * Provide the AEAD algorithm enum, given an algorithm string
 *
 * @param algo_enum    A pointer to the AEAD algorithm
 *                     enum to be filled
 * @param algo_string  AEAD algorithm string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
        const char *algo_string);
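
/*
 * Illustrative sketch (not part of this header): translating an algorithm
 * name, e.g. taken from a command line, into the corresponding enum value.
 * The exact set of accepted strings is defined by the library.
 *
 *     enum rte_crypto_cipher_algorithm algo;
 *
 *     if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *         printf("unknown cipher algorithm\n");
 */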
/**
 * Provide the Asymmetric xform enum, given an xform string
 *
 * @param xform_enum    A pointer to the xform type
 *                      enum to be filled
 * @param xform_string  Xform string
 *
 * @return
 *   - Return -1 if string is not valid
 *   - Return 0 if the string is valid
 */
__rte_experimental
int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
        const char *xform_string);


/** Macro used at end of crypto PMD list */
#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
    { RTE_CRYPTO_OP_TYPE_UNDEFINED }


/**
 * Crypto device supported feature flags
 *
 * Note:
 * New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_cryptodev_get_feature_name()
 */
#define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO           (1ULL << 0)
/**< Symmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO          (1ULL << 1)
/**< Asymmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING     (1ULL << 2)
/**< Chaining symmetric crypto operations are supported */
#define RTE_CRYPTODEV_FF_CPU_SSE                    (1ULL << 3)
/**< Utilises CPU SIMD SSE instructions */
#define RTE_CRYPTODEV_FF_CPU_AVX                    (1ULL << 4)
/**< Utilises CPU SIMD AVX instructions */
#define RTE_CRYPTODEV_FF_CPU_AVX2                   (1ULL << 5)
/**< Utilises CPU SIMD AVX2 instructions */
#define RTE_CRYPTODEV_FF_CPU_AESNI                  (1ULL << 6)
/**< Utilises CPU AES-NI instructions */
#define RTE_CRYPTODEV_FF_HW_ACCELERATED             (1ULL << 7)
/**< Operations are off-loaded to an
 * external hardware accelerator
 */
#define RTE_CRYPTODEV_FF_CPU_AVX512                 (1ULL << 8)
/**< Utilises CPU SIMD AVX512 instructions */
#define RTE_CRYPTODEV_FF_IN_PLACE_SGL               (1ULL << 9)
/**< In-place Scatter-gather (SGL) buffers, with multiple segments,
 * are supported
 */
#define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT         (1ULL << 10)
/**< Out-of-place Scatter-gather (SGL) buffers are
 * supported in input and output
 */
#define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT          (1ULL << 11)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment in output
 */
#define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT          (1ULL << 12)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT           (1ULL << 13)
/**< Out-of-place linear buffers (LB) are supported in input and output */
#define RTE_CRYPTODEV_FF_CPU_NEON                   (1ULL << 14)
/**< Utilises CPU NEON instructions */
#define RTE_CRYPTODEV_FF_CPU_ARM_CE                 (1ULL << 15)
/**< Utilises ARM CPU Cryptographic Extensions */
#define RTE_CRYPTODEV_FF_SECURITY                   (1ULL << 16)
/**< Support Security Protocol Processing */
#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP        (1ULL << 17)
/**< Support RSA Private Key OP with exponent */
#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT         (1ULL << 18)
/**< Support RSA Private Key OP with CRT (quintuple) Keys */
#define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED           (1ULL << 19)
/**< Support encrypted-digest operations where digest is appended to data */
#define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS           (1ULL << 20)
/**< Support asymmetric session-less operations */
#define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO             (1ULL << 21)
/**< Support symmetric cpu-crypto processing */
#define RTE_CRYPTODEV_FF_SYM_SESSIONLESS            (1ULL << 22)
/**< Support symmetric session-less operations */
#define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA      (1ULL << 23)
/**< Support operations on data which is not byte aligned */
#define RTE_CRYPTODEV_FF_SYM_RAW_DP                 (1ULL << 24)
/**< Support accelerator specific symmetric raw data-path APIs */
#define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS (1ULL << 25)
/**< Support operations on multiple data-units message */
#define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY         (1ULL << 26)
/**< Support wrapped key in cipher xform */
#define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM        (1ULL << 27)
/**< Support inner checksum computation/verification */

/**
 * Get the name of a crypto device feature flag
 *
 * @param flag   The mask describing the flag.
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
extern const char *
rte_cryptodev_get_feature_name(uint64_t flag);

/** Crypto device information */
struct rte_cryptodev_info {
    const char *driver_name;    /**< Driver name. */
    uint8_t driver_id;          /**< Driver identifier */
    struct rte_device *device;  /**< Generic device information. */

    uint64_t feature_flags;
    /**< Feature flags which expose HW/SW features for the given device */

    const struct rte_cryptodev_capabilities *capabilities;
    /**< Array of the device's supported capabilities */

    unsigned max_nb_queue_pairs;
    /**< Maximum number of queue pairs supported by device. */

    uint16_t min_mbuf_headroom_req;
    /**< Minimum mbuf headroom required by device */

    uint16_t min_mbuf_tailroom_req;
    /**< Minimum mbuf tailroom required by device */

    struct {
        unsigned max_nb_sessions;
        /**< Maximum number of sessions supported by device.
         * If 0, the device does not have any limitation on
         * the number of sessions that can be used.
         */
    } sym;
};

#define RTE_CRYPTODEV_DETACHED  (0)
#define RTE_CRYPTODEV_ATTACHED  (1)

/** Definitions of Crypto device event types */
enum rte_cryptodev_event_type {
    RTE_CRYPTODEV_EVENT_UNKNOWN,    /**< unknown event type */
    RTE_CRYPTODEV_EVENT_ERROR,      /**< error interrupt event */
    RTE_CRYPTODEV_EVENT_MAX         /**< max value of this enum */
};

/** Crypto device queue pair configuration structure. */
struct rte_cryptodev_qp_conf {
    uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
    struct rte_mempool *mp_session;
    /**< The mempool for creating session in sessionless mode */
    struct rte_mempool *mp_session_private;
    /**< The mempool for creating sess private data in sessionless mode */
};
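
/*
 * Illustrative sketch (not part of this header): testing a feature flag
 * reported in rte_cryptodev_info (filled in by rte_cryptodev_info_get(),
 * declared further below) and preparing a queue pair configuration. The
 * descriptor count and the two mempools are hypothetical and must be created
 * by the application.
 *
 *     struct rte_cryptodev_info info;
 *
 *     rte_cryptodev_info_get(dev_id, &info);
 *     if (!(info.feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL))
 *         printf("device cannot process multi-segment mbufs in place\n");
 *
 *     struct rte_cryptodev_qp_conf qp_conf = {
 *         .nb_descriptors = 2048,
 *         .mp_session = session_pool,             // hypothetical mempool
 *         .mp_session_private = session_priv_pool, // hypothetical mempool
 *     };
 */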
/**
 * Function type used for processing crypto ops when enqueue/dequeue burst is
 * called.
 *
 * The callback function is called on enqueue/dequeue burst immediately.
 *
 * @param dev_id       The identifier of the device.
 * @param qp_id        The index of the queue pair on which ops are
 *                     enqueued/dequeued. The value must be in the
 *                     range [0, nb_queue_pairs - 1] previously
 *                     supplied to *rte_cryptodev_configure*.
 * @param ops          The address of an array of *nb_ops* pointers
 *                     to *rte_crypto_op* structures which contain
 *                     the crypto operations to be processed.
 * @param nb_ops       The number of operations to process.
 * @param user_param   The arbitrary user parameter passed in by the
 *                     application when the callback was originally
 *                     registered.
 * @return             The number of ops to be enqueued to the
 *                     crypto device.
 */
typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
        struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);

/**
 * Typedef for application callback function to be registered by application
 * software for notification of device events
 *
 * @param dev_id   Crypto device identifier
 * @param event    Crypto device event to register for notification of.
 * @param cb_arg   User specified parameter to be passed to the user's
 *                 callback function.
 */
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
        enum rte_cryptodev_event_type event, void *cb_arg);


/** Crypto Device statistics */
struct rte_cryptodev_stats {
    uint64_t enqueued_count;
    /**< Count of all operations enqueued */
    uint64_t dequeued_count;
    /**< Count of all operations dequeued */

    uint64_t enqueue_err_count;
    /**< Total error count on operations enqueued */
    uint64_t dequeue_err_count;
    /**< Total error count on operations dequeued */
};

#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
/**< Max length of name of crypto PMD */

/**
 * Get the device identifier for the named crypto device.
 *
 * @param name   device name to select the device structure.
 *
 * @return
 *   - Returns crypto device identifier on success.
 *   - Return -1 on failure to find named crypto device.
 */
extern int
rte_cryptodev_get_dev_id(const char *name);

/**
 * Get the crypto device name given a device identifier.
 *
 * @param dev_id
 *   The identifier of the device
 *
 * @return
 *   - Returns crypto device name.
 *   - Returns NULL if crypto device is not present.
 */
extern const char *
rte_cryptodev_name_get(uint8_t dev_id);

/**
 * Get the total number of crypto devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable crypto devices.
 */
extern uint8_t
rte_cryptodev_count(void);

/**
 * Get the number of crypto devices that use a given driver.
 *
 * @param driver_id   driver identifier.
 *
 * @return
 *   Returns the number of crypto devices.
 */
extern uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id);

/**
 * Get number and identifiers of attached crypto devices that
 * use the same crypto driver.
 *
 * @param driver_name   driver name.
 * @param devices       output devices identifiers.
 * @param nb_devices    maximal number of devices.
 *
 * @return
 *   Returns the number of attached crypto devices.
 */
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
        uint8_t nb_devices);

/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   The identifier of the device
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 if the dev_id value is out of range.
 */
extern int
rte_cryptodev_socket_id(uint8_t dev_id);
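
/*
 * Illustrative sketch (not part of this header): resolving a device by name
 * and recording the NUMA socket it is attached to. The device name is an
 * example only.
 *
 *     int dev_id = rte_cryptodev_get_dev_id("crypto_aesni_mb0");
 *
 *     if (dev_id < 0)
 *         rte_exit(EXIT_FAILURE, "crypto device not found\n");
 *
 *     int socket_id = rte_cryptodev_socket_id((uint8_t)dev_id);
 */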
/** Crypto device configuration structure */
struct rte_cryptodev_config {
    int socket_id;            /**< Socket to allocate resources on */
    uint16_t nb_queue_pairs;
    /**< Number of queue pairs to configure on device */
    uint64_t ff_disable;
    /**< Feature flags to be disabled. Only the following features are
     * allowed to be disabled,
     *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
     *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
     *  - RTE_CRYPTODEV_FF_SECURITY
     */
};

/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param dev_id   The identifier of the device to configure.
 * @param config   The crypto device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
extern int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);

/**
 * Start a device.
 *
 * The device start step is the last one and consists of setting the
 * configured offload features and starting the transmit and the receive
 * units of the device.
 * On success, all basic functions exported by the API (link status,
 * receive/transmit, and so on) can be invoked.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
extern int
rte_cryptodev_start(uint8_t dev_id);

/**
 * Stop a device. The device can be restarted with a call to
 * rte_cryptodev_start().
 *
 * @param dev_id   The identifier of the device.
 */
extern void
rte_cryptodev_stop(uint8_t dev_id);

/**
 * Close a device. The device cannot be restarted!
 *
 * @param dev_id   The identifier of the device.
 *
 * @return
 *   - 0 on successfully closing device
 *   - <0 on failure to close device
 */
extern int
rte_cryptodev_close(uint8_t dev_id);

/**
 * Allocate and set up a receive queue pair for a device.
 *
 * @param dev_id          The identifier of the device.
 * @param queue_pair_id   The index of the queue pair to set up. The
 *                        value must be in the range [0, nb_queue_pair
 *                        - 1] previously supplied to
 *                        rte_cryptodev_configure().
 * @param qp_conf         The pointer to the configuration data to be
 *                        used for the queue pair.
 * @param socket_id       The *socket_id* argument is the socket
 *                        identifier in case of NUMA. The value can be
 *                        *SOCKET_ID_ANY* if there is no NUMA constraint
 *                        for the DMA memory allocated for the receive
 *                        queue pair.
 *
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
extern int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);

/**
 * Get the status of queue pairs setup on a specific crypto device
 *
 * @param dev_id          Crypto device identifier.
 * @param queue_pair_id   The index of the queue pair to set up. The
 *                        value must be in the range [0, nb_queue_pair
 *                        - 1] previously supplied to
 *                        rte_cryptodev_configure().
 * @return
 *   - 0: qp was not configured
 *   - 1: qp was configured
 *   - -EINVAL: device was not configured
 */
__rte_experimental
int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
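
/*
 * Illustrative sketch (not part of this header): the usual bring-up order of
 * a crypto device - configure, set up each queue pair, then start. Error
 * handling is reduced to a single check; dev_id, socket_id and qp_conf come
 * from the earlier sketches.
 *
 *     struct rte_cryptodev_config conf = {
 *         .socket_id = socket_id,
 *         .nb_queue_pairs = 1,
 *         .ff_disable = 0,
 *     };
 *
 *     if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *             rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *                 socket_id) < 0 ||
 *             rte_cryptodev_start(dev_id) < 0)
 *         rte_exit(EXIT_FAILURE, "crypto device setup failed\n");
 */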
/**
 * Get the number of queue pairs on a specific crypto device
 *
 * @param dev_id   Crypto device identifier.
 * @return
 *   - The number of configured queue pairs.
 */
extern uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id);


/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id   The identifier of the device.
 * @param stats    A pointer to a structure of type
 *                 *rte_cryptodev_stats* to be filled with the
 *                 values of device counters.
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
extern int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);

/**
 * Reset the general I/O statistics of a device.
 *
 * @param dev_id   The identifier of the device.
 */
extern void
rte_cryptodev_stats_reset(uint8_t dev_id);

/**
 * Retrieve the contextual information of a device.
 *
 * @param dev_id     The identifier of the device.
 * @param dev_info   A pointer to a structure of type
 *                   *rte_cryptodev_info* to be filled with the
 *                   contextual information of the device.
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_cryptodev_capabilities. The element after
 * the last valid element has its op field set to
 * RTE_CRYPTO_OP_TYPE_UNDEFINED.
 */
extern void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);


/**
 * Register a callback function for a specific device id.
 *
 * @param dev_id   Device id.
 * @param event    The event of interest.
 * @param cb_fn    User supplied callback function to be called.
 * @param cb_arg   Pointer to the parameters for the registered
 *                 callback.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
extern int
rte_cryptodev_callback_register(uint8_t dev_id,
        enum rte_cryptodev_event_type event,
        rte_cryptodev_cb_fn cb_fn, void *cb_arg);

/**
 * Unregister a callback function for a specific device id.
 *
 * @param dev_id   The device identifier.
 * @param event    The event of interest.
 * @param cb_fn    User supplied callback function to be called.
 * @param cb_arg   Pointer to the parameters for the registered
 *                 callback.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
extern int
rte_cryptodev_callback_unregister(uint8_t dev_id,
        enum rte_cryptodev_event_type event,
        rte_cryptodev_cb_fn cb_fn, void *cb_arg);

typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
        struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */

typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
        struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */


struct rte_cryptodev_callback;

/** Structure to keep track of registered callbacks */
RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);

/**
 * Structure used to hold information about the callbacks to be called for a
 * queue pair on enqueue/dequeue.
 */
struct rte_cryptodev_cb {
    struct rte_cryptodev_cb *next;
    /**< Pointer to next callback */
    rte_cryptodev_callback_fn fn;
    /**< Pointer to callback function */
    void *arg;
    /**< Pointer to argument */
};
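
/*
 * Illustrative sketch (not part of this header): registering an application
 * handler for error interrupt events raised by a device. The handler name is
 * hypothetical.
 *
 *     static void
 *     crypto_err_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *             void *cb_arg)
 *     {
 *         RTE_SET_USED(cb_arg);
 *         printf("crypto dev %u raised event %d\n", dev_id, event);
 *     }
 *
 *     ...
 *
 *     rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *             crypto_err_cb, NULL);
 */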
/**
 * @internal
 * Structure used to hold information about the RCU for a queue pair.
 */
struct rte_cryptodev_cb_rcu {
    struct rte_cryptodev_cb *next;
    /**< Pointer to next callback */
    struct rte_rcu_qsbr *qsbr;
    /**< RCU QSBR variable per queue pair */
};

/** The data structure associated with each crypto device. */
struct rte_cryptodev {
    dequeue_pkt_burst_t dequeue_burst;
    /**< Pointer to PMD receive function. */
    enqueue_pkt_burst_t enqueue_burst;
    /**< Pointer to PMD transmit function. */

    struct rte_cryptodev_data *data;
    /**< Pointer to device data */
    struct rte_cryptodev_ops *dev_ops;
    /**< Functions exported by PMD */
    uint64_t feature_flags;
    /**< Feature flags which expose HW/SW features for the given device */
    struct rte_device *device;
    /**< Backing device */

    uint8_t driver_id;
    /**< Crypto driver identifier */

    struct rte_cryptodev_cb_list link_intr_cbs;
    /**< User application callback for interrupts if present */

    void *security_ctx;
    /**< Context for security ops */

    __extension__
    uint8_t attached : 1;
    /**< Flag indicating the device is attached */

    struct rte_cryptodev_cb_rcu *enq_cbs;
    /**< User application callback for pre enqueue processing */

    struct rte_cryptodev_cb_rcu *deq_cbs;
    /**< User application callback for post dequeue processing */
} __rte_cache_aligned;

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id);

/**
 * The data part, with no function pointers, associated with each device.
 *
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration.
 */
struct rte_cryptodev_data {
    uint8_t dev_id;
    /**< Device ID for this instance */
    uint8_t socket_id;
    /**< Socket ID where memory is allocated */
    char name[RTE_CRYPTODEV_NAME_MAX_LEN];
    /**< Unique identifier name */

    __extension__
    uint8_t dev_started : 1;
    /**< Device state: STARTED(1)/STOPPED(0) */

    struct rte_mempool *session_pool;
    /**< Session memory pool */
    void **queue_pairs;
    /**< Array of pointers to queue pairs. */
    uint16_t nb_queue_pairs;
    /**< Number of device queue pairs. */

    void *dev_private;
    /**< PMD-specific private data */
} __rte_cache_aligned;

extern struct rte_cryptodev *rte_cryptodevs;

/**
 * Dequeue a burst of processed crypto operations from a queue on the crypto
 * device. The dequeued operations are stored in *rte_crypto_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_cryptodev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_crypto_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_cryptodev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_cryptodev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @param dev_id   The symmetric crypto device identifier
 * @param qp_id    The index of the queue pair from which to
 *                 retrieve processed packets. The value must be
 *                 in the range [0, nb_queue_pair - 1] previously
 *                 supplied to rte_cryptodev_configure().
 * @param ops      The address of an array of pointers to
 *                 *rte_crypto_op* structures that must be
 *                 large enough to store *nb_ops* pointers in it.
 * @param nb_ops   The maximum number of operations to dequeue.
 *
 * @return
 *   - The number of operations actually dequeued, which is the number
 *     of pointers to *rte_crypto_op* structures effectively supplied to the
 *     *ops* array.
 */
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
        struct rte_crypto_op **ops, uint16_t nb_ops)
{
    struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];

    rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
    nb_ops = (*dev->dequeue_burst)
            (dev->data->queue_pairs[qp_id], ops, nb_ops);
#ifdef RTE_CRYPTO_CALLBACKS
    if (unlikely(dev->deq_cbs != NULL)) {
        struct rte_cryptodev_cb_rcu *list;
        struct rte_cryptodev_cb *cb;

        /* __ATOMIC_RELEASE memory order was used when the
         * call back was inserted into the list.
         * Since there is a clear dependency between loading
         * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
         * not required.
         */
        list = &dev->deq_cbs[qp_id];
        rte_rcu_qsbr_thread_online(list->qsbr, 0);
        cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);

        while (cb != NULL) {
            nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
                    cb->arg);
            cb = cb->next;
        }

        rte_rcu_qsbr_thread_offline(list->qsbr, 0);
    }
#endif
    return nb_ops;
}
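
/*
 * Illustrative sketch (not part of this header): draining a queue pair by
 * repeatedly calling rte_cryptodev_dequeue_burst() until it returns fewer
 * operations than requested, as suggested in the description above.
 *
 *     struct rte_crypto_op *deq_ops[32];
 *     uint16_t nb;
 *
 *     do {
 *         nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, deq_ops, 32);
 *         // process and free the nb dequeued operations here
 *     } while (nb == 32);
 */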
/**
 * Enqueue a burst of operations for processing on a crypto device.
 *
 * The rte_cryptodev_enqueue_burst() function is invoked to place
 * crypto operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_crypto_op* structures.
 *
 * The rte_cryptodev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all packets have been enqueued.
 *
 * @param dev_id   The identifier of the device.
 * @param qp_id    The index of the queue pair on which packets are
 *                 to be enqueued for processing. The value
 *                 must be in the range [0, nb_queue_pairs - 1]
 *                 previously supplied to
 *                 *rte_cryptodev_configure*.
 * @param ops      The address of an array of *nb_ops* pointers
 *                 to *rte_crypto_op* structures which contain
 *                 the crypto operations to be processed.
 * @param nb_ops   The number of operations to process.
 *
 * @return
 *   The number of operations actually enqueued on the crypto device. The
 *   return value can be less than the value of the *nb_ops* parameter when
 *   the crypto device's queue is full or if invalid parameters are specified
 *   in a *rte_crypto_op*.
 */
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
        struct rte_crypto_op **ops, uint16_t nb_ops)
{
    struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];

#ifdef RTE_CRYPTO_CALLBACKS
    if (unlikely(dev->enq_cbs != NULL)) {
        struct rte_cryptodev_cb_rcu *list;
        struct rte_cryptodev_cb *cb;

        /* __ATOMIC_RELEASE memory order was used when the
         * call back was inserted into the list.
         * Since there is a clear dependency between loading
         * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
         * not required.
         */
        list = &dev->enq_cbs[qp_id];
        rte_rcu_qsbr_thread_online(list->qsbr, 0);
        cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);

        while (cb != NULL) {
            nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
                    cb->arg);
            cb = cb->next;
        }

        rte_rcu_qsbr_thread_offline(list->qsbr, 0);
    }
#endif

    rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
    return (*dev->enqueue_burst)(
            dev->data->queue_pairs[qp_id], ops, nb_ops);
}
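
/*
 * Illustrative sketch (not part of this header): enqueuing a prepared burst
 * and retrying the operations the device could not accept (e.g. when its
 * queue is full). The ops array and nb_ops count are assumed to have been
 * prepared by the application.
 *
 *     uint16_t sent = 0;
 *
 *     while (sent < nb_ops)
 *         sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *                 &ops[sent], nb_ops - sent);
 */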
/** Cryptodev symmetric crypto session
 * Each session is derived from a fixed xform chain. Therefore each session
 * has a fixed algo, key, op-type, digest_len etc.
 */
struct rte_cryptodev_sym_session {
    uint64_t opaque_data;
    /**< Can be used for external metadata */
    uint16_t nb_drivers;
    /**< number of elements in sess_data array */
    uint16_t user_data_sz;
    /**< session user data will be placed after sess_data */
    __extension__ struct {
        void *data;
        uint16_t refcnt;
    } sess_data[0];
    /**< Driver specific session material, variable size */
};

/** Cryptodev asymmetric crypto session */
struct rte_cryptodev_asym_session {
    __extension__ void *sess_private_data[0];
    /**< Private asymmetric session material */
};

/**
 * Create a symmetric session mempool.
 *
 * @param name
 *   The unique mempool name.
 * @param nb_elts
 *   The number of elements in the mempool.
 * @param elt_size
 *   The size of the element. This value will be ignored if it is smaller than
 *   the minimum session header size required for the system. For users who
 *   want to use the same mempool for sym session and session private data it
 *   can be the maximum value of all existing devices' private data and session
 *   header sizes.
 * @param cache_size
 *   The number of per-lcore cache elements
 * @param priv_size
 *   The private data size of each session.
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in the case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 *
 * @return
 *   - On success returns the created session mempool.
 *   - On failure returns NULL.
 */
__rte_experimental
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
        uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
        int socket_id);

/**
 * Create symmetric crypto session header (generic with no private data)
 *
 * @param mempool   Symmetric session mempool to allocate session
 *                  objects from
 * @return
 *   - On success return pointer to sym-session
 *   - On failure returns NULL
 */
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mempool);

/**
 * Create asymmetric crypto session header (generic with no private data)
 *
 * @param mempool   mempool to allocate asymmetric session
 *                  objects from
 * @return
 *   - On success return pointer to asym-session
 *   - On failure returns NULL
 */
__rte_experimental
struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mempool);

/**
 * Frees symmetric crypto session header, after checking that all
 * the device private data has been freed, returning it
 * to its original mempool.
 *
 * @param sess   Session header to be freed.
 *
 * @return
 *   - 0 if successful.
 *   - -EINVAL if session is NULL.
 *   - -EBUSY if not all device private data has been freed.
 */
int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);

/**
 * Frees asymmetric crypto session header, after checking that all
 * the device private data has been freed, returning it
 * to its original mempool.
 *
 * @param sess   Session header to be freed.
 *
 * @return
 *   - 0 if successful.
 *   - -EINVAL if session is NULL.
 *   - -EBUSY if not all device private data has been freed.
 */
__rte_experimental
int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess);

/**
 * Fill out private data for the device id, based on its device type.
 *
 * @param dev_id    ID of device that we want the session to be used on
 * @param sess      Session where the private data will be attached to
 * @param xforms    Symmetric crypto transform operations to apply on flow
 *                  processed with this session
 * @param mempool   Mempool where the private data is allocated.
 *
 * @return
 *   - On success, zero.
 *   - -EINVAL if input parameters are invalid.
 *   - -ENOTSUP if crypto device does not support the crypto transform or
 *     does not support symmetric operations.
 *   - -ENOMEM if the private session could not be allocated.
 */
int
rte_cryptodev_sym_session_init(uint8_t dev_id,
        struct rte_cryptodev_sym_session *sess,
        struct rte_crypto_sym_xform *xforms,
        struct rte_mempool *mempool);
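
/*
 * Illustrative sketch (not part of this header): creating one mempool that
 * holds both the session header and the device private data (the single-pool
 * usage described for elt_size above), allocating a session and binding it
 * to a device. cipher_xform is a hypothetical, pre-filled xform chain; the
 * size helpers used here are declared further below.
 *
 *     uint32_t elt_size = rte_cryptodev_sym_get_header_session_size() +
 *             rte_cryptodev_sym_get_private_session_size(dev_id);
 *     struct rte_mempool *pool = rte_cryptodev_sym_session_pool_create(
 *             "sess_pool", 1024, elt_size, 32, 0, socket_id);
 *     struct rte_cryptodev_sym_session *sess =
 *             rte_cryptodev_sym_session_create(pool);
 *
 *     if (sess == NULL ||
 *             rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
 *                 pool) < 0)
 *         rte_exit(EXIT_FAILURE, "cannot create crypto session\n");
 */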
/**
 * Initialize asymmetric session on a device with specific asymmetric xform
 *
 * @param dev_id    ID of device that we want the session to be used on
 * @param sess      Session to be set up on a device
 * @param xforms    Asymmetric crypto transform operations to apply on flow
 *                  processed with this session
 * @param mempool   Mempool to be used for internal allocation.
 *
 * @return
 *   - On success, zero.
 *   - -EINVAL if input parameters are invalid.
 *   - -ENOTSUP if crypto device does not support the crypto transform.
 *   - -ENOMEM if the private session could not be allocated.
 */
__rte_experimental
int
rte_cryptodev_asym_session_init(uint8_t dev_id,
        struct rte_cryptodev_asym_session *sess,
        struct rte_crypto_asym_xform *xforms,
        struct rte_mempool *mempool);

/**
 * Frees private data for the device id, based on its device type,
 * returning it to its mempool. It is the application's responsibility
 * to ensure that private session data is not cleared while there are
 * still in-flight operations using it.
 *
 * @param dev_id   ID of device that uses the session.
 * @param sess     Session containing the reference to the private data
 *
 * @return
 *   - 0 if successful.
 *   - -EINVAL if device is invalid or session is NULL.
 *   - -ENOTSUP if crypto device does not support symmetric operations.
 */
int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
        struct rte_cryptodev_sym_session *sess);

/**
 * Frees resources held by asymmetric session during
 * rte_cryptodev_asym_session_init().
 *
 * @param dev_id   ID of device that uses the asymmetric session.
 * @param sess     Asymmetric session set up on device using
 *                 rte_cryptodev_asym_session_init().
 * @return
 *   - 0 if successful.
 *   - -EINVAL if device is invalid or session is NULL.
 */
__rte_experimental
int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
        struct rte_cryptodev_asym_session *sess);

/**
 * Get the size of the header session, for all registered drivers excluding
 * the user data size.
 *
 * @return
 *   Size of the symmetric header session.
 */
unsigned int
rte_cryptodev_sym_get_header_session_size(void);

/**
 * Get the size of the header session from created session.
 *
 * @param sess
 *   The sym cryptodev session pointer
 *
 * @return
 *   - If sess is not NULL, return the size of the header session including
 *     the private data size defined within sess.
 *   - If sess is NULL, return 0.
 */
__rte_experimental
unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
        struct rte_cryptodev_sym_session *sess);

/**
 * Get the size of the asymmetric session header, for all registered drivers.
 *
 * @return
 *   Size of the asymmetric header session.
 */
__rte_experimental
unsigned int
rte_cryptodev_asym_get_header_session_size(void);

/**
 * Get the size of the private symmetric session data
 * for a device.
 *
 * @param dev_id   The device identifier.
 *
 * @return
 *   - Size of the private data, if successful
 *   - 0 if device is invalid or does not have private
 *     symmetric session
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);

/**
 * Get the size of the private data for asymmetric session
 * on device
 *
 * @param dev_id   The device identifier.
 *
 * @return
 *   - Size of the asymmetric private data, if successful
 *   - 0 if device is invalid or does not have private session
 */
__rte_experimental
unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
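
/*
 * Illustrative sketch (not part of this header): releasing the session from
 * the earlier sketch once no in-flight operation references it any more -
 * first clear the per-device private data, then return the header to its
 * mempool.
 *
 *     rte_cryptodev_sym_session_clear(dev_id, sess);
 *     if (rte_cryptodev_sym_session_free(sess) != 0)
 *         printf("session private data still in use elsewhere\n");
 */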
/**
 * Check if the device index corresponds to a valid, attached crypto device.
 *
 * @param dev_id   Crypto device index.
 *
 * @return
 *   - If the device index is valid (1) or not (0).
 */
unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id);

/**
 * Provide driver identifier.
 *
 * @param name
 *   The pointer to a driver name.
 * @return
 *   The driver type identifier or -1 if no driver found
 */
int rte_cryptodev_driver_id_get(const char *name);

/**
 * Provide driver name.
 *
 * @param driver_id
 *   The driver identifier.
 * @return
 *   The driver name or NULL if no driver found
 */
const char *rte_cryptodev_driver_name_get(uint8_t driver_id);

/**
 * Store user data in a session.
 *
 * @param sess   Session pointer allocated by
 *               *rte_cryptodev_sym_session_create*.
 * @param data   Pointer to the user data.
 * @param size   Size of the user data.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
__rte_experimental
int
rte_cryptodev_sym_session_set_user_data(
        struct rte_cryptodev_sym_session *sess,
        void *data,
        uint16_t size);

/**
 * Get user data stored in a session.
 *
 * @param sess   Session pointer allocated by
 *               *rte_cryptodev_sym_session_create*.
 *
 * @return
 *   - On success return pointer to user data.
 *   - On failure returns NULL.
 */
__rte_experimental
void *
rte_cryptodev_sym_session_get_user_data(
        struct rte_cryptodev_sym_session *sess);
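
/*
 * Illustrative sketch (not part of this header): attaching application state
 * to a session and reading it back later, e.g. when a response is dequeued.
 * The structure type is hypothetical and the user data area is assumed to
 * have been reserved via the priv_size argument of
 * rte_cryptodev_sym_session_pool_create().
 *
 *     struct app_flow_ctx ctx;     // hypothetical application state
 *
 *     rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *     // ... later, e.g. after dequeue ...
 *     struct app_flow_ctx *p =
 *             rte_cryptodev_sym_session_get_user_data(sess);
 */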
/**
 * Perform actual crypto processing (encrypt/digest or auth/decrypt)
 * on user provided data.
 *
 * @param dev_id   The device identifier.
 * @param sess     Cryptodev session structure
 * @param ofs      Start and stop offsets for auth and cipher operations
 * @param vec      Vectorized operation descriptor
 *
 * @return
 *   - Returns number of successfully processed packets.
 */
__rte_experimental
uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
        struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
        struct rte_crypto_sym_vec *vec);

/**
 * Get the size of the raw data-path context buffer.
 *
 * @param dev_id   The device identifier.
 *
 * @return
 *   - If the device supports raw data-path APIs, return the context size.
 *   - If the device does not support the APIs, return -1.
 */
__rte_experimental
int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);

/**
 * Union of different crypto session types, including session-less xform
 * pointer.
 */
union rte_cryptodev_session_ctx {
    struct rte_cryptodev_sym_session *crypto_sess;
    struct rte_crypto_sym_xform *xform;
    struct rte_security_session *sec_sess;
};

/**
 * Enqueue a vectorized operation descriptor into the device queue but the
 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
 * is called.
 *
 * @param qp              Driver specific queue pair data.
 * @param drv_ctx         Driver specific context data.
 * @param vec             Vectorized operation descriptor.
 * @param ofs             Start and stop offsets for auth and cipher
 *                        operations.
 * @param user_data       The array of user data for dequeue later.
 * @param enqueue_status  Driver written value to specify the
 *                        enqueue status. Possible values:
 *                        - 1: The number of operations returned are
 *                             enqueued successfully.
 *                        - 0: The number of operations returned are
 *                             cached into the queue but are not processed
 *                             until rte_cryptodev_raw_enqueue_done() is
 *                             called.
 *                        - negative integer: Error occurred.
 * @return
 *   - The number of operations in the descriptor successfully enqueued or
 *     cached into the queue but not enqueued yet, depending on the
 *     "enqueue_status" value.
 */
typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
        void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
        union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);

/**
 * Enqueue single raw data vector into the device queue but the driver may or
 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
 *
 * @param qp              Driver specific queue pair data.
 * @param drv_ctx         Driver specific context data.
 * @param data_vec        The buffer data vector.
 * @param n_data_vecs     Number of buffer data vectors.
 * @param ofs             Start and stop offsets for auth and cipher
 *                        operations.
 * @param iv              IV virtual and IOVA addresses
 * @param digest          digest virtual and IOVA addresses
 * @param aad_or_auth_iv  AAD or auth IV virtual and IOVA addresses,
 *                        depending on the algorithm used.
 * @param user_data       The user data.
 * @return
 *   - 1: The data vector is enqueued successfully.
 *   - 0: The data vector is cached into the queue but is not processed
 *        until rte_cryptodev_raw_enqueue_done() is called.
 *   - negative integer: failure.
 */
typedef int (*cryptodev_sym_raw_enqueue_t)(
        void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
        uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
        void *user_data);

/**
 * Inform the cryptodev queue pair to start processing or finish dequeuing all
 * enqueued/dequeued operations.
 *
 * @param qp        Driver specific queue pair data.
 * @param drv_ctx   Driver specific context data.
 * @param n         The total number of processed operations.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
        uint32_t n);

/**
 * Typedef for a user-provided function that the driver calls to get the
 * dequeue count. The function may return a fixed number or the number parsed
 * from the user data stored in the first processed operation.
 *
 * @param user_data   Dequeued user data.
 * @return
 *   - The number of operations to be dequeued.
 **/
typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);

/**
 * Typedef for a user-provided function that handles the post-dequeue
 * operation, such as filling in the status.
 *
 * @param user_data      Dequeued user data.
 * @param index          Index number of the processed descriptor.
 * @param is_op_success  Operation status provided by the driver.
 **/
typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
        uint32_t index, uint8_t is_op_success);

/**
 * Dequeue a burst of symmetric crypto processing.
 *
 * @param qp                     Driver specific queue pair data.
 * @param drv_ctx                Driver specific context data.
 * @param get_dequeue_count      User provided callback function to
 *                               obtain dequeue operation count.
 * @param max_nb_to_dequeue      When get_dequeue_count is NULL this
 *                               value is used to pass the maximum
 *                               number of operations to be dequeued.
 * @param post_dequeue           User provided callback function to
 *                               post-process a dequeued operation.
 * @param out_user_data          User data pointer array to be retrieved
 *                               from the device queue. If
 *                               *is_user_data_array* is set there
 *                               should be enough room to store all
 *                               user data.
 * @param is_user_data_array     Set 1 if every dequeued user data will
 *                               be written into out_user_data array.
 *                               Set 0 if only the first user data will
 *                               be written into out_user_data array.
 * @param n_success              Driver written value to specify the
 *                               total successful operations count.
 * @param dequeue_status         Driver written value to specify the
 *                               dequeue status. Possible values:
 *                               - 1: Successfully dequeued the number
 *                                    of operations returned. The user
 *                                    data previously set during enqueue
 *                                    is stored in the "out_user_data".
 *                               - 0: The number of operations returned
 *                                    are completed and the user data is
 *                                    stored in the "out_user_data", but
 *                                    they are not freed from the queue
 *                                    until
 *                                    rte_cryptodev_raw_dequeue_done()
 *                                    is called.
 *                               - negative integer: Error occurred.
 * @return
 *   - The number of operations dequeued or completed but not freed from the
 *     queue, depending on the "dequeue_status" value.
 */
typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
        uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success, int *dequeue_status);

/**
 * Dequeue a symmetric crypto processing.
 *
 * @param qp               Driver specific queue pair data.
 * @param drv_ctx          Driver specific context data.
 * @param dequeue_status   Driver written value to specify the
 *                         dequeue status. Possible values:
 *                         - 1: Successfully dequeued an operation.
 *                              The user data is returned.
 *                         - 0: The first operation in the queue
 *                              is completed and the user data
 *                              previously set during enqueue is
 *                              returned, but it is not freed from
 *                              the queue until
 *                              rte_cryptodev_raw_dequeue_done() is
 *                              called.
 *                         - negative integer: Error occurred.
 * @param op_status        Driver written value to specify
 *                         operation status.
 * @return
 *   - The user data pointer retrieved from the device queue or NULL if no
 *     operation is ready for dequeue.
 */
typedef void * (*cryptodev_sym_raw_dequeue_t)(
        void *qp, uint8_t *drv_ctx, int *dequeue_status,
        enum rte_crypto_op_status *op_status);

/**
 * Context data for raw data-path API crypto process. The buffer of this
 * structure is to be allocated by the user application with a size equal to
 * or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
 */
struct rte_crypto_raw_dp_ctx {
    void *qp_data;

    cryptodev_sym_raw_enqueue_t enqueue;
    cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
    cryptodev_sym_raw_operation_done_t enqueue_done;
    cryptodev_sym_raw_dequeue_t dequeue;
    cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
    cryptodev_sym_raw_operation_done_t dequeue_done;

    /* Driver specific context data */
    __extension__ uint8_t drv_ctx_data[];
};

/**
 * Configure raw data-path context data.
 *
 * NOTE:
 * After the context data is configured, the user should call
 * rte_cryptodev_raw_attach_session() before using it in
 * rte_cryptodev_raw_enqueue/dequeue function call.
 *
 * @param dev_id        The device identifier.
 * @param qp_id         The index of the queue pair from which to
 *                      retrieve processed packets. The value must be
 *                      in the range [0, nb_queue_pair - 1] previously
 *                      supplied to rte_cryptodev_configure().
 * @param ctx           The raw data-path context data.
 * @param sess_type     Session type.
 * @param session_ctx   Session context data.
 * @param is_update     Set 0 if it is to initialize the ctx.
 *                      Set 1 if ctx is initialized and only to update
 *                      session context data.
 * @return
 *   - On success return 0.
 *   - On failure return negative integer.
 */
__rte_experimental
int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx,
        uint8_t is_update);

/**
 * Enqueue a vectorized operation descriptor into the device queue but the
 * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
 * is called.
 *
 * @param ctx             The initialized raw data-path context data.
 * @param vec             Vectorized operation descriptor.
 * @param ofs             Start and stop offsets for auth and cipher
 *                        operations.
 * @param user_data       The array of user data for dequeue later.
 * @param enqueue_status  Driver written value to specify the
 *                        enqueue status. Possible values:
 *                        - 1: The number of operations returned are
 *                             enqueued successfully.
 *                        - 0: The number of operations returned are
 *                             cached into the queue but are not processed
 *                             until rte_cryptodev_raw_enqueue_done() is
 *                             called.
 *                        - negative integer: Error occurred.
 * @return
 *   - The number of operations in the descriptor successfully enqueued or
 *     cached into the queue but not enqueued yet, depending on the
 *     "enqueue_status" value.
 */
__rte_experimental
uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void **user_data, int *enqueue_status);
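
/*
 * Illustrative sketch (not part of this header): setting up a raw data-path
 * context on a queue pair for an existing symmetric session. Error handling
 * is omitted and the malloc-based allocation is only one possible choice.
 *
 *     int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *     struct rte_crypto_raw_dp_ctx *dp_ctx = malloc(size);
 *     union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *     rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp_ctx,
 *             RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */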
/**
 * Enqueue a single raw data vector into the device queue. The driver may or
 * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
 *
 * @param	ctx		The initialized raw data-path context data.
 * @param	data_vec	The buffer data vector.
 * @param	n_data_vecs	Number of buffer data vectors.
 * @param	ofs		Start and stop offsets for auth and cipher
 *				operations.
 * @param	iv		IV virtual and IOVA addresses.
 * @param	digest		Digest virtual and IOVA addresses.
 * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
 *				depending on the algorithm used.
 * @param	user_data	The user data.
 * @return
 *   - 1: The data vector is enqueued successfully.
 *   - 0: The data vector is cached into the queue but is not processed
 *        until rte_cryptodev_raw_enqueue_done() is called.
 *   - negative integer: Failure.
 */
__rte_experimental
static __rte_always_inline int
rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
}

/**
 * Start processing all operations enqueued since the last
 * rte_cryptodev_configure_raw_dp_ctx() call.
 *
 * @param	ctx	The initialized raw data-path context data.
 * @param	n	The number of operations cached.
 * @return
 *   - On success return 0.
 *   - On failure return a negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
	uint32_t n);

/**
 * Dequeue a burst of symmetric crypto processing results.
 *
 * @param	ctx			The initialized raw data-path context
 *					data.
 * @param	get_dequeue_count	User provided callback function to
 *					obtain the dequeue operation count.
 * @param	max_nb_to_dequeue	When get_dequeue_count is NULL, this
 *					value is used to pass the maximum
 *					number of operations to be dequeued.
 * @param	post_dequeue		User provided callback function to
 *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from the device queue. When
 *					*is_user_data_array* is set, there
 *					should be enough room to store all
 *					user data.
 * @param	is_user_data_array	Set 1 if every dequeued user data will
 *					be written into the out_user_data
 *					array.
 *					Set 0 if only the first user data will
 *					be written into the out_user_data
 *					array.
 * @param	n_success		Driver written value to specify the
 *					count of successful operations.
 * @param	dequeue_status		Driver written value to specify the
 *					dequeue status. Possible values:
 *					- 1: Successfully dequeued the number
 *					     of operations returned. The user
 *					     data previously set during enqueue
 *					     is stored in "out_user_data".
 *					- 0: The returned number of operations
 *					     is completed and the user data is
 *					     stored in "out_user_data", but the
 *					     operations are not freed from the
 *					     queue until
 *					     rte_cryptodev_raw_dequeue_done()
 *					     is called.
 *					- negative integer: Error occurred.
 * @return
 *   - The number of operations dequeued, or completed but not freed from the
 *     queue, depending on the "dequeue_status" value.
 */
__rte_experimental
uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status);
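
/*
 * Illustrative sketch, not part of the API: drain completed operations from a
 * previously configured raw data-path context. The callback mark_status, the
 * struct op_udata layout and MAX_BURST are hypothetical; the typedefs
 * rte_cryptodev_raw_get_dequeue_count_t and rte_cryptodev_raw_post_dequeue_t
 * are assumed to be the ones declared earlier in this file.
 *
 *	struct op_udata {
 *		void *cookie;			// application reference
 *		enum rte_crypto_op_status status;
 *	};
 *
 *	// Per-operation post-dequeue hook: record the driver-reported status
 *	// into the caller-owned user data.
 *	static void
 *	mark_status(void *user_data, uint32_t index __rte_unused,
 *			uint8_t is_op_success)
 *	{
 *		struct op_udata *u = user_data;
 *
 *		u->status = is_op_success ? RTE_CRYPTO_OP_STATUS_SUCCESS :
 *				RTE_CRYPTO_OP_STATUS_ERROR;
 *	}
 *
 *	void *udata[MAX_BURST];
 *	uint32_t n, n_ok = 0;
 *	int deq_status;
 *
 *	// No count callback: cap the burst at MAX_BURST and collect every
 *	// user data pointer (is_user_data_array = 1).
 *	n = rte_cryptodev_raw_dequeue_burst(raw_ctx, NULL, MAX_BURST,
 *			mark_status, udata, 1, &n_ok, &deq_status);
 *	if (deq_status == 0)
 *		// Results were read but not yet released from the queue.
 *		rte_cryptodev_raw_dequeue_done(raw_ctx, n);
 */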
/**
 * Dequeue a single symmetric crypto processing result.
 *
 * @param	ctx		The initialized raw data-path context
 *				data.
 * @param	dequeue_status	Driver written value to specify the
 *				dequeue status. Possible values:
 *				- 1: Successfully dequeued an operation.
 *				     The user data is returned.
 *				- 0: The first operation in the queue
 *				     is completed and the user data
 *				     previously set during enqueue is
 *				     returned, but it is not freed from
 *				     the queue until
 *				     rte_cryptodev_raw_dequeue_done() is
 *				     called.
 *				- negative integer: Error occurred.
 * @param	op_status	Driver written value to specify the
 *				operation status.
 * @return
 *   - The user data pointer retrieved from the device queue, or NULL if no
 *     operation is ready for dequeue.
 */
__rte_experimental
static __rte_always_inline void *
rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
	int *dequeue_status, enum rte_crypto_op_status *op_status)
{
	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
		op_status);
}

/**
 * Inform the queue pair that the dequeue of operations is finished.
 *
 * @param	ctx	The initialized raw data-path context data.
 * @param	n	The number of operations.
 * @return
 *   - On success return 0.
 *   - On failure return a negative integer.
 */
__rte_experimental
int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
	uint32_t n);

/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops enqueue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_enq_callback().
 *
 * Callbacks registered by the application do not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leaks.
 * The application is expected to call this add API after
 * rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be enqueued for processing. The value
 *				must be in the range [0, nb_queue_pairs - 1]
 *				previously supplied to
 *				*rte_cryptodev_configure*.
 * @param	cb_fn		The callback function.
 * @param	cb_arg		A generic pointer parameter which will be passed
 *				to each invocation of the callback function on
 *				this crypto device and queue pair.
 *
 * @return
 *   - NULL on error; rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
	uint16_t qp_id,
	rte_cryptodev_callback_fn cb_fn,
	void *cb_arg);
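
/*
 * Illustrative sketch, not part of the API: count enqueued operations with an
 * enqueue callback. The callback name, counter and handle_error() are
 * hypothetical; the rte_cryptodev_callback_fn typedef is assumed to be the
 * one declared earlier in this file (it receives the ops burst and returns
 * the number of ops to actually enqueue), and rte_errno comes from
 * <rte_errno.h>.
 *
 *	static uint16_t
 *	count_enq_cb(uint16_t dev_id __rte_unused, uint16_t qp_id __rte_unused,
 *			struct rte_crypto_op **ops __rte_unused,
 *			uint16_t nb_ops, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		*counter += nb_ops;
 *		return nb_ops;		// pass the burst through unchanged
 *	}
 *
 *	static uint64_t enq_count;
 *	struct rte_cryptodev_cb *cb;
 *
 *	// Register after rte_cryptodev_configure(); keep the handle so the
 *	// callback can be removed before the device is reconfigured.
 *	cb = rte_cryptodev_add_enq_callback(cdev_id, qpid, count_enq_cb,
 *			&enq_count);
 *	if (cb == NULL)
 *		handle_error(rte_errno);	// hypothetical error handler
 *
 *	rte_cryptodev_remove_enq_callback(cdev_id, qpid, cb);
 */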
/**
 * Remove a user callback function for a given crypto device and queue pair.
 *
 * This function is used to remove enqueue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_enq_callback().
 *
 * @param	dev_id	The identifier of the device.
 * @param	qp_id	The index of the queue pair on which ops are
 *			to be enqueued. The value must be in the
 *			range [0, nb_queue_pairs - 1] previously
 *			supplied to *rte_cryptodev_configure*.
 * @param	cb	Pointer to the user supplied callback created via
 *			rte_cryptodev_add_enq_callback().
 *
 * @return
 *   - 0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
__rte_experimental
int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
	uint16_t qp_id,
	struct rte_cryptodev_cb *cb);

/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops dequeue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_deq_callback().
 *
 * Callbacks registered by the application do not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leaks.
 * The application is expected to call this add API after
 * rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be dequeued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb_fn		The callback function.
 * @param	cb_arg		A generic pointer parameter which will be passed
 *				to each invocation of the callback function on
 *				this crypto device and queue pair.
 *
 * @return
 *   - NULL on error; rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
__rte_experimental
struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
	uint16_t qp_id,
	rte_cryptodev_callback_fn cb_fn,
	void *cb_arg);

/**
 * Remove a user callback function for a given crypto device and queue pair.
 *
 * This function is used to remove dequeue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_deq_callback().
 *
 * @param	dev_id	The identifier of the device.
 * @param	qp_id	The index of the queue pair on which ops are
 *			to be dequeued. The value must be in the
 *			range [0, nb_queue_pairs - 1] previously
 *			supplied to *rte_cryptodev_configure*.
 * @param	cb	Pointer to the user supplied callback created via
 *			rte_cryptodev_add_deq_callback().
 *
 * @return
 *   - 0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
__rte_experimental
int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
	uint16_t qp_id,
	struct rte_cryptodev_cb *cb);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTODEV_H_ */