xref: /dpdk/lib/cryptodev/rte_cryptodev.h (revision b225783dda7a254fc49bc4d43b8b58f67e03be1d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include "rte_kvargs.h"
22 #include "rte_crypto.h"
23 #include "rte_dev.h"
24 #include <rte_common.h>
25 #include <rte_config.h>
26 #include <rte_rcu_qsbr.h>
27 
28 #include "rte_cryptodev_trace_fp.h"
29 
30 extern const char **rte_cyptodev_names;
31 
32 /* Logging Macros */
33 
34 #define CDEV_LOG_ERR(...) \
35 	RTE_LOG(ERR, CRYPTODEV, \
36 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
37 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
38 
39 #define CDEV_LOG_INFO(...) \
40 	RTE_LOG(INFO, CRYPTODEV, \
41 		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
42 			RTE_FMT_TAIL(__VA_ARGS__,)))
43 
44 #define CDEV_LOG_DEBUG(...) \
45 	RTE_LOG(DEBUG, CRYPTODEV, \
46 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
47 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
48 
49 #define CDEV_PMD_TRACE(...) \
50 	RTE_LOG(DEBUG, CRYPTODEV, \
51 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
52 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
53 
54 /**
55  * A macro that returns a pointer to an offset from the start
56  * of the crypto operation structure (rte_crypto_op).
57  *
58  * The returned pointer is cast to type t.
59  *
60  * @param c
61  *   The crypto operation.
62  * @param o
63  *   The offset from the start of the crypto operation.
64  * @param t
65  *   The type to cast the result into.
66  */
67 #define rte_crypto_op_ctod_offset(c, t, o)	\
68 	((t)((char *)(c) + (o)))
69 
70 /**
71  * A macro that returns the physical address that points
72  * to an offset from the start of the crypto operation
73  * (rte_crypto_op)
74  *
75  * @param c
76  *   The crypto operation.
77  * @param o
78  *   The offset from the start of the crypto operation
79  *   to calculate address from.
80  */
81 #define rte_crypto_op_ctophys_offset(c, o)	\
82 	(rte_iova_t)((c)->phys_addr + (o))
83 
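/*
 * Usage sketch for the two offset macros above (illustrative only). It
 * assumes the application reserved room for a cipher IV directly after the
 * rte_crypto_sym_op inside each operation object; this layout and the
 * IV_OFFSET name are application-defined, not part of this API.
 *
 *   #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 *                      sizeof(struct rte_crypto_sym_op))
 *
 *   static uint8_t *
 *   op_iv_va(struct rte_crypto_op *op)
 *   {
 *       // Virtual address of the per-op IV area
 *       return rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *   }
 *
 *   static rte_iova_t
 *   op_iv_iova(struct rte_crypto_op *op)
 *   {
 *       // Matching IO address, derived from op->phys_addr
 *       return rte_crypto_op_ctophys_offset(op, IV_OFFSET);
 *   }
 */
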
84 /**
85  * Crypto parameters range description
86  */
87 struct rte_crypto_param_range {
88 	uint16_t min;	/**< minimum size */
89 	uint16_t max;	/**< maximum size */
90 	uint16_t increment;
91 	/**< if a range of sizes are supported,
92 	 * this parameter is used to indicate
93 	 * increments in byte size that are supported
94 	 * between the minimum and maximum
95 	 */
96 };
97 
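/*
 * Illustrative helper showing how the min/max/increment convention above can
 * be checked by an application; the rte_cryptodev_sym_capability_check_*()
 * functions declared later perform an equivalent check.
 *
 *   static int
 *   param_range_check(uint16_t size, const struct rte_crypto_param_range *r)
 *   {
 *       // increment == 0 means a single supported size (min == max)
 *       if (r->increment == 0)
 *           return (size == r->min) ? 0 : -1;
 *       if (size < r->min || size > r->max)
 *           return -1;
 *       // otherwise the size must land on an increment step from min
 *       return ((size - r->min) % r->increment == 0) ? 0 : -1;
 *   }
 */
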
98 /**
99  * Data-unit supported lengths of cipher algorithms.
100  * A bit can represent any set of data-unit sizes
101  * (single size, multiple sizes, range, etc.).
102  */
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
105 
106 /**
107  * Symmetric Crypto Capability
108  */
109 struct rte_cryptodev_symmetric_capability {
110 	enum rte_crypto_sym_xform_type xform_type;
111 	/**< Transform type : Authentication / Cipher / AEAD */
112 	RTE_STD_C11
113 	union {
114 		struct {
115 			enum rte_crypto_auth_algorithm algo;
116 			/**< authentication algorithm */
117 			uint16_t block_size;
118 			/**< algorithm block size */
119 			struct rte_crypto_param_range key_size;
120 			/**< auth key size range */
121 			struct rte_crypto_param_range digest_size;
122 			/**< digest size range */
123 			struct rte_crypto_param_range aad_size;
124 			/**< Additional authentication data size range */
125 			struct rte_crypto_param_range iv_size;
126 			/**< Initialisation vector data size range */
127 		} auth;
128 		/**< Symmetric Authentication transform capabilities */
129 		struct {
130 			enum rte_crypto_cipher_algorithm algo;
131 			/**< cipher algorithm */
132 			uint16_t block_size;
133 			/**< algorithm block size */
134 			struct rte_crypto_param_range key_size;
135 			/**< cipher key size range */
136 			struct rte_crypto_param_range iv_size;
137 			/**< Initialisation vector data size range */
138 			uint32_t dataunit_set;
139 			/**<
140 			 * Supported data-unit lengths:
141 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
142 			 * or 0 for lengths defined in the algorithm standard.
143 			 */
144 		} cipher;
145 		/**< Symmetric Cipher transform capabilities */
146 		struct {
147 			enum rte_crypto_aead_algorithm algo;
148 			/**< AEAD algorithm */
149 			uint16_t block_size;
150 			/**< algorithm block size */
151 			struct rte_crypto_param_range key_size;
152 			/**< AEAD key size range */
153 			struct rte_crypto_param_range digest_size;
154 			/**< digest size range */
155 			struct rte_crypto_param_range aad_size;
156 			/**< Additional authentication data size range */
157 			struct rte_crypto_param_range iv_size;
158 			/**< Initialisation vector data size range */
159 		} aead;
160 	};
161 };
162 
163 /**
164  * Asymmetric Xform Crypto Capability
165  *
166  */
167 struct rte_cryptodev_asymmetric_xform_capability {
168 	enum rte_crypto_asym_xform_type xform_type;
169 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
170 
171 	uint32_t op_types;
172 	/**< bitmask for supported rte_crypto_asym_op_type */
173 
174 	__extension__
175 	union {
176 		struct rte_crypto_param_range modlen;
177 		/**< Range of modulus length supported by modulus based xform.
178 		 * Value 0 mean implementation default
179 		 */
180 	};
181 };
182 
183 /**
184  * Asymmetric Crypto Capability
185  *
186  */
187 struct rte_cryptodev_asymmetric_capability {
188 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
189 };
190 
191 
192 /** Structure used to capture a capability of a crypto device */
193 struct rte_cryptodev_capabilities {
194 	enum rte_crypto_op_type op;
195 	/**< Operation type */
196 
197 	RTE_STD_C11
198 	union {
199 		struct rte_cryptodev_symmetric_capability sym;
200 		/**< Symmetric operation capability parameters */
201 		struct rte_cryptodev_asymmetric_capability asym;
202 		/**< Asymmetric operation capability parameters */
203 	};
204 };
205 
206 /** Structure used to describe crypto algorithms */
207 struct rte_cryptodev_sym_capability_idx {
208 	enum rte_crypto_sym_xform_type type;
209 	union {
210 		enum rte_crypto_cipher_algorithm cipher;
211 		enum rte_crypto_auth_algorithm auth;
212 		enum rte_crypto_aead_algorithm aead;
213 	} algo;
214 };
215 
216 /**
217  * Structure used to describe asymmetric crypto xforms
218  * Each xform maps to one asym algorithm.
219  *
220  */
221 struct rte_cryptodev_asym_capability_idx {
222 	enum rte_crypto_asym_xform_type type;
223 	/**< Asymmetric xform (algo) type */
224 };
225 
226 /**
227  * Provide capabilities available for defined device and algorithm
228  *
229  * @param	dev_id		The identifier of the device.
230  * @param	idx		Description of crypto algorithms.
231  *
232  * @return
233  *   - Return description of the symmetric crypto capability if it exists.
234  *   - Return NULL if the capability does not exist.
235  */
236 const struct rte_cryptodev_symmetric_capability *
237 rte_cryptodev_sym_capability_get(uint8_t dev_id,
238 		const struct rte_cryptodev_sym_capability_idx *idx);
239 
240 /**
241  *  Provide capabilities available for defined device and xform
242  *
243  * @param	dev_id		The identifier of the device.
244  * @param	idx		Description of asym crypto xform.
245  *
246  * @return
247  *   - Return description of the asymmetric crypto capability if it exists.
248  *   - Return NULL if the capability does not exist.
249  */
250 __rte_experimental
251 const struct rte_cryptodev_asymmetric_xform_capability *
252 rte_cryptodev_asym_capability_get(uint8_t dev_id,
253 		const struct rte_cryptodev_asym_capability_idx *idx);
254 
255 /**
256  * Check if key size and initial vector are supported
257  * in crypto cipher capability
258  *
259  * @param	capability	Description of the symmetric crypto capability.
260  * @param	key_size	Cipher key size.
261  * @param	iv_size		Cipher initial vector size.
262  *
263  * @return
264  *   - Return 0 if the parameters are in range of the capability.
265  *   - Return -1 if the parameters are out of range of the capability.
266  */
267 int
268 rte_cryptodev_sym_capability_check_cipher(
269 		const struct rte_cryptodev_symmetric_capability *capability,
270 		uint16_t key_size, uint16_t iv_size);
271 
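/*
 * Illustrative sketch: query whether a device supports AES-CBC with a 16-byte
 * key and a 16-byte IV before creating a session, using only the capability
 * helpers declared above. Error handling is minimal on purpose.
 *
 *   static int
 *   check_aes_cbc_128(uint8_t dev_id)
 *   {
 *       struct rte_cryptodev_sym_capability_idx idx = {
 *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *           .algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *       };
 *       const struct rte_cryptodev_symmetric_capability *cap;
 *
 *       cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *       if (cap == NULL)
 *           return -1; // algorithm not supported at all
 *
 *       // 0 if both sizes fall inside the advertised ranges
 *       return rte_cryptodev_sym_capability_check_cipher(cap, 16, 16);
 *   }
 */
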
272 /**
273  * Check if key size and initial vector are supported
274  * in crypto auth capability
275  *
276  * @param	capability	Description of the symmetric crypto capability.
277  * @param	key_size	Auth key size.
278  * @param	digest_size	Auth digest size.
279  * @param	iv_size		Auth initial vector size.
280  *
281  * @return
282  *   - Return 0 if the parameters are in range of the capability.
283  *   - Return -1 if the parameters are out of range of the capability.
284  */
285 int
286 rte_cryptodev_sym_capability_check_auth(
287 		const struct rte_cryptodev_symmetric_capability *capability,
288 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
289 
290 /**
291  * Check if key, digest, AAD and initial vector sizes are supported
292  * in crypto AEAD capability
293  *
294  * @param	capability	Description of the symmetric crypto capability.
295  * @param	key_size	AEAD key size.
296  * @param	digest_size	AEAD digest size.
297  * @param	aad_size	AEAD AAD size.
298  * @param	iv_size		AEAD IV size.
299  *
300  * @return
301  *   - Return 0 if the parameters are in range of the capability.
302  *   - Return -1 if the parameters are out of range of the capability.
303  */
304 int
305 rte_cryptodev_sym_capability_check_aead(
306 		const struct rte_cryptodev_symmetric_capability *capability,
307 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
308 		uint16_t iv_size);
309 
310 /**
311  * Check if op type is supported
312  *
313  * @param	capability	Description of the asymmetric crypto capability.
314  * @param	op_type		op type
315  *
316  * @return
317  *   - Return 1 if the op type is supported
318  *   - Return 0 if unsupported
319  */
320 __rte_experimental
321 int
322 rte_cryptodev_asym_xform_capability_check_optype(
323 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
324 		enum rte_crypto_asym_op_type op_type);
325 
326 /**
327  * Check if modulus length is in supported range
328  *
329  * @param	capability	Description of the asymmetric crypto capability.
330  * @param	modlen		modulus length.
331  *
332  * @return
333  *   - Return 0 if the parameters are in range of the capability.
334  *   - Return -1 if the parameters are out of range of the capability.
335  */
336 __rte_experimental
337 int
338 rte_cryptodev_asym_xform_capability_check_modlen(
339 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
340 		uint16_t modlen);
341 
342 /**
343  * Provide the cipher algorithm enum, given an algorithm string
344  *
345  * @param	algo_enum	A pointer to the cipher algorithm
346  *				enum to be filled
347  * @param	algo_string	Cipher algo string
348  *
349  * @return
350  * - Return -1 if string is not valid
351  * - Return 0 if the string is valid
352  */
353 int
354 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
355 		const char *algo_string);
356 
357 /**
358  * Provide the authentication algorithm enum, given an algorithm string
359  *
360  * @param	algo_enum	A pointer to the authentication algorithm
361  *				enum to be filled
362  * @param	algo_string	Authentication algo string
363  *
364  * @return
365  * - Return -1 if string is not valid
366  * - Return 0 if the string is valid
367  */
368 int
369 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
370 		const char *algo_string);
371 
372 /**
373  * Provide the AEAD algorithm enum, given an algorithm string
374  *
375  * @param	algo_enum	A pointer to the AEAD algorithm
376  *				enum to be filled
377  * @param	algo_string	AEAD algorithm string
378  *
379  * @return
380  * - Return -1 if string is not valid
381  * - Return 0 if the string is valid
382  */
383 int
384 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
385 		const char *algo_string);
386 
387 /**
388  * Provide the Asymmetric xform enum, given an xform string
389  *
390  * @param	xform_enum	A pointer to the xform type
391  *				enum to be filled
392  * @param	xform_string	xform string
393  *
394  * @return
395  * - Return -1 if string is not valid
396  * - Return 0 if the string is valid
397  */
398 __rte_experimental
399 int
400 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
401 		const char *xform_string);
402 
403 
404 /** Macro used at end of crypto PMD list */
405 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
406 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
407 
408 
409 /**
410  * Crypto device supported feature flags
411  *
412  * Note:
413  * New features flags should be added to the end of the list
414  *
415  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
416  */
417 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
418 /**< Symmetric crypto operations are supported */
419 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
420 /**< Asymmetric crypto operations are supported */
421 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
422 /**< Chaining symmetric crypto operations are supported */
423 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
424 /**< Utilises CPU SIMD SSE instructions */
425 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
426 /**< Utilises CPU SIMD AVX instructions */
427 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
428 /**< Utilises CPU SIMD AVX2 instructions */
429 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
430 /**< Utilises CPU AES-NI instructions */
431 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
432 /**< Operations are off-loaded to an
433  * external hardware accelerator
434  */
435 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
436 /**< Utilises CPU SIMD AVX512 instructions */
437 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
438 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
439  * are supported
440  */
441 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
442 /**< Out-of-place Scatter-gather (SGL) buffers are
443  * supported in input and output
444  */
445 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
446 /**< Out-of-place Scatter-gather (SGL) buffers are supported
447  * in input, combined with linear buffers (LB), with a
448  * single segment in output
449  */
450 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
451 /**< Out-of-place Scatter-gather (SGL) buffers are supported
452  * in output, combined with linear buffers (LB) in input
453  */
454 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
455 /**< Out-of-place linear buffers (LB) are supported in input and output */
456 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
457 /**< Utilises CPU NEON instructions */
458 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
459 /**< Utilises ARM CPU Cryptographic Extensions */
460 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
461 /**< Support Security Protocol Processing */
462 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
463 /**< Support RSA Private Key OP with exponent */
464 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
465 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
466 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
467 /**< Support encrypted-digest operations where digest is appended to data */
468 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
469 /**< Support asymmetric session-less operations */
470 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
471 /**< Support symmetric cpu-crypto processing */
472 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
473 /**< Support symmetric session-less operations */
474 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
475 /**< Support operations on data which is not byte aligned */
476 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
477 /**< Support accelerator specific symmetric raw data-path APIs */
478 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
479 /**< Support operations on multiple data-units message */
480 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
481 /**< Support wrapped key in cipher xform  */
482 
483 /**
484  * Get the name of a crypto device feature flag
485  *
486  * @param	flag	The mask describing the flag.
487  *
488  * @return
489  *   The name of this flag, or NULL if it's not a valid feature flag.
490  */
491 
492 extern const char *
493 rte_cryptodev_get_feature_name(uint64_t flag);
494 
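/*
 * Illustrative sketch: print the name of every feature flag set in a
 * feature-flags value (for example the one reported in
 * rte_cryptodev_info.feature_flags). Assumes <stdio.h> for printf().
 *
 *   static void
 *   dump_feature_flags(uint64_t feature_flags)
 *   {
 *       unsigned int i;
 *
 *       for (i = 0; i < 64; i++) {
 *           uint64_t flag = 1ULL << i;
 *           const char *name;
 *
 *           if ((feature_flags & flag) == 0)
 *               continue;
 *           name = rte_cryptodev_get_feature_name(flag);
 *           // NULL means the bit is not a known feature flag
 *           printf("  %s\n", name != NULL ? name : "UNKNOWN");
 *       }
 *   }
 */
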
495 /**  Crypto device information */
496 struct rte_cryptodev_info {
497 	const char *driver_name;	/**< Driver name. */
498 	uint8_t driver_id;		/**< Driver identifier */
499 	struct rte_device *device;	/**< Generic device information. */
500 
501 	uint64_t feature_flags;
502 	/**< Feature flags exposes HW/SW features for the given device */
503 
504 	const struct rte_cryptodev_capabilities *capabilities;
505 	/**< Array of devices supported capabilities */
506 
507 	unsigned max_nb_queue_pairs;
508 	/**< Maximum number of queue pairs supported by device. */
509 
510 	uint16_t min_mbuf_headroom_req;
511 	/**< Minimum mbuf headroom required by device */
512 
513 	uint16_t min_mbuf_tailroom_req;
514 	/**< Minimum mbuf tailroom required by device */
515 
516 	struct {
517 		unsigned max_nb_sessions;
518 		/**< Maximum number of sessions supported by device.
519 		 * If 0, the device does not have any limitation in
520 		 * number of sessions that can be used.
521 		 */
522 	} sym;
523 };
524 
525 #define RTE_CRYPTODEV_DETACHED  (0)
526 #define RTE_CRYPTODEV_ATTACHED  (1)
527 
528 /** Definitions of Crypto device event types */
529 enum rte_cryptodev_event_type {
530 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
531 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
532 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
533 };
534 
535 /** Crypto device queue pair configuration structure. */
536 struct rte_cryptodev_qp_conf {
537 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
538 	struct rte_mempool *mp_session;
539 	/**< The mempool for creating a session in sessionless mode */
540 	struct rte_mempool *mp_session_private;
541 	/**< The mempool for creating session private data in sessionless mode */
542 };
543 
544 /**
545  * Function type used for processing crypto ops when enqueue/dequeue burst is
546  * called.
547  *
548  * The callback function is called immediately when the enqueue/dequeue burst
549  * API is invoked.
549  *
550  * @param	dev_id		The identifier of the device.
551  * @param	qp_id		The index of the queue pair on which ops are
552  *				enqueued/dequeued. The value must be in the
553  *				range [0, nb_queue_pairs - 1] previously
554  *				supplied to *rte_cryptodev_configure*.
555  * @param	ops		The address of an array of *nb_ops* pointers
556  *				to *rte_crypto_op* structures which contain
557  *				the crypto operations to be processed.
558  * @param	nb_ops		The number of operations to process.
559  * @param	user_param	The arbitrary user parameter passed in by the
560  *				application when the callback was originally
561  *				registered.
562  * @return			The number of ops to be enqueued to the
563  *				crypto device.
564  */
565 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
566 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
567 
568 /**
569  * Typedef for application callback function to be registered by application
570  * software for notification of device events
571  *
572  * @param	dev_id	Crypto device identifier
573  * @param	event	Crypto device event to register for notification of.
574  * @param	cb_arg	User specified parameter to be passed to the
575  *			user's callback function.
576  */
577 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
578 		enum rte_cryptodev_event_type event, void *cb_arg);
579 
580 
581 /** Crypto Device statistics */
582 struct rte_cryptodev_stats {
583 	uint64_t enqueued_count;
584 	/**< Count of all operations enqueued */
585 	uint64_t dequeued_count;
586 	/**< Count of all operations dequeued */
587 
588 	uint64_t enqueue_err_count;
589 	/**< Total error count on operations enqueued */
590 	uint64_t dequeue_err_count;
591 	/**< Total error count on operations dequeued */
592 };
593 
594 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
595 /**< Max length of name of crypto PMD */
596 
597 /**
598  * Get the device identifier for the named crypto device.
599  *
600  * @param	name	device name to select the device structure.
601  *
602  * @return
603  *   - Returns crypto device identifier on success.
604  *   - Return -1 on failure to find named crypto device.
605  */
606 extern int
607 rte_cryptodev_get_dev_id(const char *name);
608 
609 /**
610  * Get the crypto device name given a device identifier.
611  *
612  * @param dev_id
613  *   The identifier of the device
614  *
615  * @return
616  *   - Returns crypto device name.
617  *   - Returns NULL if crypto device is not present.
618  */
619 extern const char *
620 rte_cryptodev_name_get(uint8_t dev_id);
621 
622 /**
623  * Get the total number of crypto devices that have been successfully
624  * initialised.
625  *
626  * @return
627  *   - The total number of usable crypto devices.
628  */
629 extern uint8_t
630 rte_cryptodev_count(void);
631 
632 /**
633  * Get the number of crypto devices that use the given driver.
634  *
635  * @param	driver_id	driver identifier.
636  *
637  * @return
638  *   Returns the number of crypto devices.
639  */
640 extern uint8_t
641 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
642 
643 /**
644  * Get number and identifiers of attached crypto devices that
645  * use the same crypto driver.
646  *
647  * @param	driver_name	driver name.
648  * @param	devices		output devices identifiers.
649  * @param	nb_devices	maximal number of devices.
650  *
651  * @return
652  *   Returns the number of attached crypto devices.
653  */
654 uint8_t
655 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
656 		uint8_t nb_devices);
657 /**
658  * Return the NUMA socket to which a device is connected
659  *
660  * @param dev_id
661  *   The identifier of the device
662  * @return
663  *   The NUMA socket id to which the device is connected or
664  *   a default of zero if the socket could not be determined.
665  *   -1 is returned if the dev_id value is out of range.
666  */
667 extern int
668 rte_cryptodev_socket_id(uint8_t dev_id);
669 
670 /** Crypto device configuration structure */
671 struct rte_cryptodev_config {
672 	int socket_id;			/**< Socket to allocate resources on */
673 	uint16_t nb_queue_pairs;
674 	/**< Number of queue pairs to configure on device */
675 	uint64_t ff_disable;
676 	/**< Feature flags to be disabled. Only the following features are
677 	 * allowed to be disabled,
678 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
679 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
680 	 *  - RTE_CRYPTODEV_FF_SECURITY
681 	 */
682 };
683 
684 /**
685  * Configure a device.
686  *
687  * This function must be invoked first before any other function in the
688  * API. This function can also be re-invoked when a device is in the
689  * stopped state.
690  *
691  * @param	dev_id		The identifier of the device to configure.
692  * @param	config		The crypto device configuration structure.
693  *
694  * @return
695  *   - 0: Success, device configured.
696  *   - <0: Error code returned by the driver configuration function.
697  */
698 extern int
699 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
700 
701 /**
702  * Start a device.
703  *
704  * The device start step is the last one and consists of setting the configured
705  * offload features and starting the enqueue and dequeue units of the
706  * device.
707  * On success, all basic functions exported by the API (statistics,
708  * enqueue/dequeue, and so on) can be invoked.
709  *
710  * @param dev_id
711  *   The identifier of the device.
712  * @return
713  *   - 0: Success, device started.
714  *   - <0: Error code of the driver device start function.
715  */
716 extern int
717 rte_cryptodev_start(uint8_t dev_id);
718 
719 /**
720  * Stop a device. The device can be restarted with a call to
721  * rte_cryptodev_start().
722  *
723  * @param	dev_id		The identifier of the device.
724  */
725 extern void
726 rte_cryptodev_stop(uint8_t dev_id);
727 
728 /**
729  * Close a device. The device cannot be restarted!
730  *
731  * @param	dev_id		The identifier of the device.
732  *
733  * @return
734  *  - 0 on successfully closing device
735  *  - <0 on failure to close device
736  */
737 extern int
738 rte_cryptodev_close(uint8_t dev_id);
739 
740 /**
741  * Allocate and set up a queue pair for a device.
742  *
743  *
744  * @param	dev_id		The identifier of the device.
745  * @param	queue_pair_id	The index of the queue pair to set up. The
746  *				value must be in the range [0, nb_queue_pair
747  *				- 1] previously supplied to
748  *				rte_cryptodev_configure().
749  * @param	qp_conf		The pointer to the configuration data to be
750  *				used for the queue pair.
751  * @param	socket_id	The *socket_id* argument is the socket
752  *				identifier in case of NUMA. The value can be
753  *				*SOCKET_ID_ANY* if there is no NUMA constraint
754  *				for the DMA memory allocated for the
755  *				queue pair.
756  *
757  * @return
758  *   - 0: Success, queue pair correctly set up.
759  *   - <0: Queue pair configuration failed
760  */
761 extern int
762 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
763 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
764 
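/*
 * Illustrative sketch of the usual bring-up order: configure the device,
 * set up each queue pair, then start it. The session mempools passed in
 * qp_conf are assumed to have been created beforehand (for example with
 * rte_cryptodev_sym_session_pool_create()); the descriptor count is an
 * arbitrary example value.
 *
 *   static int
 *   setup_cryptodev(uint8_t dev_id, uint16_t nb_qps,
 *                   struct rte_mempool *sess_mp,
 *                   struct rte_mempool *sess_priv_mp)
 *   {
 *       struct rte_cryptodev_config conf = {
 *           .socket_id = rte_cryptodev_socket_id(dev_id),
 *           .nb_queue_pairs = nb_qps,
 *           .ff_disable = 0,
 *       };
 *       struct rte_cryptodev_qp_conf qp_conf = {
 *           .nb_descriptors = 2048,
 *           .mp_session = sess_mp,
 *           .mp_session_private = sess_priv_mp,
 *       };
 *       uint16_t qp;
 *
 *       if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *           return -1;
 *       for (qp = 0; qp < nb_qps; qp++) {
 *           if (rte_cryptodev_queue_pair_setup(dev_id, qp, &qp_conf,
 *                   conf.socket_id) < 0)
 *               return -1;
 *       }
 *       return rte_cryptodev_start(dev_id);
 *   }
 */
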
765 /**
766  * Get the status of queue pairs setup on a specific crypto device
767  *
768  * @param	dev_id		Crypto device identifier.
769  * @param	queue_pair_id	The index of the queue pair to query. The
770  *				value must be in the range [0, nb_queue_pair
771  *				- 1] previously supplied to
772  *				rte_cryptodev_configure().
773  * @return
774  *   - 0: qp was not configured
775  *   - 1: qp was configured
776  *   - -EINVAL: device was not configured
777  */
778 __rte_experimental
779 int
780 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
781 
782 /**
783  * Get the number of queue pairs on a specific crypto device
784  *
785  * @param	dev_id		Crypto device identifier.
786  * @return
787  *   - The number of configured queue pairs.
788  */
789 extern uint16_t
790 rte_cryptodev_queue_pair_count(uint8_t dev_id);
791 
792 
793 /**
794  * Retrieve the general I/O statistics of a device.
795  *
796  * @param	dev_id		The identifier of the device.
797  * @param	stats		A pointer to a structure of type
798  *				*rte_cryptodev_stats* to be filled with the
799  *				values of device counters.
800  * @return
801  *   - Zero if successful.
802  *   - Non-zero otherwise.
803  */
804 extern int
805 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
806 
807 /**
808  * Reset the general I/O statistics of a device.
809  *
810  * @param	dev_id		The identifier of the device.
811  */
812 extern void
813 rte_cryptodev_stats_reset(uint8_t dev_id);
814 
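/*
 * Illustrative sketch: read and print the device counters, then clear them.
 * Assumes <stdio.h> and <inttypes.h> for printf() and the PRIu64 macros.
 *
 *   static void
 *   report_and_clear_stats(uint8_t dev_id)
 *   {
 *       struct rte_cryptodev_stats stats;
 *
 *       if (rte_cryptodev_stats_get(dev_id, &stats) != 0)
 *           return;
 *       printf("enq %"PRIu64" (err %"PRIu64"), deq %"PRIu64" (err %"PRIu64")\n",
 *              stats.enqueued_count, stats.enqueue_err_count,
 *              stats.dequeued_count, stats.dequeue_err_count);
 *       rte_cryptodev_stats_reset(dev_id);
 *   }
 */
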
815 /**
816  * Retrieve the contextual information of a device.
817  *
818  * @param	dev_id		The identifier of the device.
819  * @param	dev_info	A pointer to a structure of type
820  *				*rte_cryptodev_info* to be filled with the
821  *				contextual information of the device.
822  *
823  * @note The capabilities field of dev_info is set to point to the first
824  * element of an array of struct rte_cryptodev_capabilities. The element after
825  * the last valid element has its op field set to
826  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
827  */
828 extern void
829 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
830 
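/*
 * Illustrative sketch: walk the capability array reported by
 * rte_cryptodev_info_get(). As noted above, the array is terminated by an
 * element whose op field is RTE_CRYPTO_OP_TYPE_UNDEFINED.
 *
 *   static unsigned int
 *   count_sym_capabilities(uint8_t dev_id)
 *   {
 *       struct rte_cryptodev_info info;
 *       const struct rte_cryptodev_capabilities *cap;
 *       unsigned int n = 0;
 *
 *       rte_cryptodev_info_get(dev_id, &info);
 *       for (cap = info.capabilities;
 *            cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
 *           if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
 *               n++;
 *       }
 *       return n;
 *   }
 */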
831 
832 /**
833  * Register a callback function for specific device id.
834  *
835  * @param	dev_id		Device id.
836  * @param	event		Event of interest.
837  * @param	cb_fn		User supplied callback function to be called.
838  * @param	cb_arg		Pointer to the parameters for the registered
839  *				callback.
840  *
841  * @return
842  *  - On success, zero.
843  *  - On failure, a negative value.
844  */
845 extern int
846 rte_cryptodev_callback_register(uint8_t dev_id,
847 		enum rte_cryptodev_event_type event,
848 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
849 
850 /**
851  * Unregister a callback function for specific device id.
852  *
853  * @param	dev_id		The device identifier.
854  * @param	event		Event of interest.
855  * @param	cb_fn		User supplied callback function to be called.
856  * @param	cb_arg		Pointer to the parameters for the registered
857  *				callback.
858  *
859  * @return
860  *  - On success, zero.
861  *  - On failure, a negative value.
862  */
863 extern int
864 rte_cryptodev_callback_unregister(uint8_t dev_id,
865 		enum rte_cryptodev_event_type event,
866 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
867 
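/*
 * Illustrative sketch: a minimal error-event handler and its registration.
 * The handler must match rte_cryptodev_cb_fn; what it does with the event is
 * entirely application-defined (printf() assumes <stdio.h>).
 *
 *   static void
 *   crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *                   void *cb_arg)
 *   {
 *       (void)cb_arg; // the pointer passed at registration time
 *       if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *           printf("crypto device %u reported an error\n", dev_id);
 *   }
 *
 *   // during initialisation:
 *   //   rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *   //                                   crypto_event_cb, NULL);
 */
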
868 typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
869 		struct rte_crypto_op **ops,	uint16_t nb_ops);
870 /**< Dequeue processed packets from queue pair of a device. */
871 
872 typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
873 		struct rte_crypto_op **ops,	uint16_t nb_ops);
874 /**< Enqueue packets for processing on queue pair of a device. */
875 
876 
877 
878 
879 struct rte_cryptodev_callback;
880 
881 /** Structure to keep track of registered callbacks */
882 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
883 
884 /**
885  * Structure used to hold information about the callbacks to be called for a
886  * queue pair on enqueue/dequeue.
887  */
888 struct rte_cryptodev_cb {
889 	struct rte_cryptodev_cb *next;
890 	/**< Pointer to next callback */
891 	rte_cryptodev_callback_fn fn;
892 	/**< Pointer to callback function */
893 	void *arg;
894 	/**< Pointer to argument */
895 };
896 
897 /**
898  * @internal
899  * Structure used to hold information about the RCU for a queue pair.
900  */
901 struct rte_cryptodev_cb_rcu {
902 	struct rte_cryptodev_cb *next;
903 	/**< Pointer to next callback */
904 	struct rte_rcu_qsbr *qsbr;
905 	/**< RCU QSBR variable per queue pair */
906 };
907 
908 /** The data structure associated with each crypto device. */
909 struct rte_cryptodev {
910 	dequeue_pkt_burst_t dequeue_burst;
911 	/**< Pointer to PMD dequeue function. */
912 	enqueue_pkt_burst_t enqueue_burst;
913 	/**< Pointer to PMD enqueue function. */
914 
915 	struct rte_cryptodev_data *data;
916 	/**< Pointer to device data */
917 	struct rte_cryptodev_ops *dev_ops;
918 	/**< Functions exported by PMD */
919 	uint64_t feature_flags;
920 	/**< Feature flags exposes HW/SW features for the given device */
921 	struct rte_device *device;
922 	/**< Backing device */
923 
924 	uint8_t driver_id;
925 	/**< Crypto driver identifier*/
926 
927 	struct rte_cryptodev_cb_list link_intr_cbs;
928 	/**< User application callback for interrupts if present */
929 
930 	void *security_ctx;
931 	/**< Context for security ops */
932 
933 	__extension__
934 	uint8_t attached : 1;
935 	/**< Flag indicating the device is attached */
936 
937 	struct rte_cryptodev_cb_rcu *enq_cbs;
938 	/**< User application callback for pre enqueue processing */
939 
940 	struct rte_cryptodev_cb_rcu *deq_cbs;
941 	/**< User application callback for post dequeue processing */
942 } __rte_cache_aligned;
943 
944 void *
945 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
946 
947 /**
948  *
949  * The data part, with no function pointers, associated with each device.
950  *
951  * This structure is safe to place in shared memory to be common among
952  * different processes in a multi-process configuration.
953  */
954 struct rte_cryptodev_data {
955 	uint8_t dev_id;
956 	/**< Device ID for this instance */
957 	uint8_t socket_id;
958 	/**< Socket ID where memory is allocated */
959 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
960 	/**< Unique identifier name */
961 
962 	__extension__
963 	uint8_t dev_started : 1;
964 	/**< Device state: STARTED(1)/STOPPED(0) */
965 
966 	struct rte_mempool *session_pool;
967 	/**< Session memory pool */
968 	void **queue_pairs;
969 	/**< Array of pointers to queue pairs. */
970 	uint16_t nb_queue_pairs;
971 	/**< Number of device queue pairs. */
972 
973 	void *dev_private;
974 	/**< PMD-specific private data */
975 } __rte_cache_aligned;
976 
977 extern struct rte_cryptodev *rte_cryptodevs;
978 /**
979  *
980  * Dequeue a burst of processed crypto operations from a queue on the crypto
981  * device. The dequeued operations are stored in *rte_crypto_op* structures
982  * whose pointers are supplied in the *ops* array.
983  *
984  * The rte_cryptodev_dequeue_burst() function returns the number of ops
985  * actually dequeued, which is the number of *rte_crypto_op* data structures
986  * effectively supplied into the *ops* array.
987  *
988  * A return value equal to *nb_ops* indicates that the queue contained
989  * at least *nb_ops* operations, and this is likely to signify that other
990  * processed operations remain in the device's output queue. Applications
991  * implementing a "retrieve as many processed operations as possible" policy
992  * can check this specific case and keep invoking the
993  * rte_cryptodev_dequeue_burst() function until a value less than
994  * *nb_ops* is returned.
995  *
996  * The rte_cryptodev_dequeue_burst() function does not provide any error
997  * notification to avoid the corresponding overhead.
998  *
999  * @param	dev_id		The symmetric crypto device identifier
1000  * @param	qp_id		The index of the queue pair from which to
1001  *				retrieve processed packets. The value must be
1002  *				in the range [0, nb_queue_pair - 1] previously
1003  *				supplied to rte_cryptodev_configure().
1004  * @param	ops		The address of an array of pointers to
1005  *				*rte_crypto_op* structures that must be
1006  *				large enough to store *nb_ops* pointers in it.
1007  * @param	nb_ops		The maximum number of operations to dequeue.
1008  *
1009  * @return
1010  *   - The number of operations actually dequeued, which is the number
1011  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1012  *   *ops* array.
1013  */
1014 static inline uint16_t
1015 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1016 		struct rte_crypto_op **ops, uint16_t nb_ops)
1017 {
1018 	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
1019 
1020 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1021 	nb_ops = (*dev->dequeue_burst)
1022 			(dev->data->queue_pairs[qp_id], ops, nb_ops);
1023 #ifdef RTE_CRYPTO_CALLBACKS
1024 	if (unlikely(dev->deq_cbs != NULL)) {
1025 		struct rte_cryptodev_cb_rcu *list;
1026 		struct rte_cryptodev_cb *cb;
1027 
1028 		/* __ATOMIC_RELEASE memory order was used when the
1029 		 * call back was inserted into the list.
1030 		 * Since there is a clear dependency between loading
1031 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1032 		 * not required.
1033 		 */
1034 		list = &dev->deq_cbs[qp_id];
1035 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1036 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1037 
1038 		while (cb != NULL) {
1039 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1040 					cb->arg);
1041 			cb = cb->next;
1042 		}
1043 
1044 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1045 	}
1046 #endif
1047 	return nb_ops;
1048 }
1049 
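/*
 * Illustrative sketch of the "retrieve as many processed operations as
 * possible" policy described above: keep dequeuing while full bursts are
 * returned. BURST_SIZE and the completion handling are application-defined.
 *
 *   #define BURST_SIZE 32
 *
 *   static void
 *   drain_qp(uint8_t dev_id, uint16_t qp_id)
 *   {
 *       struct rte_crypto_op *ops[BURST_SIZE];
 *       uint16_t nb;
 *
 *       do {
 *           nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, BURST_SIZE);
 *           // handle_completed_ops(ops, nb); // application-specific
 *       } while (nb == BURST_SIZE);
 *   }
 */
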
1050 /**
1051  * Enqueue a burst of operations for processing on a crypto device.
1052  *
1053  * The rte_cryptodev_enqueue_burst() function is invoked to place
1054  * crypto operations on the queue *qp_id* of the device designated by
1055  * its *dev_id*.
1056  *
1057  * The *nb_ops* parameter is the number of operations to process which are
1058  * supplied in the *ops* array of *rte_crypto_op* structures.
1059  *
1060  * The rte_cryptodev_enqueue_burst() function returns the number of
1061  * operations it actually enqueued for processing. A return value equal to
1062  * *nb_ops* means that all packets have been enqueued.
1063  *
1064  * @param	dev_id		The identifier of the device.
1065  * @param	qp_id		The index of the queue pair which packets are
1066  *				to be enqueued for processing. The value
1067  *				must be in the range [0, nb_queue_pairs - 1]
1068  *				previously supplied to
1069  *				 *rte_cryptodev_configure*.
1070  * @param	ops		The address of an array of *nb_ops* pointers
1071  *				to *rte_crypto_op* structures which contain
1072  *				the crypto operations to be processed.
1073  * @param	nb_ops		The number of operations to process.
1074  *
1075  * @return
1076  * The number of operations actually enqueued on the crypto device. The return
1077  * value can be less than the value of the *nb_ops* parameter when the
1078  * crypto device's queue is full or if invalid parameters are specified in
1079  * a *rte_crypto_op*.
1080  */
1081 static inline uint16_t
1082 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1083 		struct rte_crypto_op **ops, uint16_t nb_ops)
1084 {
1085 	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
1086 
1087 #ifdef RTE_CRYPTO_CALLBACKS
1088 	if (unlikely(dev->enq_cbs != NULL)) {
1089 		struct rte_cryptodev_cb_rcu *list;
1090 		struct rte_cryptodev_cb *cb;
1091 
1092 		/* __ATOMIC_RELEASE memory order was used when the
1093 		 * call back was inserted into the list.
1094 		 * Since there is a clear dependency between loading
1095 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1096 		 * not required.
1097 		 */
1098 		list = &dev->enq_cbs[qp_id];
1099 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1100 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1101 
1102 		while (cb != NULL) {
1103 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1104 					cb->arg);
1105 			cb = cb->next;
1106 		}
1107 
1108 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1109 	}
1110 #endif
1111 
1112 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1113 	return (*dev->enqueue_burst)(
1114 			dev->data->queue_pairs[qp_id], ops, nb_ops);
1115 }
1116 
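/*
 * Illustrative sketch: enqueue a prepared burst, retrying the tail when the
 * device queue is full (a return value smaller than the requested count). A
 * real application would typically bound the retries or back off instead of
 * spinning indefinitely.
 *
 *   static void
 *   enqueue_all(uint8_t dev_id, uint16_t qp_id,
 *               struct rte_crypto_op **ops, uint16_t nb_ops)
 *   {
 *       uint16_t sent = 0;
 *
 *       while (sent < nb_ops)
 *           sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *                   ops + sent, nb_ops - sent);
 *   }
 */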
1117 
1118 /** Cryptodev symmetric crypto session
1119  * Each session is derived from a fixed xform chain. Therefore each session
1120  * has a fixed algo, key, op-type, digest_len etc.
1121  */
1122 struct rte_cryptodev_sym_session {
1123 	uint64_t opaque_data;
1124 	/**< Can be used for external metadata */
1125 	uint16_t nb_drivers;
1126 	/**< number of elements in sess_data array */
1127 	uint16_t user_data_sz;
1128 	/**< session user data will be placed after sess_data */
1129 	__extension__ struct {
1130 		void *data;
1131 		uint16_t refcnt;
1132 	} sess_data[0];
1133 	/**< Driver specific session material, variable size */
1134 };
1135 
1136 /** Cryptodev asymmetric crypto session */
1137 struct rte_cryptodev_asym_session {
1138 	__extension__ void *sess_private_data[0];
1139 	/**< Private asymmetric session material */
1140 };
1141 
1142 /**
1143  * Create a symmetric session mempool.
1144  *
1145  * @param name
1146  *   The unique mempool name.
1147  * @param nb_elts
1148  *   The number of elements in the mempool.
1149  * @param elt_size
1150  *   The size of the element. This value will be ignored if it is smaller than
1151  *   the minimum session header size required for the system. Users who want
1152  *   to use the same mempool for the sym session and session private data can
1153  *   set it to the maximum of all existing devices' private data and session
1154  *   header sizes.
1155  * @param cache_size
1156  *   The number of per-lcore cache elements
1157  * @param priv_size
1158  *   The private data size of each session.
1159  * @param socket_id
1160  *   The *socket_id* argument is the socket identifier in the case of
1161  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1162  *   constraint for the reserved zone.
1163  *
1164  * @return
1165  *  - On success returns the created session mempool.
1166  *  - On failure returns NULL.
1167  */
1168 __rte_experimental
1169 struct rte_mempool *
1170 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1171 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1172 	int socket_id);
1173 
1174 /**
1175  * Create symmetric crypto session header (generic with no private data)
1176  *
1177  * @param   mempool    Symmetric session mempool to allocate session
1178  *                     objects from
1179  * @return
1180  *  - On success return pointer to sym-session
1181  *  - On failure returns NULL
1182  */
1183 struct rte_cryptodev_sym_session *
1184 rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
1185 
1186 /**
1187  * Create asymmetric crypto session header (generic with no private data)
1188  *
1189  * @param   mempool    mempool to allocate asymmetric session
1190  *                     objects from
1191  * @return
1192  *  - On success return pointer to asym-session
1193  *  - On failure returns NULL
1194  */
1195 __rte_experimental
1196 struct rte_cryptodev_asym_session *
1197 rte_cryptodev_asym_session_create(struct rte_mempool *mempool);
1198 
1199 /**
1200  * Frees symmetric crypto session header, after checking that all
1201  * the device private data has been freed, returning it
1202  * to its original mempool.
1203  *
1204  * @param   sess     Session header to be freed.
1205  *
1206  * @return
1207  *  - 0 if successful.
1208  *  - -EINVAL if session is NULL.
1209  *  - -EBUSY if not all device private data has been freed.
1210  */
1211 int
1212 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
1213 
1214 /**
1215  * Frees asymmetric crypto session header, after checking that all
1216  * the device private data has been freed, returning it
1217  * to its original mempool.
1218  *
1219  * @param   sess     Session header to be freed.
1220  *
1221  * @return
1222  *  - 0 if successful.
1223  *  - -EINVAL if session is NULL.
1224  *  - -EBUSY if not all device private data has been freed.
1225  */
1226 __rte_experimental
1227 int
1228 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess);
1229 
1230 /**
1231  * Fill out private data for the device id, based on its device type.
1232  *
1233  * @param   dev_id   ID of device that we want the session to be used on
1234  * @param   sess     Session where the private data will be attached to
1235  * @param   xforms   Symmetric crypto transform operations to apply on flow
1236  *                   processed with this session
1237  * @param   mempool  Mempool where the private data is allocated.
1238  *
1239  * @return
1240  *  - On success, zero.
1241  *  - -EINVAL if input parameters are invalid.
1242  *  - -ENOTSUP if crypto device does not support the crypto transform or
1243  *    does not support symmetric operations.
1244  *  - -ENOMEM if the private session could not be allocated.
1245  */
1246 int
1247 rte_cryptodev_sym_session_init(uint8_t dev_id,
1248 			struct rte_cryptodev_sym_session *sess,
1249 			struct rte_crypto_sym_xform *xforms,
1250 			struct rte_mempool *mempool);
1251 
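/*
 * Illustrative sketch of the symmetric session life cycle: allocate the
 * session header from a session mempool, then initialise the per-device
 * private data for the device that will process the traffic. The xform chain
 * is assumed to be filled in by the caller, and for brevity the same mempool
 * is used here for both the header and the private data (which requires it
 * to have been sized accordingly at creation time).
 *
 *   static struct rte_cryptodev_sym_session *
 *   create_session(uint8_t dev_id, struct rte_mempool *sess_mp,
 *                  struct rte_crypto_sym_xform *xform)
 *   {
 *       struct rte_cryptodev_sym_session *sess;
 *
 *       sess = rte_cryptodev_sym_session_create(sess_mp);
 *       if (sess == NULL)
 *           return NULL;
 *       if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp) < 0) {
 *           rte_cryptodev_sym_session_free(sess);
 *           return NULL;
 *       }
 *       return sess;
 *   }
 */
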
1252 /**
1253  * Initialize asymmetric session on a device with specific asymmetric xform
1254  *
1255  * @param   dev_id   ID of device that we want the session to be used on
1256  * @param   sess     Session to be set up on a device
1257  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1258  *                   processed with this session
1259  * @param   mempool  Mempool to be used for internal allocation.
1260  *
1261  * @return
1262  *  - On success, zero.
1263  *  - -EINVAL if input parameters are invalid.
1264  *  - -ENOTSUP if crypto device does not support the crypto transform.
1265  *  - -ENOMEM if the private session could not be allocated.
1266  */
1267 __rte_experimental
1268 int
1269 rte_cryptodev_asym_session_init(uint8_t dev_id,
1270 			struct rte_cryptodev_asym_session *sess,
1271 			struct rte_crypto_asym_xform *xforms,
1272 			struct rte_mempool *mempool);
1273 
1274 /**
1275  * Frees private data for the device id, based on its device type,
1276  * returning it to its mempool. It is the application's responsibility
1277  * to ensure that private session data is not cleared while there are
1278  * still in-flight operations using it.
1279  *
1280  * @param   dev_id   ID of device that uses the session.
1281  * @param   sess     Session containing the reference to the private data
1282  *
1283  * @return
1284  *  - 0 if successful.
1285  *  - -EINVAL if device is invalid or session is NULL.
1286  *  - -ENOTSUP if crypto device does not support symmetric operations.
1287  */
1288 int
1289 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1290 			struct rte_cryptodev_sym_session *sess);
1291 
1292 /**
1293  * Frees resources held by asymmetric session during rte_cryptodev_asym_session_init()
1294  *
1295  * @param   dev_id   ID of device that uses the asymmetric session.
1296  * @param   sess     Asymmetric session set up on device using
1297  *					 rte_cryptodev_asym_session_init()
1298  * @return
1299  *  - 0 if successful.
1300  *  - -EINVAL if device is invalid or session is NULL.
1301  */
1302 __rte_experimental
1303 int
1304 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1305 			struct rte_cryptodev_asym_session *sess);
1306 
1307 /**
1308  * Get the size of the header session, for all registered drivers excluding
1309  * the user data size.
1310  *
1311  * @return
1312  *   Size of the symmetric header session.
1313  */
1314 unsigned int
1315 rte_cryptodev_sym_get_header_session_size(void);
1316 
1317 /**
1318  * Get the size of the header session from created session.
1319  *
1320  * @param sess
1321  *   The sym cryptodev session pointer
1322  *
1323  * @return
1324  *   - If sess is not NULL, return the size of the header session including
1325  *   the private data size defined within sess.
1326  *   - If sess is NULL, return 0.
1327  */
1328 __rte_experimental
1329 unsigned int
1330 rte_cryptodev_sym_get_existing_header_session_size(
1331 		struct rte_cryptodev_sym_session *sess);
1332 
1333 /**
1334  * Get the size of the asymmetric session header, for all registered drivers.
1335  *
1336  * @return
1337  *   Size of the asymmetric header session.
1338  */
1339 __rte_experimental
1340 unsigned int
1341 rte_cryptodev_asym_get_header_session_size(void);
1342 
1343 /**
1344  * Get the size of the private symmetric session data
1345  * for a device.
1346  *
1347  * @param	dev_id		The device identifier.
1348  *
1349  * @return
1350  *   - Size of the private data, if successful
1351  *   - 0 if device is invalid or does not have private
1352  *   symmetric session
1353  */
1354 unsigned int
1355 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1356 
1357 /**
1358  * Get the size of the private data for asymmetric session
1359  * on device
1360  *
1361  * @param	dev_id		The device identifier.
1362  *
1363  * @return
1364  *   - Size of the asymmetric private data, if successful
1365  *   - 0 if device is invalid or does not have private session
1366  */
1367 __rte_experimental
1368 unsigned int
1369 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1370 
1371 /**
1372  * Validate whether the crypto device index refers to a valid, attached crypto device.
1373  *
1374  * @param	dev_id	Crypto device index.
1375  *
1376  * @return
1377  *   - If the device index is valid (1) or not (0).
1378  */
1379 unsigned int
1380 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1381 
1382 /**
1383  * Provide driver identifier.
1384  *
1385  * @param name
1386  *   The pointer to a driver name.
1387  * @return
1388  *  The driver type identifier or -1 if no driver is found
1389  */
1390 int rte_cryptodev_driver_id_get(const char *name);
1391 
1392 /**
1393  * Provide driver name.
1394  *
1395  * @param driver_id
1396  *   The driver identifier.
1397  * @return
1398  *  The driver name or NULL if no driver is found
1399  */
1400 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1401 
1402 /**
1403  * Store user data in a session.
1404  *
1405  * @param	sess		Session pointer allocated by
1406  *				*rte_cryptodev_sym_session_create*.
1407  * @param	data		Pointer to the user data.
1408  * @param	size		Size of the user data.
1409  *
1410  * @return
1411  *  - On success, zero.
1412  *  - On failure, a negative value.
1413  */
1414 __rte_experimental
1415 int
1416 rte_cryptodev_sym_session_set_user_data(
1417 					struct rte_cryptodev_sym_session *sess,
1418 					void *data,
1419 					uint16_t size);
1420 
1421 /**
1422  * Get user data stored in a session.
1423  *
1424  * @param	sess		Session pointer allocated by
1425  *				*rte_cryptodev_sym_session_create*.
1426  *
1427  * @return
1428  *  - On success return pointer to user data.
1429  *  - On failure returns NULL.
1430  */
1431 __rte_experimental
1432 void *
1433 rte_cryptodev_sym_session_get_user_data(
1434 					struct rte_cryptodev_sym_session *sess);
1435 
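/*
 * Illustrative sketch: store a small application record in a session and
 * read it back on completion. struct app_ctx is a hypothetical application
 * type, and the user-data area is assumed to have been reserved when the
 * session mempool was created (the priv_size argument of
 * rte_cryptodev_sym_session_pool_create()). Both calls are experimental.
 *
 *   struct app_ctx { uint32_t flow_id; };
 *
 *   static int
 *   attach_ctx(struct rte_cryptodev_sym_session *sess, struct app_ctx *ctx)
 *   {
 *       // stores sizeof(*ctx) bytes of user data in the session
 *       return rte_cryptodev_sym_session_set_user_data(sess, ctx,
 *               sizeof(*ctx));
 *   }
 *
 *   static struct app_ctx *
 *   get_ctx(struct rte_cryptodev_sym_session *sess)
 *   {
 *       return rte_cryptodev_sym_session_get_user_data(sess);
 *   }
 */
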
1436 /**
1437  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1438  * on user provided data.
1439  *
1440  * @param	dev_id	The device identifier.
1441  * @param	sess	Cryptodev session structure
1442  * @param	ofs	Start and stop offsets for auth and cipher operations
1443  * @param	vec	Vectorized operation descriptor
1444  *
1445  * @return
1446  *  - Returns number of successfully processed packets.
1447  */
1448 __rte_experimental
1449 uint32_t
1450 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1451 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1452 	struct rte_crypto_sym_vec *vec);
1453 
1454 /**
1455  * Get the size of the raw data-path context buffer.
1456  *
1457  * @param	dev_id		The device identifier.
1458  *
1459  * @return
1460  *   - If the device supports raw data-path APIs, return the context size.
1461  *   - If the device does not support the APIs, return -1.
1462  */
1463 __rte_experimental
1464 int
1465 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1466 
1467 /**
1468  * Union of different crypto session types, including session-less xform
1469  * pointer.
1470  */
1471 union rte_cryptodev_session_ctx {
1472 	struct rte_cryptodev_sym_session *crypto_sess;
1473 	struct rte_crypto_sym_xform *xform;
1474 	struct rte_security_session *sec_sess;
1475 };
1476 
1477 /**
1478  * Enqueue a vectorized operation descriptor into the device queue but the
1479  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1480  * is called.
1481  *
1482  * @param	qp		Driver specific queue pair data.
1483  * @param	drv_ctx		Driver specific context data.
1484  * @param	vec		Vectorized operation descriptor.
1485  * @param	ofs		Start and stop offsets for auth and cipher
1486  *				operations.
1487  * @param	user_data	The array of user data for dequeue later.
1488  * @param	enqueue_status	Driver written value to specify the
1489  *				enqueue status. Possible values:
1490  *				- 1: The number of operations returned are
1491  *				     enqueued successfully.
1492  *				- 0: The number of operations returned are
1493  *				     cached into the queue but are not processed
1494  *				     until rte_cryptodev_raw_enqueue_done() is
1495  *				     called.
1496  *				- negative integer: Error occurred.
1497  * @return
1498  *   - The number of operations in the descriptor successfully enqueued or
1499  *     cached into the queue but not enqueued yet, depends on the
1500  *     "enqueue_status" value.
1501  */
1502 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1503 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1504 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1505 
1506 /**
1507  * Enqueue single raw data vector into the device queue but the driver may or
1508  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1509  *
1510  * @param	qp		Driver specific queue pair data.
1511  * @param	drv_ctx		Driver specific context data.
1512  * @param	data_vec	The buffer data vector.
1513  * @param	n_data_vecs	Number of buffer data vectors.
1514  * @param	ofs		Start and stop offsets for auth and cipher
1515  *				operations.
1516  * @param	iv		IV virtual and IOVA addresses
1517  * @param	digest		digest virtual and IOVA addresses
1518  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1519  *				depends on the algorithm used.
1520  * @param	user_data	The user data.
1521  * @return
1522  *   - 1: The data vector is enqueued successfully.
1523  *   - 0: The data vector is cached into the queue but is not processed
1524  *        until rte_cryptodev_raw_enqueue_done() is called.
1525  *   - negative integer: failure.
1526  */
1527 typedef int (*cryptodev_sym_raw_enqueue_t)(
1528 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1529 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1530 	struct rte_crypto_va_iova_ptr *iv,
1531 	struct rte_crypto_va_iova_ptr *digest,
1532 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1533 	void *user_data);
1534 
1535 /**
1536  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1537  * enqueued/dequeued operations.
1538  *
1539  * @param	qp		Driver specific queue pair data.
1540  * @param	drv_ctx		Driver specific context data.
1541  * @param	n		The total number of processed operations.
1542  * @return
1543  *   - On success return 0.
1544  *   - On failure return negative integer.
1545  */
1546 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1547 	uint32_t n);
1548 
1549 /**
1550  * Typedef of the user-provided function the driver calls to get the dequeue count.
1551  * The function may return a fixed number or the number parsed from the user
1552  * data stored in the first processed operation.
1553  *
1554  * @param	user_data	Dequeued user data.
1555  * @return
1556  *  - The number of operations to be dequeued.
1557  **/
1558 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1559 
1560 /**
1561  * Typedef of the user-provided function called after each dequeued operation,
1562  * for example to fill in its status.
1563  *
1564  * @param	user_data	Dequeued user data.
1565  * @param	index		Index number of the processed descriptor.
1566  * @param	is_op_success	Operation status provided by the driver.
1567  **/
1568 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1569 	uint32_t index, uint8_t is_op_success);
1570 
1571 /**
1572  * Dequeue a burst of symmetric crypto processing.
1573  *
1574  * @param	qp			Driver specific queue pair data.
1575  * @param	drv_ctx			Driver specific context data.
1576  * @param	get_dequeue_count	User provided callback function to
1577  *					obtain dequeue operation count.
1578  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1579  *					value is used to pass the maximum
1580  *					number of operations to be dequeued.
1581  * @param	post_dequeue		User provided callback function to
1582  *					post-process a dequeued operation.
1583  * @param	out_user_data		User data pointer array to be retrieved
1584  *					from the device queue. If
1585  *					*is_user_data_array* is set, there
1586  *					should be enough room to store all
1587  *					user data.
1588  * @param	is_user_data_array	Set 1 if every dequeued user data will
1589  *					be written into out_user_data array.
1590  *					Set 0 if only the first user data will
1591  *					be written into out_user_data array.
1592  * @param	n_success		Driver written value to specify the
1593  *					total successful operations count.
1594  * @param	dequeue_status		Driver written value to specify the
1595  *					dequeue status. Possible values:
1596  *					- 1: Successfully dequeued the number
1597  *					     of operations returned. The user
1598  *					     data previously set during enqueue
1599  *					     is stored in the "out_user_data".
1600  *					- 0: The number of operations returned
1601  *					     are completed and the user data is
1602  *					     stored in the "out_user_data", but
1603  *					     they are not freed from the queue
1604  *					     until
1605  *					     rte_cryptodev_raw_dequeue_done()
1606  *					     is called.
1607  *					- negative integer: Error occurred.
1608  * @return
1609  *   - The number of operations dequeued or completed but not freed from the
1610  *     queue, depends on "dequeue_status" value.
1611  */
1612 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1613 	uint8_t *drv_ctx,
1614 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1615 	uint32_t max_nb_to_dequeue,
1616 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1617 	void **out_user_data, uint8_t is_user_data_array,
1618 	uint32_t *n_success, int *dequeue_status);
1619 
1620 /**
1621  * Dequeue a single processed symmetric crypto operation.
1622  *
1623  * @param	qp			Driver specific queue pair data.
1624  * @param	drv_ctx			Driver specific context data.
1625  * @param	dequeue_status		Driver written value to specify the
1626  *					dequeue status. Possible values:
1627  *					- 1: Successfully dequeued an operation.
1628  *					     The user data is returned.
1629  *					- 0: The first operation in the queue
1630  *					     is completed and the user data
1631  *					     previously set during enqueue is
1632  *					     returned, but it is not freed from
1633  *					     the queue until
1634  *					     rte_cryptodev_raw_dequeue_done() is
1635  *					     called.
1636  *					- negative integer: Error occurred.
1637  * @param	op_status		Driver written value to specify
1638  *					operation status.
1639  * @return
1640  *   - The user data pointer retrieved from the device queue or NULL if no
1641  *     operation is ready for dequeue.
1642  */
1643 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1644 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1645 		enum rte_crypto_op_status *op_status);
1646 
1647 /**
1648  * Context data for the raw data-path API crypto processing. The buffer for
1649  * this structure must be allocated by the user application with a size equal
1650  * to or larger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1651  */
1652 struct rte_crypto_raw_dp_ctx {
1653 	void *qp_data;
1654 
1655 	cryptodev_sym_raw_enqueue_t enqueue;
1656 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1657 	cryptodev_sym_raw_operation_done_t enqueue_done;
1658 	cryptodev_sym_raw_dequeue_t dequeue;
1659 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1660 	cryptodev_sym_raw_operation_done_t dequeue_done;
1661 
1662 	/* Driver specific context data */
1663 	__extension__ uint8_t drv_ctx_data[];
1664 };
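
/*
 * Illustrative usage sketch (not part of the API): allocate the context
 * buffer. The size must come from rte_cryptodev_get_raw_dp_ctx_size()
 * (declared in this file); the use of rte_zmalloc() and the error handling
 * shown here are only one possible approach.
 *
 *	#include <rte_malloc.h>
 *
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	int ctx_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *
 *	if (ctx_size < 0)
 *		return -1;	// raw data-path API not supported by this device
 *	ctx = rte_zmalloc("raw_dp_ctx", ctx_size, RTE_CACHE_LINE_SIZE);
 *	if (ctx == NULL)
 *		return -1;
 */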
1665 
1666 /**
1667  * Configure raw data-path context data.
1668  *
1669  * NOTE:
1670  * After the context data is configured, the user should call
1671  * rte_cryptodev_raw_attach_session() before using it in
1672  * rte_cryptodev_raw_enqueue/dequeue function calls.
1673  *
1674  * @param	dev_id		The device identifier.
1675  * @param	qp_id		The index of the queue pair from which to
1676  *				retrieve processed packets. The value must be
1677  *				in the range [0, nb_queue_pair - 1] previously
1678  *				supplied to rte_cryptodev_configure().
1679  * @param	ctx		The raw data-path context data.
1680  * @param	sess_type	Session type.
1681  * @param	session_ctx	Session context data.
1682  * @param	is_update	Set to 0 to initialize the ctx.
1683  *				Set to 1 if the ctx is already initialized and
1684  *				only the session context data is to be updated.
1685  * @return
1686  *   - On success return 0.
1687  *   - On failure return negative integer.
1688  */
1689 __rte_experimental
1690 int
1691 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1692 	struct rte_crypto_raw_dp_ctx *ctx,
1693 	enum rte_crypto_op_sess_type sess_type,
1694 	union rte_cryptodev_session_ctx session_ctx,
1695 	uint8_t is_update);
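
/*
 * Illustrative usage sketch (not part of the API): configure the context
 * allocated above for queue pair 0 using an already created symmetric
 * session. "sess" is a hypothetical session handle; the union member used
 * must match the chosen session type.
 *
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, 0, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
 *		return -1;
 */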
1696 
1697 /**
1698  * Enqueue a vectorized operation descriptor into the device queue. The
1699  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1700  * is called.
1701  *
1702  * @param	ctx		The initialized raw data-path context data.
1703  * @param	vec		Vectorized operation descriptor.
1704  * @param	ofs		Start and stop offsets for auth and cipher
1705  *				operations.
1706  * @param	user_data	The array of user data for dequeue later.
1707  * @param	enqueue_status	Driver written value to specify the
1708  *				enqueue status. Possible values:
1709  *				- 1: The number of operations returned are
1710  *				     enqueued successfully.
1711  *				- 0: The number of operations returned are
1712  *				     cached into the queue but are not processed
1713  *				     until rte_cryptodev_raw_enqueue_done() is
1714  *				     called.
1715  *				- negative integer: Error occurred.
1716  * @return
1717  *   - The number of operations in the descriptor successfully enqueued or
1718  *     cached into the queue but not enqueued yet, depending on the
1719  *     "enqueue_status" value.
1720  */
1721 __rte_experimental
1722 uint32_t
1723 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1724 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1725 	void **user_data, int *enqueue_status);
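
/*
 * Illustrative usage sketch (not part of the API): enqueue a pre-built
 * descriptor and honour the "enqueue_status" contract. "vec", "ofs" and
 * "cookies" are hypothetical, already populated objects.
 *
 *	int enqueue_status;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			cookies, &enqueue_status);
 *
 *	if (enqueue_status < 0)
 *		return -1;
 *	if (enqueue_status == 0)
 *		// The n operations are cached only; kick off processing.
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 */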
1726 
1727 /**
1728  * Enqueue a single raw data vector into the device queue. The driver may or
1729  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1730  *
1731  * @param	ctx		The initialized raw data-path context data.
1732  * @param	data_vec	The buffer data vector.
1733  * @param	n_data_vecs	Number of buffer data vectors.
1734  * @param	ofs		Start and stop offsets for auth and cipher
1735  *				operations.
1736  * @param	iv		IV virtual and IOVA addresses.
1737  * @param	digest		Digest virtual and IOVA addresses.
1738  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1739  *				depending on the algorithm used.
1740  * @param	user_data	The user data.
1741  * @return
1742  *   - 1: The data vector is enqueued successfully.
1743  *   - 0: The data vector is cached into the queue but is not processed
1744  *        until rte_cryptodev_raw_enqueue_done() is called.
1745  *   - negative integer: failure.
1746  */
1747 __rte_experimental
1748 static __rte_always_inline int
1749 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1750 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1751 	union rte_crypto_sym_ofs ofs,
1752 	struct rte_crypto_va_iova_ptr *iv,
1753 	struct rte_crypto_va_iova_ptr *digest,
1754 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1755 	void *user_data)
1756 {
1757 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1758 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1759 }
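
/*
 * Illustrative usage sketch (not part of the API): enqueue one flat buffer
 * held in an mbuf. "m", "iv_buf"/"iv_iova", "digest_buf"/"digest_iova" and
 * "cookie" are hypothetical; whether aad_or_auth_iv may be NULL and which
 * offsets apply depend on the algorithm configured in the session.
 *
 *	#include <rte_mbuf.h>
 *
 *	struct rte_crypto_vec data_vec = {
 *		.base = rte_pktmbuf_mtod(m, void *),
 *		.iova = rte_pktmbuf_iova(m),
 *		.len = rte_pktmbuf_data_len(m),
 *	};
 *	struct rte_crypto_va_iova_ptr iv = { .va = iv_buf, .iova = iv_iova };
 *	struct rte_crypto_va_iova_ptr digest = {
 *		.va = digest_buf, .iova = digest_iova
 *	};
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	int ret = rte_cryptodev_raw_enqueue(ctx, &data_vec, 1, ofs,
 *			&iv, &digest, NULL, cookie);
 *
 *	if (ret < 0)
 *		return -1;
 *	if (ret == 0)
 *		// Cached only; start processing explicitly.
 *		rte_cryptodev_raw_enqueue_done(ctx, 1);
 */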
1760 
1761 /**
1762  * Start processing all operations enqueued since the last
1763  * rte_cryptodev_configure_raw_dp_ctx() call.
1764  *
1765  * @param	ctx	The initialized raw data-path context data.
1766  * @param	n	The number of operations cached.
1767  * @return
1768  *   - On success return 0.
1769  *   - On failure return negative integer.
1770  */
1771 __rte_experimental
1772 int
1773 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1774 		uint32_t n);
1775 
1776 /**
1777  * Dequeue a burst of processed symmetric crypto operations.
1778  *
1779  * @param	ctx			The initialized raw data-path context
1780  *					data.
1781  * @param	get_dequeue_count	User provided callback function to
1782  *					obtain dequeue operation count.
1783  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1784  *					value is used to pass the maximum
1785  *					number of operations to be dequeued.
1786  * @param	post_dequeue		User provided callback function to
1787  *					post-process a dequeued operation.
1788  * @param	out_user_data		User data pointer array to be retrieved
1789  *					from the device queue. If
1790  *					*is_user_data_array* is set, there
1791  *					must be enough room to store all
1792  *					user data.
1793  * @param	is_user_data_array	Set 1 if every dequeued user data will
1794  *					be written into out_user_data array.
1795  *					Set 0 if only the first user data will
1796  *					be written into out_user_data array.
1797  * @param	n_success		Driver written value to specify the
1798  *					total count of successful operations.
1799  * @param	dequeue_status		Driver written value to specify the
1800  *					dequeue status. Possible values:
1801  *					- 1: Successfully dequeued the number
1802  *					     of operations returned. The user
1803  *					     data previously set during enqueue
1804  *					     is stored in the "out_user_data".
1805  *					- 0: The number of operations returned
1806  *					     are completed and the user data is
1807  *					     stored in the "out_user_data", but
1808  *					     they are not freed from the queue
1809  *					     until
1810  *					     rte_cryptodev_raw_dequeue_done()
1811  *					     is called.
1812  *					- negative integer: Error occurred.
1813  * @return
1814  *   - The number of operations dequeued or completed but not freed from the
1815  *     queue, depending on the "dequeue_status" value.
1816  */
1817 __rte_experimental
1818 uint32_t
1819 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1820 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1821 	uint32_t max_nb_to_dequeue,
1822 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1823 	void **out_user_data, uint8_t is_user_data_array,
1824 	uint32_t *n_success, int *dequeue_status);
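
/*
 * Illustrative usage sketch (not part of the API): dequeue up to 32
 * completed operations. get_dequeue_count is left NULL so that
 * max_nb_to_dequeue caps the burst, as documented above; app_post_dequeue()
 * is the hypothetical callback sketched earlier in this file.
 *
 *	void *out_user_data[32];
 *	uint32_t n_success;
 *	int dequeue_status;
 *	uint32_t n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, 32,
 *			app_post_dequeue, out_user_data, 1,
 *			&n_success, &dequeue_status);
 *
 *	if (dequeue_status < 0)
 *		return -1;
 *	if (dequeue_status == 0 && n != 0)
 *		// Completed but still held in the queue; release them now.
 *		rte_cryptodev_raw_dequeue_done(ctx, n);
 */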
1825 
1826 /**
1827  * Dequeue a single processed symmetric crypto operation.
1828  *
1829  * @param	ctx			The initialized raw data-path context
1830  *					data.
1831  * @param	dequeue_status		Driver written value to specify the
1832  *					dequeue status. Possible values:
1833  *					- 1: Successfully dequeued an operation.
1834  *					     The user data is returned.
1835  *					- 0: The first operation in the queue
1836  *					     is completed and the user data
1837  *					     previously set during enqueue is
1838  *					     returned, but it is not freed from
1839  *					     the queue until
1840  *					     rte_cryptodev_raw_dequeue_done() is
1841  *					     called.
1842  *					- negative integer: Error occurred.
1843  * @param	op_status		Driver written value to specify
1844  *					operation status.
1845  * @return
1846  *   - The user data pointer retrieved from the device queue or NULL if no
1847  *     operation is ready for dequeue.
1848  */
1849 __rte_experimental
1850 static __rte_always_inline void *
1851 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1852 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1853 {
1854 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1855 			op_status);
1856 }
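
/*
 * Illustrative usage sketch (not part of the API): poll for a single
 * completed operation and release it from the queue when required.
 * "handle_failure" is a hypothetical application helper.
 *
 *	int dequeue_status;
 *	enum rte_crypto_op_status op_status;
 *	void *cookie = rte_cryptodev_raw_dequeue(ctx, &dequeue_status,
 *			&op_status);
 *
 *	if (cookie != NULL) {
 *		if (op_status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *			handle_failure(cookie);
 *		if (dequeue_status == 0)
 *			rte_cryptodev_raw_dequeue_done(ctx, 1);
 *	}
 */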
1857 
1858 /**
1859  * Inform the queue pair that dequeue operations are finished.
1860  *
1861  * @param	ctx	The initialized raw data-path context data.
1862  * @param	n	The number of operations.
1863  * @return
1864  *   - On success return 0.
1865  *   - On failure return negative integer.
1866  */
1867 __rte_experimental
1868 int
1869 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1870 		uint32_t n);
1871 
1872 /**
1873  * Add a user callback for a given crypto device and queue pair which will be
1874  * called on crypto ops enqueue.
1875  *
1876  * This API configures a function to be called for each burst of crypto ops
1877  * enqueued to a given crypto device queue pair. The return value is a pointer
1878  * that can be used later to remove the callback using
1879  * rte_cryptodev_remove_enq_callback().
1880  *
1881  * Callbacks registered by the application do not survive
1882  * rte_cryptodev_configure(), as it reinitializes the callback list.
1883  * It is the user's responsibility to remove all installed callbacks before
1884  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1885  * The application is expected to call the add API after rte_cryptodev_configure().
1886  *
1887  * Multiple functions can be registered per queue pair and they are called
1888  * in the order they were added. The API does not restrict the maximum number
1889  * of callbacks.
1890  *
1891  * @param	dev_id		The identifier of the device.
1892  * @param	qp_id		The index of the queue pair on which ops are
1893  *				to be enqueued for processing. The value
1894  *				must be in the range [0, nb_queue_pairs - 1]
1895  *				previously supplied to
1896  *				*rte_cryptodev_configure*.
1897  * @param	cb_fn		The callback function
1898  * @param	cb_arg		A generic pointer parameter which will be passed
1899  *				to each invocation of the callback function on
1900  *				this crypto device and queue pair.
1901  *
1902  * @return
1903  *  - NULL on error; rte_errno will contain the error code.
1904  *  - On success, a pointer value which can later be used to remove the
1905  *    callback.
1906  */
1907 
1908 __rte_experimental
1909 struct rte_cryptodev_cb *
1910 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1911 			       uint16_t qp_id,
1912 			       rte_cryptodev_callback_fn cb_fn,
1913 			       void *cb_arg);
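
/*
 * Illustrative usage sketch (not part of the API): count ops enqueued on
 * queue pair 0. The callback matches the rte_cryptodev_callback_fn typedef
 * declared earlier in this file; "count_enq_cb" and "enq_counter" are
 * hypothetical application code (not thread-safe, illustration only).
 *
 *	static uint64_t enq_counter;
 *
 *	static uint16_t
 *	count_enq_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *		     uint16_t nb_ops, void *user_param)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*(uint64_t *)user_param += nb_ops;
 *		return nb_ops;	// pass the whole burst through unchanged
 *	}
 *
 *	struct rte_cryptodev_cb *cb = rte_cryptodev_add_enq_callback(dev_id, 0,
 *			count_enq_cb, &enq_counter);
 *	if (cb == NULL)
 *		return -rte_errno;	// rte_errno holds the error code
 */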
1914 
1915 /**
1916  * Remove a user callback function for a given crypto device and queue pair.
1917  *
1918  * This function is used to remove enqueue callbacks that were added to a
1919  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1920  *
1923  * @param	dev_id		The identifier of the device.
1924  * @param	qp_id		The index of the queue pair on which ops are
1925  *				to be enqueued. The value must be in the
1926  *				range [0, nb_queue_pairs - 1] previously
1927  *				supplied to *rte_cryptodev_configure*.
1928  * @param	cb		Pointer to user supplied callback created via
1929  *				rte_cryptodev_add_enq_callback().
1930  *
1931  * @return
1932  *   -  0: Success. Callback was removed.
1933  *   - <0: The dev_id or the qp_id is out of range, or the callback
1934  *         is NULL or not found for the crypto device queue pair.
1935  */
1936 
1937 __rte_experimental
1938 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1939 				      uint16_t qp_id,
1940 				      struct rte_cryptodev_cb *cb);
1941 
1942 /**
1943  * Add a user callback for a given crypto device and queue pair which will be
1944  * called on crypto ops dequeue.
1945  *
1946  * This API configures a function to be called for each burst of crypto ops
1947  * dequeued from a given crypto device queue pair. The return value is a pointer
1948  * that can be used later to remove the callback using
1949  * rte_cryptodev_remove_deq_callback().
1950  *
1951  * Callbacks registered by the application do not survive
1952  * rte_cryptodev_configure(), as it reinitializes the callback list.
1953  * It is the user's responsibility to remove all installed callbacks before
1954  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1955  * The application is expected to call the add API after rte_cryptodev_configure().
1956  *
1957  * Multiple functions can be registered per queue pair and they are called
1958  * in the order they were added. The API does not restrict the maximum number
1959  * of callbacks.
1960  *
1961  * @param	dev_id		The identifier of the device.
1962  * @param	qp_id		The index of the queue pair on which ops are
1963  *				to be dequeued. The value must be in the
1964  *				range [0, nb_queue_pairs - 1] previously
1965  *				supplied to *rte_cryptodev_configure*.
1966  * @param	cb_fn		The callback function
1967  * @param	cb_arg		A generic pointer parameter which will be passed
1968  *				to each invocation of the callback function on
1969  *				this crypto device and queue pair.
1970  *
1971  * @return
1972  *   - NULL on error; rte_errno will contain the error code.
1973  *   - On success, a pointer value which can later be used to remove the
1974  *     callback.
1975  */
1976 
1977 __rte_experimental
1978 struct rte_cryptodev_cb *
1979 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1980 			       uint16_t qp_id,
1981 			       rte_cryptodev_callback_fn cb_fn,
1982 			       void *cb_arg);
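
/*
 * Illustrative usage sketch (not part of the API): register a dequeue-side
 * callback on queue pair 0 and remove it again before the device is
 * reconfigured. "count_deq_cb" and "deq_counter" are hypothetical and would
 * mirror the enqueue-side example above.
 *
 *	struct rte_cryptodev_cb *cb = rte_cryptodev_add_deq_callback(dev_id, 0,
 *			count_deq_cb, &deq_counter);
 *	if (cb == NULL)
 *		return -rte_errno;
 *
 *	// ... datapath runs ...
 *
 *	// Remove before calling rte_cryptodev_configure() again.
 *	if (rte_cryptodev_remove_deq_callback(dev_id, 0, cb) < 0)
 *		return -1;
 */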
1983 
1984 /**
1985  * Remove a user callback function for a given crypto device and queue pair.
1986  *
1987  * This function is used to remove dequeue callbacks that were added to a
1988  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1989  *
1992  * @param	dev_id		The identifier of the device.
1993  * @param	qp_id		The index of the queue pair on which ops are
1994  *				to be dequeued. The value must be in the
1995  *				range [0, nb_queue_pairs - 1] previously
1996  *				supplied to *rte_cryptodev_configure*.
1997  * @param	cb		Pointer to user supplied callback created via
1998  *				rte_cryptodev_add_deq_callback().
1999  *
2000  * @return
2001  *   -  0: Success. Callback was removed.
2002  *   - <0: The dev_id or the qp_id is out of range, or the callback
2003  *         is NULL or not found for the crypto device queue pair.
2004  */
2005 __rte_experimental
2006 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
2007 				      uint16_t qp_id,
2008 				      struct rte_cryptodev_cb *cb);
2009 
2010 #ifdef __cplusplus
2011 }
2012 #endif
2013 
2014 #endif /* _RTE_CRYPTODEV_H_ */
2015