1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #include <rte_compat.h>
18 #include "rte_kvargs.h"
19 #include "rte_crypto.h"
20 #include <rte_common.h>
21 #include <rte_rcu_qsbr.h>
22 
23 #include "rte_cryptodev_trace_fp.h"
24 
25 /**
26  * @internal Logtype used for cryptodev related messages.
27  */
28 extern int rte_cryptodev_logtype;
29 #define RTE_LOGTYPE_CRYPTODEV rte_cryptodev_logtype
30 
31 /* Logging Macros */
32 #define CDEV_LOG_ERR(...) \
33 	RTE_LOG_LINE_PREFIX(ERR, CRYPTODEV, \
34 		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
35 
36 #define CDEV_LOG_INFO(...) \
37 	RTE_LOG_LINE(INFO, CRYPTODEV, "" __VA_ARGS__)
38 
39 #define CDEV_LOG_DEBUG(...) \
40 	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
41 		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
42 
43 #define CDEV_PMD_TRACE(...) \
44 	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
45 		"[%s] %s: ", dev RTE_LOG_COMMA __func__, __VA_ARGS__)
46 
47 /**
48  * A macro that points to an offset from the start
49  * of the crypto operation structure (rte_crypto_op)
50  *
51  * The returned pointer is cast to type t.
52  *
53  * @param c
54  *   The crypto operation.
55  * @param o
56  *   The offset from the start of the crypto operation.
57  * @param t
58  *   The type to cast the result into.
59  */
60 #define rte_crypto_op_ctod_offset(c, t, o)	\
61 	((t)((char *)(c) + (o)))
62 
63 /**
64  * A macro that returns the physical address that points
65  * to an offset from the start of the crypto operation
66  * (rte_crypto_op)
67  *
68  * @param c
69  *   The crypto operation.
70  * @param o
71  *   The offset from the start of the crypto operation
72  *   to calculate address from.
73  */
74 #define rte_crypto_op_ctophys_offset(c, o)	\
75 	(rte_iova_t)((c)->phys_addr + (o))
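
/*
 * Illustrative usage sketch (not part of the API above): fetching the virtual
 * and IOVA address of per-operation data, assuming the application reserved
 * room for an IV immediately after the rte_crypto_op and rte_crypto_sym_op
 * structures when creating its crypto op pool. The IV_OFFSET value is an
 * assumption about that particular layout, not something mandated here.
 *
 *   #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 *       sizeof(struct rte_crypto_sym_op))
 *
 *   uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *   rte_iova_t iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
 */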
76 
77 /**
78  * Crypto parameters range description
79  */
80 struct rte_crypto_param_range {
81 	uint16_t min;	/**< minimum size */
82 	uint16_t max;	/**< maximum size */
83 	uint16_t increment;
84 	/**< If a range of sizes is supported,
85 	 * this parameter is used to indicate
86 	 * the supported increments in byte size
87 	 * between the minimum and maximum.
88 	 */
89 };
90 
91 /**
92  * Data-unit supported lengths of cipher algorithms.
93  * A bit can represent any set of data-unit sizes
94  * (single size, multiple size, range, etc).
95  */
96 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
97 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
98 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
99 
100 /**
101  * Symmetric Crypto Capability
102  */
103 struct rte_cryptodev_symmetric_capability {
104 	enum rte_crypto_sym_xform_type xform_type;
105 	/**< Transform type : Authentication / Cipher / AEAD */
106 	union {
107 		struct {
108 			enum rte_crypto_auth_algorithm algo;
109 			/**< authentication algorithm */
110 			uint16_t block_size;
111 			/**< algorithm block size */
112 			struct rte_crypto_param_range key_size;
113 			/**< auth key size range */
114 			struct rte_crypto_param_range digest_size;
115 			/**< digest size range */
116 			struct rte_crypto_param_range aad_size;
117 			/**< Additional authentication data size range */
118 			struct rte_crypto_param_range iv_size;
119 			/**< Initialisation vector data size range */
120 		} auth;
121 		/**< Symmetric Authentication transform capabilities */
122 		struct {
123 			enum rte_crypto_cipher_algorithm algo;
124 			/**< cipher algorithm */
125 			uint16_t block_size;
126 			/**< algorithm block size */
127 			struct rte_crypto_param_range key_size;
128 			/**< cipher key size range */
129 			struct rte_crypto_param_range iv_size;
130 			/**< Initialisation vector data size range */
131 			uint32_t dataunit_set;
132 			/**<
133 			 * Supported data-unit lengths:
134 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
135 			 * or 0 for lengths defined in the algorithm standard.
136 			 */
137 		} cipher;
138 		/**< Symmetric Cipher transform capabilities */
139 		struct {
140 			enum rte_crypto_aead_algorithm algo;
141 			/**< AEAD algorithm */
142 			uint16_t block_size;
143 			/**< algorithm block size */
144 			struct rte_crypto_param_range key_size;
145 			/**< AEAD key size range */
146 			struct rte_crypto_param_range digest_size;
147 			/**< digest size range */
148 			struct rte_crypto_param_range aad_size;
149 			/**< Additional authentication data size range */
150 			struct rte_crypto_param_range iv_size;
151 			/**< Initialisation vector data size range */
152 		} aead;
153 	};
154 };
155 
156 /**
157  * Asymmetric Xform Crypto Capability
158  */
159 struct rte_cryptodev_asymmetric_xform_capability {
160 	enum rte_crypto_asym_xform_type xform_type;
161 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
162 
163 	uint32_t op_types;
164 	/**<
165 	 * Bitmask for supported rte_crypto_asym_op_type or
166 	 * rte_crypto_asym_ke_type. Which enum is used is determined
167 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
168 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
169 	 * it is rte_crypto_asym_op_type.
170 	 */
171 
172 	__extension__
173 	union {
174 		struct rte_crypto_param_range modlen;
175 		/**< Range of modulus length supported by modulus based xform.
176 		 * Value 0 means implementation default
177 		 */
178 
179 		uint8_t internal_rng;
180 		/**< Availability of random number generator for Elliptic curve based xform.
181 		 * Value 0 means unavailable, and application should pass the required
182 		 * Value 0 means unavailable, and the application should pass the required
183 		 * random value. Otherwise, the PMD computes the random number internally.
184 
185 		uint32_t op_capa[RTE_CRYPTO_ASYM_OP_LIST_END];
186 		/**< Operation specific capabilities. */
187 	};
188 
189 	uint64_t hash_algos;
190 	/**< Bitmask of hash algorithms supported for op_type. */
191 };
192 
193 /**
194  * Asymmetric Crypto Capability
195  */
196 struct rte_cryptodev_asymmetric_capability {
197 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
198 };
199 
200 
201 /** Structure used to capture a capability of a crypto device */
202 struct rte_cryptodev_capabilities {
203 	enum rte_crypto_op_type op;
204 	/**< Operation type */
205 
206 	union {
207 		struct rte_cryptodev_symmetric_capability sym;
208 		/**< Symmetric operation capability parameters */
209 		struct rte_cryptodev_asymmetric_capability asym;
210 		/**< Asymmetric operation capability parameters */
211 	};
212 };
213 
214 /** Structure used to describe crypto algorithms */
215 struct rte_cryptodev_sym_capability_idx {
216 	enum rte_crypto_sym_xform_type type;
217 	union {
218 		enum rte_crypto_cipher_algorithm cipher;
219 		enum rte_crypto_auth_algorithm auth;
220 		enum rte_crypto_aead_algorithm aead;
221 	} algo;
222 };
223 
224 /**
225  * Structure used to describe asymmetric crypto xforms
226  * Each xform maps to one asym algorithm.
227  */
228 struct rte_cryptodev_asym_capability_idx {
229 	enum rte_crypto_asym_xform_type type;
230 	/**< Asymmetric xform (algo) type */
231 };
232 
233 /**
234  * Provide capabilities available for defined device and algorithm
235  *
236  * @param	dev_id		The identifier of the device.
237  * @param	idx		Description of crypto algorithms.
238  *
239  * @return
240  *   - Return description of the symmetric crypto capability if it exists.
241  *   - Return NULL if the capability does not exist.
242  */
243 const struct rte_cryptodev_symmetric_capability *
244 rte_cryptodev_sym_capability_get(uint8_t dev_id,
245 		const struct rte_cryptodev_sym_capability_idx *idx);
246 
247 /**
248  *  Provide capabilities available for defined device and xform
249  *
250  * @param	dev_id		The identifier of the device.
251  * @param	idx		Description of asym crypto xform.
252  *
253  * @return
254  *   - Return description of the asymmetric crypto capability if it exists.
255  *   - Return NULL if the capability does not exist.
256  */
257 const struct rte_cryptodev_asymmetric_xform_capability *
258 rte_cryptodev_asym_capability_get(uint8_t dev_id,
259 		const struct rte_cryptodev_asym_capability_idx *idx);
260 
261 /**
262  * Check if key size and initial vector are supported
263  * in crypto cipher capability
264  *
265  * @param	capability	Description of the symmetric crypto capability.
266  * @param	key_size	Cipher key size.
267  * @param	iv_size		Cipher initial vector size.
268  *
269  * @return
270  *   - Return 0 if the parameters are in range of the capability.
271  *   - Return -1 if the parameters are out of range of the capability.
272  */
273 int
274 rte_cryptodev_sym_capability_check_cipher(
275 		const struct rte_cryptodev_symmetric_capability *capability,
276 		uint16_t key_size, uint16_t iv_size);
277 
278 /**
279  * Check if the key, digest and initial vector sizes are supported
280  * by the crypto auth capability
281  *
282  * @param	capability	Description of the symmetric crypto capability.
283  * @param	key_size	Auth key size.
284  * @param	digest_size	Auth digest size.
285  * @param	iv_size		Auth initial vector size.
286  *
287  * @return
288  *   - Return 0 if the parameters are in range of the capability.
289  *   - Return -1 if the parameters are out of range of the capability.
290  */
291 int
292 rte_cryptodev_sym_capability_check_auth(
293 		const struct rte_cryptodev_symmetric_capability *capability,
294 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
295 
296 /**
297  * Check if the key, digest, AAD and initial vector sizes are supported
298  * by the crypto AEAD capability
299  *
300  * @param	capability	Description of the symmetric crypto capability.
301  * @param	key_size	AEAD key size.
302  * @param	digest_size	AEAD digest size.
303  * @param	aad_size	AEAD AAD size.
304  * @param	iv_size		AEAD IV size.
305  *
306  * @return
307  *   - Return 0 if the parameters are in range of the capability.
308  *   - Return -1 if the parameters are out of range of the capability.
309  */
310 int
311 rte_cryptodev_sym_capability_check_aead(
312 		const struct rte_cryptodev_symmetric_capability *capability,
313 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
314 		uint16_t iv_size);
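
/*
 * Illustrative usage sketch (not part of the API above): checking whether a
 * device supports AES-CBC with a 16-byte key and 16-byte IV. The sizes are
 * arbitrary example values and error handling is trimmed.
 *
 *   struct rte_cryptodev_sym_capability_idx idx = {
 *       .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *       .algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *   };
 *   const struct rte_cryptodev_symmetric_capability *cap;
 *
 *   cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *   if (cap == NULL)
 *       return -ENOTSUP; // algorithm not supported by this device
 *   if (rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) < 0)
 *       return -ENOTSUP; // requested key/IV sizes out of range
 */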
315 
316 /**
317  * Check if op type is supported
318  *
319  * @param	capability	Description of the asymmetric crypto capability.
320  * @param	op_type		op type
321  *
322  * @return
323  *   - Return 1 if the op type is supported
324  *   - Return 0 if unsupported
325  */
326 int
327 rte_cryptodev_asym_xform_capability_check_optype(
328 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
329 		enum rte_crypto_asym_op_type op_type);
330 
331 /**
332  * Check if modulus length is in supported range
333  *
334  * @param	capability	Description of the asymmetric crypto capability.
335  * @param	modlen		modulus length.
336  *
337  * @return
338  *   - Return 0 if the parameters are in range of the capability.
339  *   - Return -1 if the parameters are out of range of the capability.
340  */
341 int
342 rte_cryptodev_asym_xform_capability_check_modlen(
343 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
344 		uint16_t modlen);
345 
346 /**
347  * Check if hash algorithm is supported.
348  *
349  * @param	capability	Asymmetric crypto capability.
350  * @param	hash		Hash algorithm.
351  *
352  * @return
353  *   - Return true if the hash algorithm is supported.
354  *   - Return false if the hash algorithm is not supported.
355  */
356 bool
357 rte_cryptodev_asym_xform_capability_check_hash(
358 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
359 	enum rte_crypto_auth_algorithm hash);
360 
361 /**
362  * @warning
363  * @b EXPERIMENTAL: this API may change without prior notice.
364  *
365  * Check if op capability is supported
366  *
367  * @param	capability	Description of the asymmetric crypto capability.
368  * @param	op_type		op type
369  * @param	cap		op capability
370  *
371  * @return
372  *   - Return 1 if the op capability is supported
373  *   - Return 0 if unsupported
374  */
375 __rte_experimental
376 int
377 rte_cryptodev_asym_xform_capability_check_opcap(
378 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
379 	enum rte_crypto_asym_op_type op_type, uint8_t cap);
380 
381 /**
382  * Provide the cipher algorithm enum, given an algorithm string
383  *
384  * @param	algo_enum	A pointer to the cipher algorithm
385  *				enum to be filled
386  * @param	algo_string	Cipher algorithm string
387  *
388  * @return
389  * - Return -1 if the string is not valid
390  * - Return 0 if the string is valid
391  */
392 int
393 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
394 		const char *algo_string);
395 
396 /**
397  * Provide the authentication algorithm enum, given an algorithm string
398  *
399  * @param	algo_enum	A pointer to the authentication algorithm
400  *				enum to be filled
401  * @param	algo_string	Authentication algo string
402  *
403  * @return
404  * - Return -1 if the string is not valid
405  * - Return 0 if the string is valid
406  */
407 int
408 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
409 		const char *algo_string);
410 
411 /**
412  * Provide the AEAD algorithm enum, given an algorithm string
413  *
414  * @param	algo_enum	A pointer to the AEAD algorithm
415  *				enum to be filled
416  * @param	algo_string	AEAD algorithm string
417  *
418  * @return
419  * - Return -1 if the string is not valid
420  * - Return 0 if the string is valid
421  */
422 int
423 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
424 		const char *algo_string);
425 
426 /**
427  * Provide the Asymmetric xform enum, given an xform string
428  *
429  * @param	xform_enum	A pointer to the xform type
430  *				enum to be filled
431  * @param	xform_string	xform string
432  *
433  * @return
434  * - Return -1 if the string is not valid
435  * - Return 0 if the string is valid
436  */
437 int
438 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
439 		const char *xform_string);
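
/*
 * Illustrative usage sketch (not part of the API above): resolving an
 * algorithm name, e.g. taken from a command line, into its enum value.
 * The string "aes-cbc" is an example; the accepted names are defined by the
 * cryptodev library string tables.
 *
 *   enum rte_crypto_cipher_algorithm cipher_algo;
 *
 *   if (rte_cryptodev_get_cipher_algo_enum(&cipher_algo, "aes-cbc") < 0)
 *       printf("unknown cipher algorithm\n");
 */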
440 
441 /**
442  * Provide the cipher algorithm string, given an algorithm enum.
443  *
444  * @param	algo_enum	cipher algorithm enum
445  *
446  * @return
447  * - Return NULL if enum is not valid
448  * - Return algo_string corresponding to enum
449  */
450 __rte_experimental
451 const char *
452 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
453 
454 /**
455  * Provide the authentication algorithm string, given an algorithm enum.
456  *
457  * @param	algo_enum	auth algorithm enum
458  *
459  * @return
460  * - Return NULL if enum is not valid
461  * - Return algo_string corresponding to enum
462  */
463 __rte_experimental
464 const char *
465 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
466 
467 /**
468  * Provide the AEAD algorithm string, given an algorithm enum.
469  *
470  * @param	algo_enum	AEAD algorithm enum
471  *
472  * @return
473  * - Return NULL if enum is not valid
474  * - Return algo_string corresponding to enum
475  */
476 __rte_experimental
477 const char *
478 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
479 
480 /**
481  * Provide the Asymmetric xform string, given an xform enum.
482  *
483  * @param	xform_enum	xform type enum
484  *
485  * @return
486  * - Return NULL, if enum is not valid.
487  * - Return xform string, for valid enum.
488  */
489 __rte_experimental
490 const char *
491 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
492 
493 
494 /** Macro used at end of crypto PMD list */
495 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
496 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
497 
498 
499 /**
500  * Crypto device supported feature flags
501  *
502  * Note:
503  * New features flags should be added to the end of the list
504  *
505  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
506  */
507 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
508 /**< Symmetric crypto operations are supported */
509 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
510 /**< Asymmetric crypto operations are supported */
511 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
512 /**< Chaining of symmetric crypto operations is supported */
513 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
514 /**< Utilises CPU SIMD SSE instructions */
515 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
516 /**< Utilises CPU SIMD AVX instructions */
517 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
518 /**< Utilises CPU SIMD AVX2 instructions */
519 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
520 /**< Utilises CPU AES-NI instructions */
521 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
522 /**< Operations are off-loaded to an
523  * external hardware accelerator
524  */
525 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
526 /**< Utilises CPU SIMD AVX512 instructions */
527 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
528 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
529  * are supported
530  */
531 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
532 /**< Out-of-place Scatter-gather (SGL) buffers are
533  * supported in input and output
534  */
535 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
536 /**< Out-of-place Scatter-gather (SGL) buffers are supported
537  * in input, combined with linear buffers (LB), with a
538  * single segment in output
539  */
540 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
541 /**< Out-of-place Scatter-gather (SGL) buffers are supported
542  * in output, combined with linear buffers (LB) in input
543  */
544 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
545 /**< Out-of-place linear buffers (LB) are supported in input and output */
546 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
547 /**< Utilises CPU NEON instructions */
548 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
549 /**< Utilises ARM CPU Cryptographic Extensions */
550 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
551 /**< Support Security Protocol Processing */
552 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
553 /**< Support RSA Private Key OP with exponent */
554 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
555 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
556 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
557 /**< Support encrypted-digest operations where digest is appended to data */
558 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
559 /**< Support asymmetric session-less operations */
560 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
561 /**< Support symmetric cpu-crypto processing */
562 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
563 /**< Support symmetric session-less operations */
564 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
565 /**< Support operations on data which is not byte aligned */
566 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
567 /**< Support accelerator specific symmetric raw data-path APIs */
568 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
569 /**< Support operations on multiple data-units message */
570 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
571 /**< Support wrapped key in cipher xform  */
572 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
573 /**< Support inner checksum computation/verification */
574 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
575 /**< Support Rx injection after security processing */
576 
577 /**
578  * Get the name of a crypto device feature flag
579  *
580  * @param	flag	The mask describing the flag.
581  *
582  * @return
583  *   The name of this flag, or NULL if it's not a valid feature flag.
584  */
585 const char *
586 rte_cryptodev_get_feature_name(uint64_t flag);
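
/*
 * Illustrative usage sketch (not part of the API above): printing every
 * feature flag advertised by a device, by testing each bit of the 64-bit
 * feature_flags field reported in rte_cryptodev_info.
 *
 *   struct rte_cryptodev_info info;
 *   unsigned int i;
 *
 *   rte_cryptodev_info_get(dev_id, &info);
 *   for (i = 0; i < 64; i++) {
 *       uint64_t flag = 1ULL << i;
 *
 *       if (info.feature_flags & flag)
 *           printf("  %s\n", rte_cryptodev_get_feature_name(flag));
 *   }
 */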
587 
588 /**  Crypto device information */
589 /* Structure rte_cryptodev_info 8< */
590 struct rte_cryptodev_info {
591 	const char *driver_name;	/**< Driver name. */
592 	uint8_t driver_id;		/**< Driver identifier */
593 	struct rte_device *device;	/**< Generic device information. */
594 
595 	uint64_t feature_flags;
596 	/**< Feature flags which expose HW/SW features of the given device */
597 
598 	const struct rte_cryptodev_capabilities *capabilities;
599 	/**< Array of devices supported capabilities */
600 
601 	unsigned max_nb_queue_pairs;
602 	/**< Maximum number of queue pairs supported by device. */
603 
604 	uint16_t min_mbuf_headroom_req;
605 	/**< Minimum mbuf headroom required by device */
606 
607 	uint16_t min_mbuf_tailroom_req;
608 	/**< Minimum mbuf tailroom required by device */
609 
610 	struct {
611 		unsigned max_nb_sessions;
612 		/**< Maximum number of sessions supported by device.
613 		 * If 0, the device does not have any limitation in
614 		 * number of sessions that can be used.
615 		 */
616 	} sym;
617 };
618 /* >8 End of structure rte_cryptodev_info. */
619 
620 #define RTE_CRYPTODEV_DETACHED  (0)
621 #define RTE_CRYPTODEV_ATTACHED  (1)
622 
623 /** Definitions of Crypto device event types */
624 enum rte_cryptodev_event_type {
625 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
626 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
627 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
628 };
629 
630 /* Crypto queue pair priority levels */
631 #define RTE_CRYPTODEV_QP_PRIORITY_HIGHEST   0
632 /**< Highest priority of a cryptodev queue pair
633  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
634  */
635 #define RTE_CRYPTODEV_QP_PRIORITY_NORMAL    128
636 /**< Normal priority of a cryptodev queue pair
637  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
638  */
639 #define RTE_CRYPTODEV_QP_PRIORITY_LOWEST    255
640 /**< Lowest priority of a cryptodev queue pair
641  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
642  */
643 
644 /** Crypto device queue pair configuration structure. */
645 /* Structure rte_cryptodev_qp_conf 8<*/
646 struct rte_cryptodev_qp_conf {
647 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
648 	struct rte_mempool *mp_session;
649 	/**< The mempool for creating sessions in sessionless mode */
650 	uint8_t priority;
651 	/**< Priority for this queue pair relative to other queue pairs.
652 	 *
653 	 * The requested priority should be in the range of
654 	 * [@ref RTE_CRYPTODEV_QP_PRIORITY_HIGHEST, @ref RTE_CRYPTODEV_QP_PRIORITY_LOWEST].
655 	 * The implementation may normalize the requested priority to
656 	 * device supported priority value.
657 	 */
658 };
659 /* >8 End of structure rte_cryptodev_qp_conf. */
660 
661 /**
662  * Function type used for processing crypto ops when enqueue/dequeue burst is
663  * called.
664  *
665  * The callback function is called immediately on each enqueue/dequeue burst.
666  *
667  * @param	dev_id		The identifier of the device.
668  * @param	qp_id		The index of the queue pair on which ops are
669  *				enqueued/dequeued. The value must be in the
670  *				range [0, nb_queue_pairs - 1] previously
671  *				supplied to *rte_cryptodev_configure*.
672  * @param	ops		The address of an array of *nb_ops* pointers
673  *				to *rte_crypto_op* structures which contain
674  *				the crypto operations to be processed.
675  * @param	nb_ops		The number of operations to process.
676  * @param	user_param	The arbitrary user parameter passed in by the
677  *				application when the callback was originally
678  *				registered.
679  * @return			The number of ops to be enqueued to the
680  *				crypto device.
681  */
682 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
683 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
684 
685 /**
686  * Typedef for application callback function to be registered by application
687  * software for notification of device events
688  *
689  * @param	dev_id	Crypto device identifier
690  * @param	event	Crypto device event to register for notification of.
691  * @param	cb_arg	User specified parameter to be passed to the
692  *			user's callback function.
693  */
694 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
695 		enum rte_cryptodev_event_type event, void *cb_arg);
696 
697 
698 /** Crypto Device statistics */
699 struct rte_cryptodev_stats {
700 	uint64_t enqueued_count;
701 	/**< Count of all operations enqueued */
702 	uint64_t dequeued_count;
703 	/**< Count of all operations dequeued */
704 
705 	uint64_t enqueue_err_count;
706 	/**< Total error count on operations enqueued */
707 	uint64_t dequeue_err_count;
708 	/**< Total error count on operations dequeued */
709 };
710 
711 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
712 /**< Max length of name of crypto PMD */
713 
714 /**
715  * Get the device identifier for the named crypto device.
716  *
717  * @param	name	device name to select the device structure.
718  *
719  * @return
720  *   - Returns crypto device identifier on success.
721  *   - Return -1 on failure to find named crypto device.
722  */
723 int
724 rte_cryptodev_get_dev_id(const char *name);
725 
726 /**
727  * Get the crypto device name given a device identifier.
728  *
729  * @param dev_id
730  *   The identifier of the device
731  *
732  * @return
733  *   - Returns crypto device name.
734  *   - Returns NULL if crypto device is not present.
735  */
736 const char *
737 rte_cryptodev_name_get(uint8_t dev_id);
738 
739 /**
740  * Get the total number of crypto devices that have been successfully
741  * initialised.
742  *
743  * @return
744  *   - The total number of usable crypto devices.
745  */
746 uint8_t
747 rte_cryptodev_count(void);
748 
749 /**
750  * Get the number of crypto devices that use the given driver.
751  *
752  * @param	driver_id	driver identifier.
753  *
754  * @return
755  *   Returns the number of crypto devices.
756  */
757 uint8_t
758 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
759 
760 /**
761  * Get number and identifiers of attached crypto devices that
762  * use the same crypto driver.
763  *
764  * @param	driver_name	driver name.
765  * @param	devices		output devices identifiers.
766  * @param	nb_devices	maximum number of devices.
767  *
768  * @return
769  *   Returns the number of attached crypto devices.
770  */
771 uint8_t
772 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
773 		uint8_t nb_devices);
774 /**
775  * Return the NUMA socket to which a device is connected
776  *
777  * @param dev_id
778  *   The identifier of the device
779  * @return
780  *   The NUMA socket id to which the device is connected or
781  *   a default of zero if the socket could not be determined.
782  *   -1 if the dev_id value is out of range.
783  */
784 int
785 rte_cryptodev_socket_id(uint8_t dev_id);
786 
787 /** Crypto device configuration structure */
788 /* Structure rte_cryptodev_config 8< */
789 struct rte_cryptodev_config {
790 	int socket_id;			/**< Socket to allocate resources on */
791 	uint16_t nb_queue_pairs;
792 	/**< Number of queue pairs to configure on device */
793 	uint64_t ff_disable;
794 	/**< Feature flags to be disabled. Only the following features are
795 	 * allowed to be disabled,
796 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
797 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
798 	 *  - RTE_CRYPTODEV_FF_SECURITY
799 	 */
800 };
801 /* >8 End of structure rte_cryptodev_config. */
802 
803 /**
804  * Configure a device.
805  *
806  * This function must be invoked first before any other function in the
807  * API. This function can also be re-invoked when a device is in the
808  * stopped state.
809  *
810  * @param	dev_id		The identifier of the device to configure.
811  * @param	config		The crypto device configuration structure.
812  *
813  * @return
814  *   - 0: Success, device configured.
815  *   - <0: Error code returned by the driver configuration function.
816  */
817 int
818 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
819 
820 /**
821  * Start a device.
822  *
823  * The device start step is the last one and consists of setting the configured
824  * offload features and starting the enqueue and dequeue paths of the
825  * device.
826  * On success, all basic functions exported by the API (enqueue/dequeue
827  * operations, statistics, and so on) can be invoked.
828  *
829  * @param dev_id
830  *   The identifier of the device.
831  * @return
832  *   - 0: Success, device started.
833  *   - <0: Error code of the driver device start function.
834  */
835 int
836 rte_cryptodev_start(uint8_t dev_id);
837 
838 /**
839  * Stop a device. The device can be restarted with a call to
840  * rte_cryptodev_start()
841  *
842  * @param	dev_id		The identifier of the device.
843  */
844 void
845 rte_cryptodev_stop(uint8_t dev_id);
846 
847 /**
848  * Close a device. The device cannot be restarted!
849  *
850  * @param	dev_id		The identifier of the device.
851  *
852  * @return
853  *  - 0 on successfully closing device
854  *  - <0 on failure to close device
855  */
856 int
857 rte_cryptodev_close(uint8_t dev_id);
858 
859 /**
860  * Allocate and set up a queue pair for a device.
861  *
862  *
863  * @param	dev_id		The identifier of the device.
864  * @param	queue_pair_id	The index of the queue pair to set up. The
865  *				value must be in the range [0, nb_queue_pair
866  *				- 1] previously supplied to
867  *				rte_cryptodev_configure().
868  * @param	qp_conf		The pointer to the configuration data to be
869  *				used for the queue pair.
870  * @param	socket_id	The *socket_id* argument is the socket
871  *				identifier in case of NUMA. The value can be
872  *				*SOCKET_ID_ANY* if there is no NUMA constraint
873  *				for the DMA memory allocated for the receive
874  *				queue pair.
875  *
876  * @return
877  *   - 0: Success, queue pair correctly set up.
878  *   - <0: Queue pair configuration failed
879  */
880 int
881 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
882 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
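
/*
 * Illustrative bring-up sketch (not part of the API above): the usual
 * configure -> queue pair setup -> start sequence. The queue pair count,
 * descriptor count and the "sess_mp" session mempool are application-chosen
 * example values; error handling is trimmed.
 *
 *   struct rte_cryptodev_config conf = {
 *       .socket_id = rte_cryptodev_socket_id(dev_id),
 *       .nb_queue_pairs = 1,
 *       .ff_disable = 0,
 *   };
 *   struct rte_cryptodev_qp_conf qp_conf = {
 *       .nb_descriptors = 2048,
 *       .mp_session = sess_mp,
 *   };
 *
 *   if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *       return -1;
 *   if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, conf.socket_id) < 0)
 *       return -1;
 *   if (rte_cryptodev_start(dev_id) < 0)
 *       return -1;
 */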
883 
884 /**
885  * @warning
886  * @b EXPERIMENTAL: this API may change without prior notice.
887  *
888  * Reset a queue pair for a device.
889  * The caller of this API must ensure that there are no enqueues to the queue and no
890  * pending/in-flight packets in the queue when the API is called.
891  * The API can reconfigure the queue pair if the queue pair configuration data is provided.
892  *
893  * @param	dev_id		The identifier of the device.
894  * @param	queue_pair_id	The index of the queue pair to reset. The value must be in the
895  *				range [0, nb_queue_pair - 1] previously supplied to
896  *				rte_cryptodev_configure().
897  * @param	qp_conf		The pointer to configuration data to be used for the queue pair.
898  *				It should be NULL, if the API is called from an interrupt context.
899  * @param	socket_id	The *socket_id* argument is the socket identifier in case of NUMA.
900  *				The value can be *SOCKET_ID_ANY* if there is no NUMA constraint
901  *				for the DMA memory allocated for the queue pair.
902  *
903  * @return
904  *   - 0:  Queue pair is reset successfully.
905  *   - ENOTSUP: If the operation is not supported by the PMD.
906  *   - <0: Queue pair reset failed
907  */
908 __rte_experimental
909 int
910 rte_cryptodev_queue_pair_reset(uint8_t dev_id, uint16_t queue_pair_id,
911 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
912 
913 /**
914  * Get the status of queue pairs setup on a specific crypto device
915  *
916  * @param	dev_id		Crypto device identifier.
917  * @param	queue_pair_id	The index of the queue pair to query. The
918  *				value must be in the range [0, nb_queue_pair
919  *				- 1] previously supplied to
920  *				rte_cryptodev_configure().
921  * @return
922  *   - 0: qp was not configured
923  *   - 1: qp was configured
924  *   - -EINVAL: device was not configured
925  */
926 int
927 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
928 
929 /**
930  * Get the number of queue pairs on a specific crypto device
931  *
932  * @param	dev_id		Crypto device identifier.
933  * @return
934  *   - The number of configured queue pairs.
935  */
936 uint16_t
937 rte_cryptodev_queue_pair_count(uint8_t dev_id);
938 
939 
940 /**
941  * Retrieve the general I/O statistics of a device.
942  *
943  * @param	dev_id		The identifier of the device.
944  * @param	stats		A pointer to a structure of type
945  *				*rte_cryptodev_stats* to be filled with the
946  *				values of device counters.
947  * @return
948  *   - Zero if successful.
949  *   - Non-zero otherwise.
950  */
951 int
952 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
953 
954 /**
955  * Reset the general I/O statistics of a device.
956  *
957  * @param	dev_id		The identifier of the device.
958  */
959 void
960 rte_cryptodev_stats_reset(uint8_t dev_id);
961 
962 /**
963  * Retrieve the contextual information of a device.
964  *
965  * @param	dev_id		The identifier of the device.
966  * @param	dev_info	A pointer to a structure of type
967  *				*rte_cryptodev_info* to be filled with the
968  *				contextual information of the device.
969  *
970  * @note The capabilities field of dev_info is set to point to the first
971  * element of an array of struct rte_cryptodev_capabilities. The element after
972  * the last valid element has its op field set to
973  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
974  */
975 void
976 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
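
/*
 * Illustrative usage sketch (not part of the API above): walking the
 * capabilities array returned through rte_cryptodev_info_get(). The array is
 * terminated by an element whose op field is RTE_CRYPTO_OP_TYPE_UNDEFINED,
 * as noted above.
 *
 *   struct rte_cryptodev_info info;
 *   const struct rte_cryptodev_capabilities *cap;
 *
 *   rte_cryptodev_info_get(dev_id, &info);
 *   for (cap = info.capabilities;
 *           cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
 *       if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
 *               cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD)
 *           printf("AEAD algo %d supported\n", cap->sym.aead.algo);
 *   }
 */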
977 
978 
979 /**
980  * Register a callback function for specific device id.
981  *
982  * @param	dev_id		Device id.
983  * @param	event		Event of interest.
984  * @param	cb_fn		User supplied callback function to be called.
985  * @param	cb_arg		Pointer to the parameters for the registered
986  *				callback.
987  *
988  * @return
989  *  - On success, zero.
990  *  - On failure, a negative value.
991  */
992 int
993 rte_cryptodev_callback_register(uint8_t dev_id,
994 		enum rte_cryptodev_event_type event,
995 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
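
/*
 * Illustrative usage sketch (not part of the API above): registering a
 * handler for error interrupt events. "crypto_event_cb" is an example name
 * chosen here, not an API symbol.
 *
 *   static void
 *   crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *           void *cb_arg)
 *   {
 *       if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *           printf("error event on crypto dev %u\n", dev_id);
 *   }
 *
 *   rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *           crypto_event_cb, NULL);
 */
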
996 
997 /**
998  * Unregister a callback function for specific device id.
999  *
1000  * @param	dev_id		The device identifier.
1001  * @param	event		Event of interest.
1002  * @param	cb_fn		User supplied callback function to be called.
1003  * @param	cb_arg		Pointer to the parameters for the registered
1004  *				callback.
1005  *
1006  * @return
1007  *  - On success, zero.
1008  *  - On failure, a negative value.
1009  */
1010 int
1011 rte_cryptodev_callback_unregister(uint8_t dev_id,
1012 		enum rte_cryptodev_event_type event,
1013 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
1014 
1015 /**
1016  * @warning
1017  * @b EXPERIMENTAL: this API may change without prior notice.
1018  *
1019  * Query whether a cryptodev queue pair has pending
1020  * RTE_CRYPTODEV_EVENT_ERROR events.
1021  *
1022  * @param          dev_id	The device identifier.
1023  * @param          qp_id	Queue pair index to be queried.
1024  *
1025  * @return
1026  *   - 1 if requested queue has a pending event.
1027  *   - 0 if no pending event is found.
1028  *   - a negative value on failure
1029  */
1030 __rte_experimental
1031 int
1032 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
1033 
1034 struct rte_cryptodev_callback;
1035 
1036 /** Structure to keep track of registered callbacks */
1037 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
1038 
1039 /**
1040  * Structure used to hold information about the callbacks to be called for a
1041  * queue pair on enqueue/dequeue.
1042  */
1043 struct rte_cryptodev_cb {
1044 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
1045 	/**< Pointer to next callback */
1046 	rte_cryptodev_callback_fn fn;
1047 	/**< Pointer to callback function */
1048 	void *arg;
1049 	/**< Pointer to argument */
1050 };
1051 
1052 /**
1053  * @internal
1054  * Structure used to hold information about the RCU for a queue pair.
1055  */
1056 struct rte_cryptodev_cb_rcu {
1057 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
1058 	/**< Pointer to next callback */
1059 	struct rte_rcu_qsbr *qsbr;
1060 	/**< RCU QSBR variable per queue pair */
1061 };
1062 
1063 /**
1064  * Get the security context for the cryptodev.
1065  *
1066  * @param dev_id
1067  *   The device identifier.
1068  * @return
1069  *   - NULL on error.
1070  *   - Pointer to security context on success.
1071  */
1072 void *
1073 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1074 
1075 /**
1076  * Create a symmetric session mempool.
1077  *
1078  * @param name
1079  *   The unique mempool name.
1080  * @param nb_elts
1081  *   The number of elements in the mempool.
1082  * @param elt_size
1083  *   The size of the element. This should be the size of the cryptodev PMD
1084  *   session private data obtained through
1085  *   rte_cryptodev_sym_get_private_session_size() function call.
1086  *   For the user who wants to use the same mempool for heterogeneous PMDs
1087  *   this value should be the maximum value of their private session sizes.
1088  *   Please note the created mempool will have a bigger element size than
1089  *   this value, as the necessary session header and possible padding are
1090  *   added to each element.
1091  * @param cache_size
1092  *   The number of per-lcore cache elements
1093  * @param priv_size
1094  *   The private data size of each session.
1095  * @param socket_id
1096  *   The *socket_id* argument is the socket identifier in the case of
1097  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1098  *   constraint for the reserved zone.
1099  *
1100  * @return
1101  *  - On success returns the created session mempool pointer
1102  *  - On failure returns NULL
1103  */
1104 struct rte_mempool *
1105 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1106 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1107 	int socket_id);
1108 
1109 
1110 /**
1111  * Create an asymmetric session mempool.
1112  *
1113  * @param name
1114  *   The unique mempool name.
1115  * @param nb_elts
1116  *   The number of elements in the mempool.
1117  * @param cache_size
1118  *   The number of per-lcore cache elements
1119  * @param user_data_size
1120  *   The size of user data to be placed after session private data.
1121  * @param socket_id
1122  *   The *socket_id* argument is the socket identifier in the case of
1123  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1124  *   constraint for the reserved zone.
1125  *
1126  * @return
1127  *  - On success return mempool
1128  *  - On failure returns NULL
1129  */
1130 struct rte_mempool *
1131 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1132 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1133 
1134 /**
1135  * Create a symmetric crypto session and fill out its private data for the
1136  * device id, based on its device type.
1137  *
1138  * @param   dev_id   ID of device that we want the session to be used on
1139  * @param   xforms   Symmetric crypto transform operations to apply on flow
1140  *                   processed with this session
1141  * @param   mp       Mempool to allocate symmetric session objects from
1142  *
1143  * @return
1144  *  - On success return pointer to sym-session.
1145  *  - On failure returns NULL and rte_errno is set to the error code:
1146  *    - EINVAL on invalid arguments.
1147  *    - ENOMEM on memory error for session allocation.
1148  *    - ENOTSUP if device doesn't support session configuration.
1149  */
1150 void *
1151 rte_cryptodev_sym_session_create(uint8_t dev_id,
1152 		struct rte_crypto_sym_xform *xforms,
1153 		struct rte_mempool *mp);
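
/*
 * Illustrative usage sketch (not part of the API above): creating a session
 * mempool sized for this device, then an encrypt-only AES-CBC session. The
 * pool sizes, the "key" buffer and the IV_OFFSET value are application-defined
 * example values (see the op offset macros earlier in this file); error
 * handling is trimmed.
 *
 *   struct rte_mempool *sess_mp;
 *   struct rte_crypto_sym_xform xform = { 0 };
 *   void *sess;
 *
 *   sess_mp = rte_cryptodev_sym_session_pool_create("sess_mp", 1024,
 *       rte_cryptodev_sym_get_private_session_size(dev_id), 32, 0,
 *       rte_cryptodev_socket_id(dev_id));
 *
 *   xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *   xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
 *   xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
 *   xform.cipher.key.data = key;
 *   xform.cipher.key.length = 16;
 *   xform.cipher.iv.offset = IV_OFFSET;
 *   xform.cipher.iv.length = 16;
 *
 *   sess = rte_cryptodev_sym_session_create(dev_id, &xform, sess_mp);
 *   if (sess == NULL)
 *       return -rte_errno;
 */
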
1154 /**
1155  * Create and initialise an asymmetric crypto session structure.
1156  * Calls the PMD to configure the private session data.
1157  *
1158  * @param   dev_id   ID of device that we want the session to be used on
1159  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1160  *                   processed with this session
1161  * @param   mp       mempool to allocate asymmetric session
1162  *                   objects from
1163  * @param   session  void ** for session to be used
1164  *
1165  * @return
1166  *  - 0 on success.
1167  *  - -EINVAL on invalid arguments.
1168  *  - -ENOMEM on memory error for session allocation.
1169  *  - -ENOTSUP if device doesn't support session configuration.
1170  */
1171 int
1172 rte_cryptodev_asym_session_create(uint8_t dev_id,
1173 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1174 		void **session);
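
/*
 * Illustrative usage sketch (not part of the API above): creating a modular
 * exponentiation session. The "mod"/"exp" buffers and their lengths are
 * application-provided example values; error handling is trimmed.
 *
 *   struct rte_mempool *asym_mp;
 *   struct rte_crypto_asym_xform xform = { 0 };
 *   void *sess = NULL;
 *
 *   asym_mp = rte_cryptodev_asym_session_pool_create("asym_mp", 128, 0, 0,
 *       rte_cryptodev_socket_id(dev_id));
 *
 *   xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
 *   xform.modex.modulus.data = mod;
 *   xform.modex.modulus.length = mod_len;
 *   xform.modex.exponent.data = exp;
 *   xform.modex.exponent.length = exp_len;
 *
 *   if (rte_cryptodev_asym_session_create(dev_id, &xform, asym_mp, &sess) < 0)
 *       return -1;
 */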
1175 
1176 /**
1177  * Free a session for the device id and return it to its mempool.
1178  * It is the application's responsibility to ensure that there are
1179  * no in-flight operations still using the session.
1180  *
1181  * @param   dev_id   ID of device that uses the session.
1182  * @param   sess     Session header to be freed.
1183  *
1184  * @return
1185  *  - 0 if successful.
1186  *  - -EINVAL if the session is NULL or the device id does not match.
1187  */
1188 int
1189 rte_cryptodev_sym_session_free(uint8_t dev_id,
1190 	void *sess);
1191 
1192 /**
1193  * Clears and frees asymmetric crypto session header and private data,
1194  * returning it to its original mempool.
1195  *
1196  * @param   dev_id   ID of device that uses the asymmetric session.
1197  * @param   sess     Session header to be freed.
1198  *
1199  * @return
1200  *  - 0 if successful.
1201  *  - -EINVAL if device is invalid or session is NULL.
1202  */
1203 int
1204 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1205 
1206 /**
1207  * Get the size of the asymmetric session header.
1208  *
1209  * @return
1210  *   Size of the asymmetric session header.
1211  */
1212 unsigned int
1213 rte_cryptodev_asym_get_header_session_size(void);
1214 
1215 /**
1216  * Get the size of the private symmetric session data
1217  * for a device.
1218  *
1219  * @param	dev_id		The device identifier.
1220  *
1221  * @return
1222  *   - Size of the private data, if successful
1223  *   - 0 if device is invalid or does not have private
1224  *   symmetric session
1225  */
1226 unsigned int
1227 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1228 
1229 /**
1230  * Get the size of the private data for asymmetric session
1231  * on device
1232  *
1233  * @param	dev_id		The device identifier.
1234  *
1235  * @return
1236  *   - Size of the asymmetric private data, if successful
1237  *   - 0 if device is invalid or does not have private session
1238  */
1239 unsigned int
1240 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1241 
1242 /**
1243  * Check if the crypto device index corresponds to a valid, attached crypto device.
1244  *
1245  * @param	dev_id	Crypto device index.
1246  *
1247  * @return
1248  *   - 1 if the device index is valid, 0 otherwise.
1249  */
1250 unsigned int
1251 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1252 
1253 /**
1254  * Provide driver identifier.
1255  *
1256  * @param name
1257  *   The pointer to a driver name.
1258  * @return
1259  *  The driver type identifier or -1 if no driver found
1260  */
1261 int rte_cryptodev_driver_id_get(const char *name);
1262 
1263 /**
1264  * Provide driver name.
1265  *
1266  * @param driver_id
1267  *   The driver identifier.
1268  * @return
1269  *  The driver name or NULL if no driver found
1270  */
1271 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1272 
1273 /**
1274  * Store user data in a session.
1275  *
1276  * @param	sess		Session pointer allocated by
1277  *				*rte_cryptodev_sym_session_create*.
1278  * @param	data		Pointer to the user data.
1279  * @param	size		Size of the user data.
1280  *
1281  * @return
1282  *  - On success, zero.
1283  *  - On failure, a negative value.
1284  */
1285 int
1286 rte_cryptodev_sym_session_set_user_data(void *sess,
1287 					void *data,
1288 					uint16_t size);
1289 
1290 #define CRYPTO_SESS_OPAQUE_DATA_OFF 0
1291 /**
1292  * Get opaque data from session handle
1293  */
1294 static inline uint64_t
1295 rte_cryptodev_sym_session_opaque_data_get(void *sess)
1296 {
1297 	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
1298 }
1299 
1300 /**
1301  * Set opaque data in session handle
1302  */
1303 static inline void
1304 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1305 {
1306 	uint64_t *data;
1307 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1308 	*data = opaque;
1309 }
1310 
1311 /**
1312  * Get user data stored in a session.
1313  *
1314  * @param	sess		Session pointer allocated by
1315  *				*rte_cryptodev_sym_session_create*.
1316  *
1317  * @return
1318  *  - On success return pointer to user data.
1319  *  - On failure returns NULL.
1320  */
1321 void *
1322 rte_cryptodev_sym_session_get_user_data(void *sess);
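
/*
 * Illustrative usage sketch (not part of the API above): attaching
 * per-session application state and reading it back later. The
 * "struct app_sess_ctx" type is hypothetical; the session mempool must have
 * been created with a user data size (priv_size) large enough to hold it.
 *
 *   struct app_sess_ctx ctx = { .flow_id = 7 };
 *   struct app_sess_ctx *p;
 *
 *   if (rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx)) != 0)
 *       return -1;
 *   p = rte_cryptodev_sym_session_get_user_data(sess);
 */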
1323 
1324 /**
1325  * Store user data in an asymmetric session.
1326  *
1327  * @param	sess		Session pointer allocated by
1328  *				*rte_cryptodev_asym_session_create*.
1329  * @param	data		Pointer to the user data.
1330  * @param	size		Size of the user data.
1331  *
1332  * @return
1333  *  - On success, zero.
1334  *  - -EINVAL if the session pointer is invalid.
1335  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1336  */
1337 int
1338 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1339 
1340 /**
1341  * Get user data stored in an asymmetric session.
1342  *
1343  * @param	sess		Session pointer allocated by
1344  *				*rte_cryptodev_asym_session_create*.
1345  *
1346  * @return
1347  *  - On success return pointer to user data.
1348  *  - On failure returns NULL.
1349  */
1350 void *
1351 rte_cryptodev_asym_session_get_user_data(void *sess);
1352 
1353 /**
1354  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1355  * on user provided data.
1356  *
1357  * @param	dev_id	The device identifier.
1358  * @param	sess	Cryptodev session structure
1359  * @param	ofs	Start and stop offsets for auth and cipher operations
1360  * @param	vec	Vectorized operation descriptor
1361  *
1362  * @return
1363  *  - Returns number of successfully processed packets.
1364  */
1365 uint32_t
1366 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1367 	void *sess, union rte_crypto_sym_ofs ofs,
1368 	struct rte_crypto_sym_vec *vec);
1369 
1370 /**
1371  * Get the size of the raw data-path context buffer.
1372  *
1373  * @param	dev_id		The device identifier.
1374  *
1375  * @return
1376  *   - If the device supports raw data-path APIs, return the context size.
1377  *   - If the device does not support the APIs, return -1.
1378  */
1379 int
1380 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1381 
1382 /**
1383  * Set session event meta data
1384  *
1385  * @param	dev_id		The device identifier.
1386  * @param	sess            Crypto or security session.
1387  * @param	op_type         Operation type.
1388  * @param	sess_type       Session type.
1389  * @param	ev_mdata	Pointer to the event crypto meta data
1390  *				(aka *union rte_event_crypto_metadata*)
1391  * @param	size            Size of ev_mdata.
1392  *
1393  * @return
1394  *  - On success, zero.
1395  *  - On failure, a negative value.
1396  */
1397 int
1398 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1399 	enum rte_crypto_op_type op_type,
1400 	enum rte_crypto_op_sess_type sess_type,
1401 	void *ev_mdata, uint16_t size);
1402 
1403 /**
1404  * Union of different crypto session types, including session-less xform
1405  * pointer.
1406  */
1407 union rte_cryptodev_session_ctx {void *crypto_sess;
1408 	struct rte_crypto_sym_xform *xform;
1409 	struct rte_security_session *sec_sess;
1410 };
1411 
1412 /**
1413  * Enqueue a vectorized operation descriptor into the device queue but the
1414  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1415  * is called.
1416  *
1417  * @param	qp		Driver specific queue pair data.
1418  * @param	drv_ctx		Driver specific context data.
1419  * @param	vec		Vectorized operation descriptor.
1420  * @param	ofs		Start and stop offsets for auth and cipher
1421  *				operations.
1422  * @param	user_data	The array of user data for dequeue later.
1423  * @param	enqueue_status	Driver written value to specify the
1424  *				enqueue status. Possible values:
1425  *				- 1: The number of operations returned are
1426  *				     enqueued successfully.
1427  *				- 0: The number of operations returned are
1428  *				     cached into the queue but are not processed
1429  *				     until rte_cryptodev_raw_enqueue_done() is
1430  *				     called.
1431  *				- negative integer: Error occurred.
1432  * @return
1433  *   - The number of operations in the descriptor successfully enqueued or
1434  *     cached into the queue but not enqueued yet, depends on the
1435  *     "enqueue_status" value.
1436  */
1437 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1438 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1439 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1440 
1441 /**
1442  * Enqueue single raw data vector into the device queue but the driver may or
1443  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1444  *
1445  * @param	qp		Driver specific queue pair data.
1446  * @param	drv_ctx		Driver specific context data.
1447  * @param	data_vec	The buffer data vector.
1448  * @param	n_data_vecs	Number of buffer data vectors.
1449  * @param	ofs		Start and stop offsets for auth and cipher
1450  *				operations.
1451  * @param	iv		IV virtual and IOVA addresses
1452  * @param	digest		digest virtual and IOVA addresses
1453  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1454  *				depends on the algorithm used.
1455  * @param	user_data	The user data.
1456  * @return
1457  *   - 1: The data vector is enqueued successfully.
1458  *   - 0: The data vector is cached into the queue but is not processed
1459  *        until rte_cryptodev_raw_enqueue_done() is called.
1460  *   - negative integer: failure.
1461  */
1462 typedef int (*cryptodev_sym_raw_enqueue_t)(
1463 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1464 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1465 	struct rte_crypto_va_iova_ptr *iv,
1466 	struct rte_crypto_va_iova_ptr *digest,
1467 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1468 	void *user_data);
1469 
1470 /**
1471  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1472  * enqueued/dequeued operations.
1473  *
1474  * @param	qp		Driver specific queue pair data.
1475  * @param	drv_ctx		Driver specific context data.
1476  * @param	n		The total number of processed operations.
1477  * @return
1478  *   - On success return 0.
1479  *   - On failure return negative integer.
1480  */
1481 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1482 	uint32_t n);
1483 
1484 /**
1485  * Typedef of a user-provided function for the driver to get the dequeue count.
1486  * The function may return a fixed number or the number parsed from the user
1487  * data stored in the first processed operation.
1488  *
1489  * @param	user_data	Dequeued user data.
1490  * @return
1491  *  - The number of operations to be dequeued.
1492  */
1493 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1494 
1495 /**
1496  * Typedef of a user-provided function to handle post-dequeue processing,
1497  * such as filling in the operation status.
1498  *
1499  * @param	user_data	Dequeued user data.
1500  * @param	index		Index number of the processed descriptor.
1501  * @param	is_op_success	Operation status provided by the driver.
1502  */
1503 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1504 	uint32_t index, uint8_t is_op_success);
1505 
1506 /**
1507  * Dequeue a burst of symmetric crypto operations.
1508  *
1509  * @param	qp			Driver specific queue pair data.
1510  * @param	drv_ctx			Driver specific context data.
1511  * @param	get_dequeue_count	User provided callback function to
1512  *					obtain dequeue operation count.
1513  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1514  *					value is used to pass the maximum
1515  *					number of operations to be dequeued.
1516  * @param	post_dequeue		User provided callback function to
1517  *					post-process a dequeued operation.
1518  * @param	out_user_data		User data pointer array to be retrieved
1519  *					from the device queue. If
1520  *					*is_user_data_array* is set, there
1521  *					should be enough room to store all
1522  *					user data.
1523  * @param	is_user_data_array	Set 1 if every dequeued user data will
1524  *					be written into out_user_data array.
1525  *					Set 0 if only the first user data will
1526  *					be written into out_user_data array.
1527  * @param	n_success		Driver written value to specify the
1528  *					total successful operations count.
1529  * @param	dequeue_status		Driver written value to specify the
1530  *					dequeue status. Possible values:
1531  *					- 1: Successfully dequeued the number
1532  *					     of operations returned. The user
1533  *					     data previously set during enqueue
1534  *					     is stored in the "out_user_data".
1535  *					- 0: The number of operations returned
1536  *					     are completed and the user data is
1537  *					     stored in the "out_user_data", but
1538  *					     they are not freed from the queue
1539  *					     until
1540  *					     rte_cryptodev_raw_dequeue_done()
1541  *					     is called.
1542  *					- negative integer: Error occurred.
1543  * @return
1544  *   - The number of operations dequeued or completed but not freed from the
1545  *     queue, depends on "dequeue_status" value.
1546  */
1547 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1548 	uint8_t *drv_ctx,
1549 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1550 	uint32_t max_nb_to_dequeue,
1551 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1552 	void **out_user_data, uint8_t is_user_data_array,
1553 	uint32_t *n_success, int *dequeue_status);
1554 
1555 /**
1556  * Dequeue a single symmetric crypto operation.
1557  *
1558  * @param	qp			Driver specific queue pair data.
1559  * @param	drv_ctx			Driver specific context data.
1560  * @param	dequeue_status		Driver written value to specify the
1561  *					dequeue status. Possible values:
1562  *					- 1: Successfully dequeued an operation.
1563  *					     The user data is returned.
1564  *					- 0: The first operation in the queue
1565  *					     is completed and the user data
1566  *					     previously set during enqueue is
1567  *					     returned, but it is not freed from
1568  *					     the queue until
1569  *					     rte_cryptodev_raw_dequeue_done() is
1570  *					     called.
1571  *					- negative integer: Error occurred.
1572  * @param	op_status		Driver written value to specify
1573  *					operation status.
1574  * @return
1575  *   - The user data pointer retrieved from device queue or NULL if no
1576  *     operation is ready for dequeue.
1577  */
1578 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1579 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1580 		enum rte_crypto_op_status *op_status);
1581 
1582 /**
1583  * Context data for raw data-path API crypto process. The buffer of this
1584  * structure is to be allocated by the user application with a size equal to
1585  * or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1586  */
1587 struct rte_crypto_raw_dp_ctx {
1588 	void *qp_data;
1589 
1590 	cryptodev_sym_raw_enqueue_t enqueue;
1591 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1592 	cryptodev_sym_raw_operation_done_t enqueue_done;
1593 	cryptodev_sym_raw_dequeue_t dequeue;
1594 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1595 	cryptodev_sym_raw_operation_done_t dequeue_done;
1596 
1597 	/* Driver specific context data */
1598 	uint8_t drv_ctx_data[];
1599 };
1600 
1601 /**
1602  * Configure raw data-path context data.
1603  *
1604  * @param	dev_id		The device identifier.
1605  * @param	qp_id		The index of the queue pair from which to
1606  *				retrieve processed packets. The value must be
1607  *				in the range [0, nb_queue_pair - 1] previously
1608  *				supplied to rte_cryptodev_configure().
1609  * @param	ctx		The raw data-path context data.
1610  * @param	sess_type	Session type.
1611  * @param	session_ctx	Session context data.
1612  * @param	is_update	Set to 0 to initialize the ctx.
1613  *				Set to 1 if the ctx is already initialized and
1614  *				only the session context data is to be updated.
1615  * @return
1616  *   - On success return 0.
1617  *   - On failure return negative integer.
1618  *     - -EINVAL if input parameters are invalid.
1619  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1620  *        provided session.
1621  */
1622 int
1623 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1624 	struct rte_crypto_raw_dp_ctx *ctx,
1625 	enum rte_crypto_op_sess_type sess_type,
1626 	union rte_cryptodev_session_ctx session_ctx,
1627 	uint8_t is_update);
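
/*
 * Illustrative usage sketch (not part of the API contract): allocate and
 * configure a raw data-path context for a session-based symmetric operation.
 * The variables dev_id, qp_id and sess, and the use of rte_zmalloc(), are
 * assumptions made for this example only; error handling is abbreviated.
 *
 *	int ctx_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *
 *	if (ctx_size < 0)
 *		return -1;	// raw data-path API not supported
 *	ctx = rte_zmalloc(NULL, ctx_size, 0);
 *	if (ctx == NULL ||
 *	    rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
 *		return -1;	// configuration failed
 */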
1628 
1629 /**
1630  * Enqueue a vectorized operation descriptor into the device queue. The driver
1631  * may not start processing until rte_cryptodev_raw_enqueue_done()
1632  * is called.
1633  *
1634  * @param	ctx		The initialized raw data-path context data.
1635  * @param	vec		Vectorized operation descriptor.
1636  * @param	ofs		Start and stop offsets for auth and cipher
1637  *				operations.
1638  * @param	user_data	The array of user data for dequeue later.
1639  * @param	enqueue_status	Driver written value to specify the
1640  *				enqueue status. Possible values:
1641  *				- 1: The number of operations returned are
1642  *				     enqueued successfully.
1643  *				- 0: The number of operations returned are
1644  *				     cached into the queue but are not processed
1645  *				     until rte_cryptodev_raw_enqueue_done() is
1646  *				     called.
1647  *				- negative integer: Error occurred.
1648  * @return
1649  *   - The number of operations in the descriptor successfully enqueued, or
1650  *     cached into the queue but not enqueued yet, depending on the
1651  *     "enqueue_status" value.
1652  */
1653 uint32_t
1654 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1655 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1656 	void **user_data, int *enqueue_status);
1657 
1658 /**
1659  * Enqueue a single raw data vector into the device queue. The driver may
1660  * not start processing until rte_cryptodev_raw_enqueue_done() is called.
1661  *
1662  * @param	ctx		The initialized raw data-path context data.
1663  * @param	data_vec	The buffer data vector.
1664  * @param	n_data_vecs	Number of buffer data vectors.
1665  * @param	ofs		Start and stop offsets for auth and cipher
1666  *				operations.
1667  * @param	iv		IV virtual and IOVA addresses
1668  * @param	digest		digest virtual and IOVA addresses
1669  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1670  *				depends on the algorithm used.
1671  * @param	user_data	The user data.
1672  * @return
1673  *   - 1: The data vector is enqueued successfully.
1674  *   - 0: The data vector is cached into the queue but is not processed
1675  *        until rte_cryptodev_raw_enqueue_done() is called.
1676  *   - negative integer: failure.
1677  */
1678 __rte_experimental
1679 static __rte_always_inline int
1680 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1681 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1682 	union rte_crypto_sym_ofs ofs,
1683 	struct rte_crypto_va_iova_ptr *iv,
1684 	struct rte_crypto_va_iova_ptr *digest,
1685 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1686 	void *user_data)
1687 {
1688 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1689 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1690 }
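
/*
 * Illustrative usage sketch, assuming a configured raw DP context "ctx", a
 * contiguous buffer "buf" of length "len" with IOVA "buf_iova", pre-filled
 * rte_crypto_va_iova_ptr structures "iv", "digest" and "aad", and a
 * "user_data" pointer identifying the request: enqueue one data vector and,
 * if it was only cached, submit it with rte_cryptodev_raw_enqueue_done().
 *
 *	struct rte_crypto_vec data_vec = {
 *		.base = buf, .iova = buf_iova, .len = len,
 *	};
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	int status;
 *
 *	status = rte_cryptodev_raw_enqueue(ctx, &data_vec, 1, ofs,
 *			&iv, &digest, &aad, user_data);
 *	if (status < 0)
 *		return -1;	// enqueue failed
 *	if (status == 0 && rte_cryptodev_raw_enqueue_done(ctx, 1) < 0)
 *		return -1;	// the cached operation could not be submitted
 */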
1691 
1692 /**
1693  * Start processing all operations enqueued since the last
1694  * rte_cryptodev_configure_raw_dp_ctx() call.
1695  *
1696  * @param	ctx	The initialized raw data-path context data.
1697  * @param	n	The number of operations cached.
1698  * @return
1699  *   - On success return 0.
1700  *   - On failure return negative integer.
1701  */
1702 int
1703 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1704 		uint32_t n);
1705 
1706 /**
1707  * Dequeue a burst of processed symmetric crypto operations.
1708  *
1709  * @param	ctx			The initialized raw data-path context
1710  *					data.
1711  * @param	get_dequeue_count	User provided callback function to
1712  *					obtain dequeue operation count.
1713  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1714  *					value is used to pass the maximum
1715  *					number of operations to be dequeued.
1716  * @param	post_dequeue		User provided callback function to
1717  *					post-process a dequeued operation.
1718  * @param	out_user_data		User data pointer array to be retrieved
1719  *					from the device queue. If
1720  *					*is_user_data_array* is set, there
1721  *					must be enough room to store all
1722  *					user data.
1723  * @param	is_user_data_array	Set 1 if every dequeued user data will
1724  *					be written into out_user_data array.
1725  *					Set 0 if only the first user data will
1726  *					be written into out_user_data array.
1727  * @param	n_success		Driver written value to specify the
1728  *					total count of successful operations.
1729  * @param	dequeue_status		Driver written value to specify the
1730  *					dequeue status. Possible values:
1731  *					- 1: Successfully dequeued the number
1732  *					     of operations returned. The user
1733  *					     data previously set during enqueue
1734  *					     is stored in the "out_user_data".
1735  *					- 0: The returned number of operations
1736  *					     are completed and their user data
1737  *					     is stored in the "out_user_data",
1738  *					     but they are not freed from the
1739  *					     queue until
1740  *					     rte_cryptodev_raw_dequeue_done()
1741  *					     is called.
1742  *					- negative integer: Error occurred.
1743  * @return
1744  *   - The number of operations dequeued, or completed but not freed from
1745  *     the queue, depending on the "dequeue_status" value.
1746  */
1747 uint32_t
1748 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1749 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1750 	uint32_t max_nb_to_dequeue,
1751 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1752 	void **out_user_data, uint8_t is_user_data_array,
1753 	uint32_t *n_success, int *dequeue_status);
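
/*
 * Illustrative usage sketch; the burst size "BURST" and the post-dequeue
 * callback "mark_status" are assumptions made for this example. It dequeues
 * up to BURST operations using a fixed maximum count (get_dequeue_count is
 * NULL) and records per-operation status through the callback.
 *
 *	static void
 *	mark_status(void *user_data, uint32_t index, uint8_t is_op_success)
 *	{
 *		// e.g. flag the request tracked by user_data as done or failed
 *	}
 *
 *	void *out_user_data[BURST];
 *	uint32_t n_success = 0;
 *	int dequeue_status = 0;
 *	uint32_t n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, BURST,
 *			mark_status, out_user_data, 1, &n_success,
 *			&dequeue_status);
 *
 *	if (dequeue_status == 0 && n > 0)
 *		rte_cryptodev_raw_dequeue_done(ctx, n);
 */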
1754 
1755 /**
1756  * Dequeue a single processed symmetric crypto operation.
1757  *
1758  * @param	ctx			The initialized raw data-path context
1759  *					data.
1760  * @param	dequeue_status		Driver written value to specify the
1761  *					dequeue status. Possible values:
1762  *					- 1: Successfully dequeued an operation.
1763  *					     The user data is returned.
1764  *					- 0: The first operation in the queue
1765  *					     is completed and the user data
1766  *					     previously set during enqueue is
1767  *					     returned, but it is not freed from
1768  *					     the queue until
1769  *					     rte_cryptodev_raw_dequeue_done() is
1770  *					     called.
1771  *					- negative integer: Error occurred.
1772  * @param	op_status		Driver written value to specify
1773  *					operation status.
1774  * @return
1775  *   - The user data pointer retrieved from device queue or NULL if no
1776  *     operation is ready for dequeue.
1777  */
1778 __rte_experimental
1779 static __rte_always_inline void *
1780 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1781 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1782 {
1783 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1784 			op_status);
1785 }
1786 
1787 /**
1788  * Inform the queue pair that the dequeue operations are finished.
1789  *
1790  * @param	ctx	The initialized raw data-path context data.
1791  * @param	n	The number of operations.
1792  * @return
1793  *   - On success return 0.
1794  *   - On failure return negative integer.
1795  */
1796 int
1797 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1798 		uint32_t n);
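
/*
 * Illustrative usage sketch (variable names are assumptions made for this
 * example): poll a single completed operation with rte_cryptodev_raw_dequeue()
 * and, when the driver reports status 0, release it from the queue with
 * rte_cryptodev_raw_dequeue_done().
 *
 *	int dequeue_status;
 *	enum rte_crypto_op_status op_status;
 *	void *user_data = rte_cryptodev_raw_dequeue(ctx, &dequeue_status,
 *			&op_status);
 *
 *	if (user_data != NULL) {
 *		if (op_status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
 *			// the request tracked by user_data completed successfully
 *		}
 *		if (dequeue_status == 0)
 *			rte_cryptodev_raw_dequeue_done(ctx, 1);
 *	}
 */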
1799 
1800 /**
1801  * Add a user callback for a given crypto device and queue pair which will be
1802  * called on crypto ops enqueue.
1803  *
1804  * This API configures a function to be called for each burst of crypto ops
1805  * received on a given crypto device queue pair. The return value is a pointer
1806  * that can be used later to remove the callback using
1807  * rte_cryptodev_remove_enq_callback().
1808  *
1809  * Callbacks registered by the application do not survive
1810  * rte_cryptodev_configure(), as it reinitializes the callback list.
1811  * It is the user's responsibility to remove all installed callbacks before
1812  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1813  * The application is expected to call this add API after rte_cryptodev_configure().
1814  *
1815  * Multiple functions can be registered per queue pair and they are called
1816  * in the order they were added. The API does not restrict the maximum number
1817  * of callbacks.
1818  *
1819  * @param	dev_id		The identifier of the device.
1820  * @param	qp_id		The index of the queue pair on which ops are
1821  *				to be enqueued for processing. The value
1822  *				must be in the range [0, nb_queue_pairs - 1]
1823  *				previously supplied to
1824  *				*rte_cryptodev_configure*.
1825  * @param	cb_fn		The callback function
1826  * @param	cb_arg		A generic pointer parameter which will be passed
1827  *				to each invocation of the callback function on
1828  *				this crypto device and queue pair.
1829  *
1830  * @return
1831  *  - NULL on error; rte_errno will contain the error code.
1832  *  - On success, a pointer value which can later be used to remove the
1833  *    callback.
1834  */
1835 struct rte_cryptodev_cb *
1836 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1837 			       uint16_t qp_id,
1838 			       rte_cryptodev_callback_fn cb_fn,
1839 			       void *cb_arg);
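
/*
 * Illustrative usage sketch; the callback "count_enq_cb" and the counter
 * "enq_count" are assumptions made for this example. The callback must match
 * rte_cryptodev_callback_fn; it is registered after rte_cryptodev_configure()
 * and removed before the device is reconfigured.
 *
 *	static uint64_t enq_count;
 *
 *	static uint16_t
 *	count_enq_cb(uint16_t dev_id, uint16_t qp_id,
 *		     struct rte_crypto_op **ops, uint16_t nb_ops, void *arg)
 *	{
 *		// dev_id, qp_id, ops and arg are unused in this sketch
 *		enq_count += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb = rte_cryptodev_add_enq_callback(dev_id,
 *			qp_id, count_enq_cb, NULL);
 *	if (cb == NULL)
 *		return -rte_errno;
 *	...
 *	rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 */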
1840 
1841 /**
1842  * Remove a user callback function for given crypto device and queue pair.
1843  *
1844  * This function is used to remove enqueue callbacks that were added to a
1845  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1846  *
1849  * @param	dev_id		The identifier of the device.
1850  * @param	qp_id		The index of the queue pair on which ops are
1851  *				to be enqueued. The value must be in the
1852  *				range [0, nb_queue_pairs - 1] previously
1853  *				supplied to *rte_cryptodev_configure*.
1854  * @param	cb		Pointer to user supplied callback created via
1855  *				rte_cryptodev_add_enq_callback().
1856  *
1857  * @return
1858  *   -  0: Success. Callback was removed.
1859  *   - <0: The dev_id or the qp_id is out of range, or the callback
1860  *         is NULL or not found for the crypto device queue pair.
1861  */
1862 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1863 				      uint16_t qp_id,
1864 				      struct rte_cryptodev_cb *cb);
1865 
1866 /**
1867  * Add a user callback for a given crypto device and queue pair which will be
1868  * called on crypto ops dequeue.
1869  *
1870  * This API configures a function to be called for each burst of crypto ops
1871  * received on a given crypto device queue pair. The return value is a pointer
1872  * that can be used later to remove the callback using
1873  * rte_cryptodev_remove_deq_callback().
1874  *
1875  * Callbacks registered by the application do not survive
1876  * rte_cryptodev_configure(), as it reinitializes the callback list.
1877  * It is the user's responsibility to remove all installed callbacks before
1878  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1879  * The application is expected to call this add API after rte_cryptodev_configure().
1880  *
1881  * Multiple functions can be registered per queue pair and they are called
1882  * in the order they were added. The API does not restrict the maximum number
1883  * of callbacks.
1884  *
1885  * @param	dev_id		The identifier of the device.
1886  * @param	qp_id		The index of the queue pair on which ops are
1887  *				to be dequeued. The value must be in the
1888  *				range [0, nb_queue_pairs - 1] previously
1889  *				supplied to *rte_cryptodev_configure*.
1890  * @param	cb_fn		The callback function
1891  * @param	cb_arg		A generic pointer parameter which will be passed
1892  *				to each invocation of the callback function on
1893  *				this crypto device and queue pair.
1894  *
1895  * @return
1896  *   - NULL on error; rte_errno will contain the error code.
1897  *   - On success, a pointer value which can later be used to remove the
1898  *     callback.
1899  */
1900 struct rte_cryptodev_cb *
1901 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1902 			       uint16_t qp_id,
1903 			       rte_cryptodev_callback_fn cb_fn,
1904 			       void *cb_arg);
1905 
1906 /**
1907  * Remove a user callback function for given crypto device and queue pair.
1908  *
1909  * This function is used to remove dequeue callbacks that were added to a
1910  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1911  *
1914  * @param	dev_id		The identifier of the device.
1915  * @param	qp_id		The index of the queue pair on which ops are
1916  *				to be dequeued. The value must be in the
1917  *				range [0, nb_queue_pairs - 1] previously
1918  *				supplied to *rte_cryptodev_configure*.
1919  * @param	cb		Pointer to user supplied callback created via
1920  *				rte_cryptodev_add_deq_callback().
1921  *
1922  * @return
1923  *   -  0: Success. Callback was removed.
1924  *   - <0: The dev_id or the qp_id is out of range, or the callback
1925  *         is NULL or not found for the crypto device queue pair.
1926  */
1927 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1928 				      uint16_t qp_id,
1929 				      struct rte_cryptodev_cb *cb);
1930 
1931 #include <rte_cryptodev_core.h>
1932 
1933 #ifdef __cplusplus
1934 extern "C" {
1935 #endif
1936 /**
1937  *
1938  * Dequeue a burst of processed crypto operations from a queue on the crypto
1939  * device. The dequeued operations are stored in *rte_crypto_op* structures
1940  * whose pointers are supplied in the *ops* array.
1941  *
1942  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1943  * actually dequeued, which is the number of *rte_crypto_op* data structures
1944  * effectively supplied into the *ops* array.
1945  *
1946  * A return value equal to *nb_ops* indicates that the queue contained
1947  * at least *nb_ops* operations, and this is likely to signify that other
1948  * processed operations remain in the device's output queue. Applications
1949  * implementing a "retrieve as many processed operations as possible" policy
1950  * can check this specific case and keep invoking the
1951  * rte_cryptodev_dequeue_burst() function until a value less than
1952  * *nb_ops* is returned.
1953  *
1954  * The rte_cryptodev_dequeue_burst() function does not provide any error
1955  * notification to avoid the corresponding overhead.
1956  *
1957  * @param	dev_id		The symmetric crypto device identifier
1958  * @param	qp_id		The index of the queue pair from which to
1959  *				retrieve processed packets. The value must be
1960  *				in the range [0, nb_queue_pair - 1] previously
1961  *				supplied to rte_cryptodev_configure().
1962  * @param	ops		The address of an array of pointers to
1963  *				*rte_crypto_op* structures that must be
1964  *				large enough to store *nb_ops* pointers in it.
1965  * @param	nb_ops		The maximum number of operations to dequeue.
1966  *
1967  * @return
1968  *   - The number of operations actually dequeued, which is the number
1969  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1970  *   *ops* array.
1971  */
1972 static inline uint16_t
1973 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1974 		struct rte_crypto_op **ops, uint16_t nb_ops)
1975 {
1976 	const struct rte_crypto_fp_ops *fp_ops;
1977 	void *qp;
1978 
1979 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1980 
1981 	fp_ops = &rte_crypto_fp_ops[dev_id];
1982 	qp = fp_ops->qp.data[qp_id];
1983 
1984 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1985 
1986 #ifdef RTE_CRYPTO_CALLBACKS
1987 	if (unlikely(fp_ops->qp.deq_cb[qp_id].next != NULL)) {
1988 		struct rte_cryptodev_cb_rcu *list;
1989 		struct rte_cryptodev_cb *cb;
1990 
1991 		/* rte_memory_order_release memory order was used when the
1992 		 * callback was inserted into the list.
1993 		 * Since there is a clear dependency between loading
1994 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1995 		 * not required.
1996 		 */
1997 		list = &fp_ops->qp.deq_cb[qp_id];
1998 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1999 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
2000 
2001 		while (cb != NULL) {
2002 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
2003 					cb->arg);
2004 			cb = cb->next;
2005 		}
2006 
2007 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2008 	}
2009 #endif
2010 	return nb_ops;
2011 }
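
/*
 * Illustrative sketch of the "retrieve as many processed operations as
 * possible" policy described above; "BURST" and the handle_completed_op()
 * helper are assumptions made for this example.
 *
 *	struct rte_crypto_op *deq_ops[BURST];
 *	uint16_t nb_deq;
 *
 *	do {
 *		nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *				deq_ops, BURST);
 *		for (uint16_t i = 0; i < nb_deq; i++)
 *			handle_completed_op(deq_ops[i]);
 *	} while (nb_deq == BURST);
 */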
2012 
2013 /**
2014  * Enqueue a burst of operations for processing on a crypto device.
2015  *
2016  * The rte_cryptodev_enqueue_burst() function is invoked to place
2017  * crypto operations on the queue *qp_id* of the device designated by
2018  * its *dev_id*.
2019  *
2020  * The *nb_ops* parameter is the number of operations to process which are
2021  * supplied in the *ops* array of *rte_crypto_op* structures.
2022  *
2023  * The rte_cryptodev_enqueue_burst() function returns the number of
2024  * operations it actually enqueued for processing. A return value equal to
2025  * *nb_ops* means that all packets have been enqueued.
2026  *
2027  * @param	dev_id		The identifier of the device.
2028  * @param	qp_id		The index of the queue pair which packets are
2029  *				to be enqueued for processing. The value
2030  *				must be in the range [0, nb_queue_pairs - 1]
2031  *				previously supplied to
2032  *				 *rte_cryptodev_configure*.
2033  * @param	ops		The address of an array of *nb_ops* pointers
2034  *				to *rte_crypto_op* structures which contain
2035  *				the crypto operations to be processed.
2036  * @param	nb_ops		The number of operations to process.
2037  *
2038  * @return
2039  * The number of operations actually enqueued on the crypto device. The return
2040  * value can be less than the value of the *nb_ops* parameter when the
2041  * crypto devices queue is full or if invalid parameters are specified in
2042  * crypto device's queue is full or if invalid parameters are specified in
2043  */
2044 static inline uint16_t
2045 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
2046 		struct rte_crypto_op **ops, uint16_t nb_ops)
2047 {
2048 	const struct rte_crypto_fp_ops *fp_ops;
2049 	void *qp;
2050 
2051 	fp_ops = &rte_crypto_fp_ops[dev_id];
2052 	qp = fp_ops->qp.data[qp_id];
2053 #ifdef RTE_CRYPTO_CALLBACKS
2054 	if (unlikely(fp_ops->qp.enq_cb[qp_id].next != NULL)) {
2055 		struct rte_cryptodev_cb_rcu *list;
2056 		struct rte_cryptodev_cb *cb;
2057 
2058 		/* rte_memory_order_release memory order was used when the
2059 		 * callback was inserted into the list.
2060 		 * Since there is a clear dependency between loading
2061 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
2062 		 * not required.
2063 		 */
2064 		list = &fp_ops->qp.enq_cb[qp_id];
2065 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
2066 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
2067 
2068 		while (cb != NULL) {
2069 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
2070 					cb->arg);
2071 			cb = cb->next;
2072 		}
2073 
2074 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2075 	}
2076 #endif
2077 
2078 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2079 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2080 }
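
/*
 * Illustrative sketch (the prepared "ops" array and "nb_ops" count are
 * assumptions made for this example): submit a burst and retry the tail the
 * device queue could not accept. A real application may prefer to back off,
 * drain completions or drop instead of busy-retrying.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_ops)
 *		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *				&ops[sent], nb_ops - sent);
 */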
2081 
2082 /**
2083  * @warning
2084  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2085  *
2086  * Get the number of used descriptors or depth of a cryptodev queue pair.
2087  *
2088  * This function retrieves the number of used descriptors in a crypto queue.
2089  * Applications can use this API in the fast path to inspect QP occupancy and
2090  * take appropriate action.
2091  *
2092  * Since it is a fast-path function, no check is performed on dev_id and qp_id.
2093  * The caller must therefore ensure that the device is enabled and the queue pair is set up.
2094  *
2095  * @param	dev_id		The identifier of the device.
2096  * @param	qp_id		The index of the queue pair for which used descriptor
2097  *				count is to be retrieved. The value
2098  *				must be in the range [0, nb_queue_pairs - 1]
2099  *				previously supplied to *rte_cryptodev_configure*.
2100  *
2101  * @return
2102  *  The number of used descriptors on the specified queue pair, or:
2103  *   - (-ENOTSUP) if the device does not support this function.
2104  */
2106 __rte_experimental
2107 static inline int
2108 rte_cryptodev_qp_depth_used(uint8_t dev_id, uint16_t qp_id)
2109 {
2110 	const struct rte_crypto_fp_ops *fp_ops;
2111 	void *qp;
2112 	int rc;
2113 
2114 	fp_ops = &rte_crypto_fp_ops[dev_id];
2115 	qp = fp_ops->qp.data[qp_id];
2116 
2117 	if (fp_ops->qp_depth_used == NULL) {
2118 		rc = -ENOTSUP;
2119 		goto out;
2120 	}
2121 
2122 	rc = fp_ops->qp_depth_used(qp);
2123 out:
2124 	rte_cryptodev_trace_qp_depth_used(dev_id, qp_id);
2125 	return rc;
2126 }
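
/*
 * Illustrative sketch ("QP_SIZE" and "BURST" are assumptions made for this
 * example; a real application would use its configured descriptor count):
 * use the used-descriptor count for back-pressure before enqueueing more.
 *
 *	int used = rte_cryptodev_qp_depth_used(dev_id, qp_id);
 *
 *	if (used >= 0 && (unsigned int)(QP_SIZE - used) < BURST) {
 *		// queue pair nearly full: dequeue completions before enqueueing
 *	}
 */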
2127 
2128 
2129 #ifdef __cplusplus
2130 }
2131 #endif
2132 
2133 #endif /* _RTE_CRYPTODEV_H_ */
2134