1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #include <rte_compat.h>
18 #include "rte_kvargs.h"
19 #include "rte_crypto.h"
20 #include <rte_common.h>
21 #include <rte_rcu_qsbr.h>
22 
23 #include "rte_cryptodev_trace_fp.h"
24 
25 /**
26  * @internal Logtype used for cryptodev related messages.
27  */
28 extern int rte_cryptodev_logtype;
29 #define RTE_LOGTYPE_CRYPTODEV rte_cryptodev_logtype
30 
31 /* Logging Macros */
32 #define CDEV_LOG_ERR(...) \
33 	RTE_LOG_LINE_PREFIX(ERR, CRYPTODEV, \
34 		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
35 
36 #define CDEV_LOG_INFO(...) \
37 	RTE_LOG_LINE(INFO, CRYPTODEV, "" __VA_ARGS__)
38 
39 #define CDEV_LOG_DEBUG(...) \
40 	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
41 		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
42 
43 #define CDEV_PMD_TRACE(...) \
44 	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
45 		"[%s] %s: ", dev RTE_LOG_COMMA __func__, __VA_ARGS__)
46 
47 /**
48  * A macro that points to an offset from the start
49  * of the crypto operation structure (rte_crypto_op)
50  *
51  * The returned pointer is cast to type t.
52  *
53  * @param c
54  *   The crypto operation.
55  * @param o
56  *   The offset from the start of the crypto operation.
57  * @param t
58  *   The type to cast the result into.
59  */
60 #define rte_crypto_op_ctod_offset(c, t, o)	\
61 	((t)((char *)(c) + (o)))
62 
63 /**
64  * A macro that returns the physical address that points
65  * to an offset from the start of the crypto operation
66  * (rte_crypto_op)
67  *
68  * @param c
69  *   The crypto operation.
70  * @param o
71  *   The offset from the start of the crypto operation
72  *   to calculate address from.
73  */
74 #define rte_crypto_op_ctophys_offset(c, o)	\
75 	(rte_iova_t)((c)->phys_addr + (o))
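/*
 * Usage sketch for the two offset macros above (illustrative only).
 * IV_OFFSET is an application-defined assumption: the offset at which the
 * application placed the IV when building the crypto operation.
 *
 *	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 *			sizeof(struct rte_crypto_sym_op))
 *
 *	uint8_t *iv_va = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *	rte_iova_t iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
 */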
76 
77 /**
78  * Crypto parameters range description
79  */
80 struct rte_crypto_param_range {
81 	uint16_t min;	/**< minimum size */
82 	uint16_t max;	/**< maximum size */
83 	uint16_t increment;
84 	/**< If a range of sizes is supported,
85 	 * this parameter is used to indicate the
86 	 * increments in byte size that are supported
87 	 * between the minimum and maximum.
88 	 */
89 };
90 
91 /**
92  * Data-unit supported lengths of cipher algorithms.
93  * A bit can represent any set of data-unit sizes
94  * (single size, multiple size, range, etc).
95  */
96 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
97 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
98 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
99 
100 /**
101  * Symmetric Crypto Capability
102  */
103 struct rte_cryptodev_symmetric_capability {
104 	enum rte_crypto_sym_xform_type xform_type;
105 	/**< Transform type : Authentication / Cipher / AEAD */
106 	union {
107 		struct {
108 			enum rte_crypto_auth_algorithm algo;
109 			/**< authentication algorithm */
110 			uint16_t block_size;
111 			/**< algorithm block size */
112 			struct rte_crypto_param_range key_size;
113 			/**< auth key size range */
114 			struct rte_crypto_param_range digest_size;
115 			/**< digest size range */
116 			struct rte_crypto_param_range aad_size;
117 			/**< Additional authentication data size range */
118 			struct rte_crypto_param_range iv_size;
119 			/**< Initialisation vector data size range */
120 		} auth;
121 		/**< Symmetric Authentication transform capabilities */
122 		struct {
123 			enum rte_crypto_cipher_algorithm algo;
124 			/**< cipher algorithm */
125 			uint16_t block_size;
126 			/**< algorithm block size */
127 			struct rte_crypto_param_range key_size;
128 			/**< cipher key size range */
129 			struct rte_crypto_param_range iv_size;
130 			/**< Initialisation vector data size range */
131 			uint32_t dataunit_set;
132 			/**<
133 			 * Supported data-unit lengths:
134 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
135 			 * or 0 for lengths defined in the algorithm standard.
136 			 */
137 		} cipher;
138 		/**< Symmetric Cipher transform capabilities */
139 		struct {
140 			enum rte_crypto_aead_algorithm algo;
141 			/**< AEAD algorithm */
142 			uint16_t block_size;
143 			/**< algorithm block size */
144 			struct rte_crypto_param_range key_size;
145 			/**< AEAD key size range */
146 			struct rte_crypto_param_range digest_size;
147 			/**< digest size range */
148 			struct rte_crypto_param_range aad_size;
149 			/**< Additional authentication data size range */
150 			struct rte_crypto_param_range iv_size;
151 			/**< Initialisation vector data size range */
152 		} aead;
153 	};
154 };
155 
156 /**
157  * Asymmetric Xform Crypto Capability
158  */
159 struct rte_cryptodev_asymmetric_xform_capability {
160 	enum rte_crypto_asym_xform_type xform_type;
161 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
162 
163 	uint32_t op_types;
164 	/**<
165 	 * Bitmask for supported rte_crypto_asym_op_type or
166 	 * rte_crypto_asym_ke_type. Which enum is used is determined
167 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
168 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
169 	 * it is rte_crypto_asym_op_type.
170 	 */
171 
172 	__extension__
173 	union {
174 		struct rte_crypto_param_range modlen;
175 		/**< Range of modulus length supported by modulus based xform.
176 		 * Value 0 means implementation default.
177 		 */
178 
179 		uint8_t internal_rng;
180 		/**< Availability of random number generator for Elliptic curve based xform.
181 		 * Value 0 means unavailable, and the application should pass the required
182 		 * random value. Otherwise, the PMD computes the random number internally.
183 		 */
184 	};
185 
186 	uint64_t hash_algos;
187 	/**< Bitmask of hash algorithms supported for op_type. */
188 };
189 
190 /**
191  * Asymmetric Crypto Capability
192  */
193 struct rte_cryptodev_asymmetric_capability {
194 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
195 };
196 
197 
198 /** Structure used to capture a capability of a crypto device */
199 struct rte_cryptodev_capabilities {
200 	enum rte_crypto_op_type op;
201 	/**< Operation type */
202 
203 	union {
204 		struct rte_cryptodev_symmetric_capability sym;
205 		/**< Symmetric operation capability parameters */
206 		struct rte_cryptodev_asymmetric_capability asym;
207 		/**< Asymmetric operation capability parameters */
208 	};
209 };
210 
211 /** Structure used to describe crypto algorithms */
212 struct rte_cryptodev_sym_capability_idx {
213 	enum rte_crypto_sym_xform_type type;
214 	union {
215 		enum rte_crypto_cipher_algorithm cipher;
216 		enum rte_crypto_auth_algorithm auth;
217 		enum rte_crypto_aead_algorithm aead;
218 	} algo;
219 };
220 
221 /**
222  * Structure used to describe asymmetric crypto xforms
223  * Each xform maps to one asym algorithm.
224  */
225 struct rte_cryptodev_asym_capability_idx {
226 	enum rte_crypto_asym_xform_type type;
227 	/**< Asymmetric xform (algo) type */
228 };
229 
230 /**
231  * Provide capabilities available for defined device and algorithm
232  *
233  * @param	dev_id		The identifier of the device.
234  * @param	idx		Description of crypto algorithms.
235  *
236  * @return
237  *   - Return description of the symmetric crypto capability if it exists.
238  *   - Return NULL if the capability does not exist.
239  */
240 const struct rte_cryptodev_symmetric_capability *
241 rte_cryptodev_sym_capability_get(uint8_t dev_id,
242 		const struct rte_cryptodev_sym_capability_idx *idx);
243 
244 /**
245  *  Provide capabilities available for defined device and xform
246  *
247  * @param	dev_id		The identifier of the device.
248  * @param	idx		Description of asym crypto xform.
249  *
250  * @return
251  *   - Return description of the asymmetric crypto capability if it exists.
252  *   - Return NULL if the capability does not exist.
253  */
254 const struct rte_cryptodev_asymmetric_xform_capability *
255 rte_cryptodev_asym_capability_get(uint8_t dev_id,
256 		const struct rte_cryptodev_asym_capability_idx *idx);
257 
258 /**
259  * Check if key size and initial vector are supported
260  * in crypto cipher capability
261  *
262  * @param	capability	Description of the symmetric crypto capability.
263  * @param	key_size	Cipher key size.
264  * @param	iv_size		Cipher initial vector size.
265  *
266  * @return
267  *   - Return 0 if the parameters are in range of the capability.
268  *   - Return -1 if the parameters are out of range of the capability.
269  */
270 int
271 rte_cryptodev_sym_capability_check_cipher(
272 		const struct rte_cryptodev_symmetric_capability *capability,
273 		uint16_t key_size, uint16_t iv_size);
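/*
 * Usage sketch (illustrative only): query AES-CBC support on a device and
 * validate a 16-byte key with a 16-byte IV. The dev_id variable and the
 * chosen sizes are assumptions for the example.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap != NULL &&
 *			rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0) {
 *		// AES-CBC with a 16-byte key and 16-byte IV is supported
 *	}
 */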
274 
275 /**
276  * Check if key size and initial vector are supported
277  * in crypto auth capability
278  *
279  * @param	capability	Description of the symmetric crypto capability.
280  * @param	key_size	Auth key size.
281  * @param	digest_size	Auth digest size.
282  * @param	iv_size		Auth initial vector size.
283  *
284  * @return
285  *   - Return 0 if the parameters are in range of the capability.
286  *   - Return -1 if the parameters are out of range of the capability.
287  */
288 int
289 rte_cryptodev_sym_capability_check_auth(
290 		const struct rte_cryptodev_symmetric_capability *capability,
291 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
292 
293 /**
294  * Check if key, digest, AAD and initial vector sizes are supported
295  * in crypto AEAD capability
296  *
297  * @param	capability	Description of the symmetric crypto capability.
298  * @param	key_size	AEAD key size.
299  * @param	digest_size	AEAD digest size.
300  * @param	aad_size	AEAD AAD size.
301  * @param	iv_size		AEAD IV size.
302  *
303  * @return
304  *   - Return 0 if the parameters are in range of the capability.
305  *   - Return -1 if the parameters are out of range of the capability.
306  */
307 int
308 rte_cryptodev_sym_capability_check_aead(
309 		const struct rte_cryptodev_symmetric_capability *capability,
310 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
311 		uint16_t iv_size);
312 
313 /**
314  * Check if op type is supported
315  *
316  * @param	capability	Description of the asymmetric crypto capability.
317  * @param	op_type		op type
318  *
319  * @return
320  *   - Return 1 if the op type is supported
321  *   - Return 0 if unsupported
322  */
323 int
324 rte_cryptodev_asym_xform_capability_check_optype(
325 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
326 		enum rte_crypto_asym_op_type op_type);
327 
328 /**
329  * Check if modulus length is in supported range
330  *
331  * @param	capability	Description of the asymmetric crypto capability.
332  * @param	modlen		modulus length.
333  *
334  * @return
335  *   - Return 0 if the parameters are in range of the capability.
336  *   - Return -1 if the parameters are out of range of the capability.
337  */
338 int
339 rte_cryptodev_asym_xform_capability_check_modlen(
340 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
341 		uint16_t modlen);
342 
343 /**
344  * Check if hash algorithm is supported.
345  *
346  * @param	capability	Asymmetric crypto capability.
347  * @param	hash		Hash algorithm.
348  *
349  * @return
350  *   - Return true if the hash algorithm is supported.
351  *   - Return false if the hash algorithm is not supported.
352  */
353 bool
354 rte_cryptodev_asym_xform_capability_check_hash(
355 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
356 	enum rte_crypto_auth_algorithm hash);
357 
358 /**
359  * Provide the cipher algorithm enum, given an algorithm string
360  *
361  * @param	algo_enum	A pointer to the cipher algorithm
362  *				enum to be filled
363  * @param	algo_string	Cipher algorithm string
364  *
365  * @return
366  * - Return -1 if string is not valid
367  * - Return 0 if the string is valid
368  */
369 int
370 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
371 		const char *algo_string);
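/*
 * Usage sketch (illustrative only). The algorithm string below follows the
 * naming used by DPDK test/example applications ("aes-cbc"); treat the exact
 * string as an assumption and always check the return value.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") == 0) {
 *		// algo now holds the matching RTE_CRYPTO_CIPHER_* value
 *	}
 */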
372 
373 /**
374  * Provide the authentication algorithm enum, given an algorithm string
375  *
376  * @param	algo_enum	A pointer to the authentication algorithm
377  *				enum to be filled
378  * @param	algo_string	Authentication algo string
379  *
380  * @return
381  * - Return -1 if string is not valid
382  * - Return 0 if the string is valid
383  */
384 int
385 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
386 		const char *algo_string);
387 
388 /**
389  * Provide the AEAD algorithm enum, given an algorithm string
390  *
391  * @param	algo_enum	A pointer to the AEAD algorithm
392  *				enum to be filled
393  * @param	algo_string	AEAD algorithm string
394  *
395  * @return
396  * - Return -1 if string is not valid
397  * - Return 0 if the string is valid
398  */
399 int
400 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
401 		const char *algo_string);
402 
403 /**
404  * Provide the Asymmetric xform enum, given an xform string
405  *
406  * @param	xform_enum	A pointer to the xform type
407  *				enum to be filled
408  * @param	xform_string	xform string
409  *
410  * @return
411  * - Return -1 if string is not valid
412  * - Return 0 if the string is valid
413  */
414 int
415 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
416 		const char *xform_string);
417 
418 /**
419  * Provide the cipher algorithm string, given an algorithm enum.
420  *
421  * @param	algo_enum	cipher algorithm enum
422  *
423  * @return
424  * - Return NULL if enum is not valid
425  * - Return algo_string corresponding to enum
426  */
427 __rte_experimental
428 const char *
429 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
430 
431 /**
432  * Provide the authentication algorithm string, given an algorithm enum.
433  *
434  * @param	algo_enum	auth algorithm enum
435  *
436  * @return
437  * - Return NULL if enum is not valid
438  * - Return algo_string corresponding to enum
439  */
440 __rte_experimental
441 const char *
442 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
443 
444 /**
445  * Provide the AEAD algorithm string, given an algorithm enum.
446  *
447  * @param	algo_enum	AEAD algorithm enum
448  *
449  * @return
450  * - Return NULL if enum is not valid
451  * - Return algo_string corresponding to enum
452  */
453 __rte_experimental
454 const char *
455 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
456 
457 /**
458  * Provide the Asymmetric xform string, given an xform enum.
459  *
460  * @param	xform_enum	xform type enum
461  *
462  * @return
463  * - Return NULL if enum is not valid
464  * - Return xform string corresponding to enum
465  */
466 __rte_experimental
467 const char *
468 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
469 
470 
471 /** Macro used at the end of a crypto PMD capabilities list */
472 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
473 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
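/*
 * Typical use in a PMD (illustrative sketch): a statically defined
 * capabilities array terminated with the macro above. The AES-CBC entry and
 * its size ranges are example values, not taken from any real PMD.
 *
 *	static const struct rte_cryptodev_capabilities pmd_caps[] = {
 *		{
 *			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			.sym = {
 *				.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *				.cipher = {
 *					.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *					.block_size = 16,
 *					.key_size = { .min = 16, .max = 32, .increment = 8 },
 *					.iv_size = { .min = 16, .max = 16, .increment = 0 },
 *				},
 *			},
 *		},
 *		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 *	};
 */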
474 
475 
476 /**
477  * Crypto device supported feature flags
478  *
479  * Note:
480  * New features flags should be added to the end of the list
481  *
482  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
483  */
484 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
485 /**< Symmetric crypto operations are supported */
486 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
487 /**< Asymmetric crypto operations are supported */
488 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
489 /**< Chaining of symmetric crypto operations is supported */
490 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
491 /**< Utilises CPU SIMD SSE instructions */
492 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
493 /**< Utilises CPU SIMD AVX instructions */
494 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
495 /**< Utilises CPU SIMD AVX2 instructions */
496 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
497 /**< Utilises CPU AES-NI instructions */
498 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
499 /**< Operations are off-loaded to an
500  * external hardware accelerator
501  */
502 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
503 /**< Utilises CPU SIMD AVX512 instructions */
504 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
505 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
506  * are supported
507  */
508 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
509 /**< Out-of-place Scatter-gather (SGL) buffers are
510  * supported in input and output
511  */
512 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
513 /**< Out-of-place Scatter-gather (SGL) buffers are supported
514  * in input, combined with linear buffers (LB), with a
515  * single segment in output
516  */
517 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
518 /**< Out-of-place Scatter-gather (SGL) buffers are supported
519  * in output, combined with linear buffers (LB) in input
520  */
521 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
522 /**< Out-of-place linear buffers (LB) are supported in input and output */
523 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
524 /**< Utilises CPU NEON instructions */
525 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
526 /**< Utilises ARM CPU Cryptographic Extensions */
527 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
528 /**< Support Security Protocol Processing */
529 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
530 /**< Support RSA Private Key OP with exponent */
531 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
532 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
533 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
534 /**< Support encrypted-digest operations where digest is appended to data */
535 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
536 /**< Support asymmetric session-less operations */
537 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
538 /**< Support symmetric cpu-crypto processing */
539 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
540 /**< Support symmetric session-less operations */
541 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
542 /**< Support operations on data which is not byte aligned */
543 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
544 /**< Support accelerator specific symmetric raw data-path APIs */
545 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
546 /**< Support operations on messages consisting of multiple data-units */
547 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
548 /**< Support wrapped key in cipher xform  */
549 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
550 /**< Support inner checksum computation/verification */
551 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
552 /**< Support Rx injection after security processing */
553 
554 /**
555  * Get the name of a crypto device feature flag
556  *
557  * @param	flag	The mask describing the flag.
558  *
559  * @return
560  *   The name of this flag, or NULL if it's not a valid feature flag.
561  */
562 const char *
563 rte_cryptodev_get_feature_name(uint64_t flag);
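/*
 * Usage sketch (illustrative only): print the names of all feature flags
 * advertised by a device. Assumes dev_id is a valid device identifier and
 * that <stdio.h> is available for printf().
 *
 *	struct rte_cryptodev_info info;
 *	unsigned int i;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (i = 0; i < 64; i++) {
 *		uint64_t flag = 1ULL << i;
 *		const char *name;
 *
 *		if ((info.feature_flags & flag) == 0)
 *			continue;
 *		name = rte_cryptodev_get_feature_name(flag);
 *		if (name != NULL)
 *			printf("%s\n", name);
 *	}
 */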
564 
565 /**  Crypto device information */
566 /* Structure rte_cryptodev_info 8< */
567 struct rte_cryptodev_info {
568 	const char *driver_name;	/**< Driver name. */
569 	uint8_t driver_id;		/**< Driver identifier */
570 	struct rte_device *device;	/**< Generic device information. */
571 
572 	uint64_t feature_flags;
573 	/**< Feature flags expose HW/SW features of the given device */
574 
575 	const struct rte_cryptodev_capabilities *capabilities;
576 	/**< Array of capabilities supported by the device */
577 
578 	unsigned max_nb_queue_pairs;
579 	/**< Maximum number of queue pairs supported by device. */
580 
581 	uint16_t min_mbuf_headroom_req;
582 	/**< Minimum mbuf headroom required by device */
583 
584 	uint16_t min_mbuf_tailroom_req;
585 	/**< Minimum mbuf tailroom required by device */
586 
587 	struct {
588 		unsigned max_nb_sessions;
589 		/**< Maximum number of sessions supported by device.
590 		 * If 0, the device does not have any limitation in
591 		 * number of sessions that can be used.
592 		 */
593 	} sym;
594 };
595 /* >8 End of structure rte_cryptodev_info. */
596 
597 #define RTE_CRYPTODEV_DETACHED  (0)
598 #define RTE_CRYPTODEV_ATTACHED  (1)
599 
600 /** Definitions of Crypto device event types */
601 enum rte_cryptodev_event_type {
602 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
603 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
604 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
605 };
606 
607 /* Crypto queue pair priority levels */
608 #define RTE_CRYPTODEV_QP_PRIORITY_HIGHEST   0
609 /**< Highest priority of a cryptodev queue pair
610  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
611  */
612 #define RTE_CRYPTODEV_QP_PRIORITY_NORMAL    128
613 /**< Normal priority of a cryptodev queue pair
614  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
615  */
616 #define RTE_CRYPTODEV_QP_PRIORITY_LOWEST    255
617 /**< Lowest priority of a cryptodev queue pair
618  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
619  */
620 
621 /** Crypto device queue pair configuration structure. */
622 /* Structure rte_cryptodev_qp_conf 8<*/
623 struct rte_cryptodev_qp_conf {
624 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
625 	struct rte_mempool *mp_session;
626 	/**< The mempool for creating sessions in sessionless mode */
627 	uint8_t priority;
628 	/**< Priority for this queue pair relative to other queue pairs.
629 	 *
630 	 * The requested priority should be in the range of
631 	 * [@ref RTE_CRYPTODEV_QP_PRIORITY_HIGHEST, @ref RTE_CRYPTODEV_QP_PRIORITY_LOWEST].
632 	 * The implementation may normalize the requested priority to a
633 	 * device-supported priority value.
634 	 */
635 };
636 /* >8 End of structure rte_cryptodev_qp_conf. */
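/*
 * Illustrative initialization of the structure above. The descriptor count
 * and the session_pool mempool are example assumptions.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.priority = RTE_CRYPTODEV_QP_PRIORITY_NORMAL,
 *	};
 */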
637 
638 /**
639  * Function type used for processing crypto ops when enqueue/dequeue burst is
640  * called.
641  *
642  * The callback function is called immediately when the enqueue/dequeue burst is invoked.
643  *
644  * @param	dev_id		The identifier of the device.
645  * @param	qp_id		The index of the queue pair on which ops are
646  *				enqueued/dequeued. The value must be in the
647  *				range [0, nb_queue_pairs - 1] previously
648  *				supplied to *rte_cryptodev_configure*.
649  * @param	ops		The address of an array of *nb_ops* pointers
650  *				to *rte_crypto_op* structures which contain
651  *				the crypto operations to be processed.
652  * @param	nb_ops		The number of operations to process.
653  * @param	user_param	The arbitrary user parameter passed in by the
654  *				application when the callback was originally
655  *				registered.
656  * @return			The number of ops to be enqueued to the
657  *				crypto device.
658  */
659 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
660 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
661 
662 /**
663  * Typedef for application callback function to be registered by application
664  * software for notification of device events
665  *
666  * @param	dev_id	Crypto device identifier
667  * @param	event	Crypto device event to register for notification of.
668  * @param	cb_arg	User specified parameter to be passed to the
669  *			user's callback function.
670  */
671 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
672 		enum rte_cryptodev_event_type event, void *cb_arg);
673 
674 
675 /** Crypto Device statistics */
676 struct rte_cryptodev_stats {
677 	uint64_t enqueued_count;
678 	/**< Count of all operations enqueued */
679 	uint64_t dequeued_count;
680 	/**< Count of all operations dequeued */
681 
682 	uint64_t enqueue_err_count;
683 	/**< Total error count on operations enqueued */
684 	uint64_t dequeue_err_count;
685 	/**< Total error count on operations dequeued */
686 };
687 
688 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
689 /**< Max length of name of crypto PMD */
690 
691 /**
692  * Get the device identifier for the named crypto device.
693  *
694  * @param	name	device name to select the device structure.
695  *
696  * @return
697  *   - Returns crypto device identifier on success.
698  *   - Return -1 on failure to find named crypto device.
699  */
700 int
701 rte_cryptodev_get_dev_id(const char *name);
702 
703 /**
704  * Get the crypto device name given a device identifier.
705  *
706  * @param dev_id
707  *   The identifier of the device
708  *
709  * @return
710  *   - Returns crypto device name.
711  *   - Returns NULL if crypto device is not present.
712  */
713 const char *
714 rte_cryptodev_name_get(uint8_t dev_id);
715 
716 /**
717  * Get the total number of crypto devices that have been successfully
718  * initialised.
719  *
720  * @return
721  *   - The total number of usable crypto devices.
722  */
723 uint8_t
724 rte_cryptodev_count(void);
725 
726 /**
727  * Get the number of crypto devices of a defined driver type.
728  *
729  * @param	driver_id	driver identifier.
730  *
731  * @return
732  *   Returns the number of crypto devices.
733  */
734 uint8_t
735 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
736 
737 /**
738  * Get number and identifiers of attached crypto devices that
739  * use the same crypto driver.
740  *
741  * @param	driver_name	driver name.
742  * @param	devices		output devices identifiers.
743  * @param	nb_devices	maximal number of devices.
744  *
745  * @return
746  *   Returns the number of attached crypto devices.
747  */
748 uint8_t
749 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
750 		uint8_t nb_devices);
751 /**
752  * Return the NUMA socket to which a device is connected
753  *
754  * @param dev_id
755  *   The identifier of the device
756  * @return
757  *   The NUMA socket id to which the device is connected or
758  *   a default of zero if the socket could not be determined.
759  *   -1 is returned if the dev_id value is out of range.
760  */
761 int
762 rte_cryptodev_socket_id(uint8_t dev_id);
763 
764 /** Crypto device configuration structure */
765 /* Structure rte_cryptodev_config 8< */
766 struct rte_cryptodev_config {
767 	int socket_id;			/**< Socket to allocate resources on */
768 	uint16_t nb_queue_pairs;
769 	/**< Number of queue pairs to configure on device */
770 	uint64_t ff_disable;
771 	/**< Feature flags to be disabled. Only the following features are
772 	 * allowed to be disabled,
773 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
774 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
775 	 *  - RTE_CRYPTODEV_FF_SECURITY
776 	 */
777 };
778 /* >8 End of structure rte_cryptodev_config. */
779 
780 /**
781  * Configure a device.
782  *
783  * This function must be invoked first, before any other function in the
784  * API. This function can also be re-invoked when a device is in the
785  * stopped state.
786  *
787  * @param	dev_id		The identifier of the device to configure.
788  * @param	config		The crypto device configuration structure.
789  *
790  * @return
791  *   - 0: Success, device configured.
792  *   - <0: Error code returned by the driver configuration function.
793  */
794 int
795 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
796 
797 /**
798  * Start a device.
799  *
800  * The device start step is the last one and consists of setting the configured
801  * offload features and starting the transmit and the receive units of the
802  * device.
803  * On success, all basic functions exported by the API (link status,
804  * receive/transmit, and so on) can be invoked.
805  *
806  * @param dev_id
807  *   The identifier of the device.
808  * @return
809  *   - 0: Success, device started.
810  *   - <0: Error code of the driver device start function.
811  */
812 int
813 rte_cryptodev_start(uint8_t dev_id);
814 
815 /**
816  * Stop a device. The device can be restarted with a call to
817  * rte_cryptodev_start()
818  *
819  * @param	dev_id		The identifier of the device.
820  */
821 void
822 rte_cryptodev_stop(uint8_t dev_id);
823 
824 /**
825  * Close a device. The device cannot be restarted!
826  *
827  * @param	dev_id		The identifier of the device.
828  *
829  * @return
830  *  - 0 on successfully closing device
831  *  - <0 on failure to close device
832  */
833 int
834 rte_cryptodev_close(uint8_t dev_id);
835 
836 /**
837  * Allocate and set up a queue pair for a device.
838  *
839  *
840  * @param	dev_id		The identifier of the device.
841  * @param	queue_pair_id	The index of the queue pair to set up. The
842  *				value must be in the range [0, nb_queue_pair
843  *				- 1] previously supplied to
844  *				rte_cryptodev_configure().
845  * @param	qp_conf		The pointer to the configuration data to be
846  *				used for the queue pair.
847  * @param	socket_id	The *socket_id* argument is the socket
848  *				identifier in case of NUMA. The value can be
849  *				*SOCKET_ID_ANY* if there is no NUMA constraint
850  *				for the DMA memory allocated for the receive
851  *				queue pair.
852  *
853  * @return
854  *   - 0: Success, queue pair correctly set up.
855  *   - <0: Queue pair configuration failed
856  */
857 int
858 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
859 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
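/*
 * Typical device bring-up sketch (illustrative only): configure the device,
 * set up one queue pair and start it. The dev_id, descriptor count and
 * session_pool mempool are assumptions for the example; error handling is
 * omitted for brevity.
 *
 *	int socket_id = rte_cryptodev_socket_id(dev_id);
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = socket_id,
 *		.nb_queue_pairs = 1,
 *		.ff_disable = 0,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, socket_id);
 *	rte_cryptodev_start(dev_id);
 */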
860 
861 /**
862  * @warning
863  * @b EXPERIMENTAL: this API may change without prior notice.
864  *
865  * Reset a queue pair for a device.
866  * The caller of this API must ensure that there are no enqueues to the queue and there are no
867  * pending/inflight packets in the queue when the API is called.
868  * The API can reconfigure the queue pair when the queue pair configuration data is provided.
869  *
870  * @param	dev_id		The identifier of the device.
871  * @param	queue_pair_id	The index of the queue pair to reset. The value must be in the
872  *				range [0, nb_queue_pair - 1] previously supplied to
873  *				rte_cryptodev_configure().
874  * @param	qp_conf		The pointer to configuration data to be used for the queue pair.
875  *				It should be NULL if the API is called from an interrupt context.
876  * @param	socket_id	The *socket_id* argument is the socket identifier in case of NUMA.
877  *				The value can be *SOCKET_ID_ANY* if there is no NUMA constraint
878  *				for the DMA memory allocated for the queue pair.
879  *
880  * @return
881  *   - 0:  Queue pair is reset successfully.
882  *   - ENOTSUP: If the operation is not supported by the PMD.
883  *   - <0: Queue pair reset failed
884  */
885 __rte_experimental
886 int
887 rte_cryptodev_queue_pair_reset(uint8_t dev_id, uint16_t queue_pair_id,
888 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
889 
890 /**
891  * Get the setup status of a queue pair on a specific crypto device
892  *
893  * @param	dev_id		Crypto device identifier.
894  * @param	queue_pair_id	The index of the queue pair to query. The
895  *				value must be in the range [0, nb_queue_pair
896  *				- 1] previously supplied to
897  *				rte_cryptodev_configure().
898  * @return
899  *   - 0: qp was not configured
900  *   - 1: qp was configured
901  *   - -EINVAL: device was not configured
902  */
903 int
904 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
905 
906 /**
907  * Get the number of queue pairs on a specific crypto device
908  *
909  * @param	dev_id		Crypto device identifier.
910  * @return
911  *   - The number of configured queue pairs.
912  */
913 uint16_t
914 rte_cryptodev_queue_pair_count(uint8_t dev_id);
915 
916 
917 /**
918  * Retrieve the general I/O statistics of a device.
919  *
920  * @param	dev_id		The identifier of the device.
921  * @param	stats		A pointer to a structure of type
922  *				*rte_cryptodev_stats* to be filled with the
923  *				values of device counters.
924  * @return
925  *   - Zero if successful.
926  *   - Non-zero otherwise.
927  */
928 int
929 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
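/*
 * Usage sketch (illustrative only): read and print the device counters.
 * Assumes dev_id is valid and that <stdio.h> and <inttypes.h> are available.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enqueued %" PRIu64 ", dequeued %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count);
 */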
930 
931 /**
932  * Reset the general I/O statistics of a device.
933  *
934  * @param	dev_id		The identifier of the device.
935  */
936 void
937 rte_cryptodev_stats_reset(uint8_t dev_id);
938 
939 /**
940  * Retrieve the contextual information of a device.
941  *
942  * @param	dev_id		The identifier of the device.
943  * @param	dev_info	A pointer to a structure of type
944  *				*rte_cryptodev_info* to be filled with the
945  *				contextual information of the device.
946  *
947  * @note The capabilities field of dev_info is set to point to the first
948  * element of an array of struct rte_cryptodev_capabilities. The element after
949  * the last valid element has its op field set to
950  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
951  */
952 void
953 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
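/*
 * Usage sketch for the capabilities array described in the note above
 * (illustrative only): walk the array until the RTE_CRYPTO_OP_TYPE_UNDEFINED
 * terminator. The dev_id variable is an assumption for the example.
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (cap = info.capabilities;
 *			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
 *		// inspect cap->op and cap->sym / cap->asym here
 *	}
 */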
954 
955 
956 /**
957  * Register a callback function for a specific device id.
958  *
959  * @param	dev_id		Device id.
960  * @param	event		Event of interest.
961  * @param	cb_fn		User supplied callback function to be called.
962  * @param	cb_arg		Pointer to the parameters for the registered
963  *				callback.
964  *
965  * @return
966  *  - On success, zero.
967  *  - On failure, a negative value.
968  */
969 int
970 rte_cryptodev_callback_register(uint8_t dev_id,
971 		enum rte_cryptodev_event_type event,
972 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
973 
974 /**
975  * Unregister a callback function for a specific device id.
976  *
977  * @param	dev_id		The device identifier.
978  * @param	event		Event of interest.
979  * @param	cb_fn		User supplied callback function to be called.
980  * @param	cb_arg		Pointer to the parameters for the registered
981  *				callback.
982  *
983  * @return
984  *  - On success, zero.
985  *  - On failure, a negative value.
986  */
987 int
988 rte_cryptodev_callback_unregister(uint8_t dev_id,
989 		enum rte_cryptodev_event_type event,
990 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
991 
992 /**
993  * @warning
994  * @b EXPERIMENTAL: this API may change without prior notice.
995  *
996  * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR
997  * events.
998  *
999  * @param          dev_id	The device identifier.
1000  * @param          qp_id	Queue pair index to be queried.
1001  *
1002  * @return
1003  *   - 1 if requested queue has a pending event.
1004  *   - 0 if no pending event is found.
1005  *   - a negative value on failure
1006  */
1007 __rte_experimental
1008 int
1009 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
1010 
1011 struct rte_cryptodev_callback;
1012 
1013 /** Structure to keep track of registered callbacks */
1014 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
1015 
1016 /**
1017  * Structure used to hold information about the callbacks to be called for a
1018  * queue pair on enqueue/dequeue.
1019  */
1020 struct rte_cryptodev_cb {
1021 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
1022 	/**< Pointer to next callback */
1023 	rte_cryptodev_callback_fn fn;
1024 	/**< Pointer to callback function */
1025 	void *arg;
1026 	/**< Pointer to argument */
1027 };
1028 
1029 /**
1030  * @internal
1031  * Structure used to hold information about the RCU for a queue pair.
1032  */
1033 struct rte_cryptodev_cb_rcu {
1034 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
1035 	/**< Pointer to next callback */
1036 	struct rte_rcu_qsbr *qsbr;
1037 	/**< RCU QSBR variable per queue pair */
1038 };
1039 
1040 /**
1041  * Get the security context for the cryptodev.
1042  *
1043  * @param dev_id
1044  *   The device identifier.
1045  * @return
1046  *   - NULL on error.
1047  *   - Pointer to security context on success.
1048  */
1049 void *
1050 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1051 
1052 /**
1053  * Create a symmetric session mempool.
1054  *
1055  * @param name
1056  *   The unique mempool name.
1057  * @param nb_elts
1058  *   The number of elements in the mempool.
1059  * @param elt_size
1060  *   The size of the element. This should be the size of the cryptodev PMD
1061  *   session private data obtained through
1062  *   rte_cryptodev_sym_get_private_session_size() function call.
1063  *   For a user who wants to use the same mempool for heterogeneous PMDs,
1064  *   this value should be the maximum of their private session sizes.
1065  *   Please note the created mempool will have a bigger elt size than this
1066  *   value, as the necessary session header and possible padding are added
1067  *   to each elt.
1068  * @param cache_size
1069  *   The number of per-lcore cache elements
1070  * @param priv_size
1071  *   The private data size of each session.
1072  * @param socket_id
1073  *   The *socket_id* argument is the socket identifier in the case of
1074  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1075  *   constraint for the reserved zone.
1076  *
1077  * @return
1078  *  - On success returns the created session mempool pointer
1079  *  - On failure returns NULL
1080  */
1081 struct rte_mempool *
1082 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1083 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1084 	int socket_id);
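/*
 * Usage sketch (illustrative only): size the mempool elements from the
 * device's private session size. The pool name, element count and cache
 * size are example assumptions; SOCKET_ID_ANY is used for simplicity.
 *
 *	unsigned int sess_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *session_pool =
 *		rte_cryptodev_sym_session_pool_create("sym_sess_pool", 1024,
 *			sess_sz, 128, 0, SOCKET_ID_ANY);
 */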
1085 
1086 
1087 /**
1088  * Create an asymmetric session mempool.
1089  *
1090  * @param name
1091  *   The unique mempool name.
1092  * @param nb_elts
1093  *   The number of elements in the mempool.
1094  * @param cache_size
1095  *   The number of per-lcore cache elements
1096  * @param user_data_size
1097  *   The size of user data to be placed after session private data.
1098  * @param socket_id
1099  *   The *socket_id* argument is the socket identifier in the case of
1100  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1101  *   constraint for the reserved zone.
1102  *
1103  * @return
1104  *  - On success return mempool
1105  *  - On failure returns NULL
1106  */
1107 struct rte_mempool *
1108 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1109 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1110 
1111 /**
1112  * Create symmetric crypto session and fill out private data for the device id,
1113  * based on its device type.
1114  *
1115  * @param   dev_id   ID of device that we want the session to be used on
1116  * @param   xforms   Symmetric crypto transform operations to apply on flow
1117  *                   processed with this session
1118  * @param   mp       Mempool to allocate symmetric session objects from
1119  *
1120  * @return
1121  *  - On success return pointer to sym-session.
1122  *  - On failure returns NULL and rte_errno is set to the error code:
1123  *    - EINVAL on invalid arguments.
1124  *    - ENOMEM on memory error for session allocation.
1125  *    - ENOTSUP if device doesn't support session configuration.
1126  */
1127 void *
1128 rte_cryptodev_sym_session_create(uint8_t dev_id,
1129 		struct rte_crypto_sym_xform *xforms,
1130 		struct rte_mempool *mp);
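/*
 * Usage sketch (illustrative only): create an AES-CBC encrypt session.
 * The cipher_key array, IV_OFFSET value and session_pool mempool are
 * assumptions made for the example.
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = NULL,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform,
 *			session_pool);
 *
 *	if (sess == NULL) {
 *		// rte_errno holds the failure reason
 *	}
 */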
1131 /**
1132  * Create and initialise an asymmetric crypto session structure.
1133  * Calls the PMD to configure the private session data.
1134  *
1135  * @param   dev_id   ID of device that we want the session to be used on
1136  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1137  *                   processed with this session
1138  * @param   mp       mempool to allocate asymmetric session
1139  *                   objects from
1140  * @param   session  void ** for session to be used
1141  *
1142  * @return
1143  *  - 0 on success.
1144  *  - -EINVAL on invalid arguments.
1145  *  - -ENOMEM on memory error for session allocation.
1146  *  - -ENOTSUP if device doesn't support session configuration.
1147  */
1148 int
1149 rte_cryptodev_asym_session_create(uint8_t dev_id,
1150 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1151 		void **session);
1152 
1153 /**
1154  * Free a session for the device id, returning it to its mempool.
1155  * It is the application's responsibility to ensure that there are
1156  * no in-flight operations still using the session.
1157  *
1158  * @param   dev_id   ID of device that uses the session.
1159  * @param   sess     Session header to be freed.
1160  *
1161  * @return
1162  *  - 0 if successful.
1163  *  - -EINVAL if the session is NULL or the device ids do not match.
1164  */
1165 int
1166 rte_cryptodev_sym_session_free(uint8_t dev_id,
1167 	void *sess);
1168 
1169 /**
1170  * Clears and frees asymmetric crypto session header and private data,
1171  * returning it to its original mempool.
1172  *
1173  * @param   dev_id   ID of device that uses the asymmetric session.
1174  * @param   sess     Session header to be freed.
1175  *
1176  * @return
1177  *  - 0 if successful.
1178  *  - -EINVAL if device is invalid or session is NULL.
1179  */
1180 int
1181 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1182 
1183 /**
1184  * Get the size of the asymmetric session header.
1185  *
1186  * @return
1187  *   Size of the asymmetric session header.
1188  */
1189 unsigned int
1190 rte_cryptodev_asym_get_header_session_size(void);
1191 
1192 /**
1193  * Get the size of the private symmetric session data
1194  * for a device.
1195  *
1196  * @param	dev_id		The device identifier.
1197  *
1198  * @return
1199  *   - Size of the private data, if successful
1200  *   - 0 if device is invalid or does not have private
1201  *   symmetric session
1202  */
1203 unsigned int
1204 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1205 
1206 /**
1207  * Get the size of the private data for asymmetric session
1208  * on device
1209  *
1210  * @param	dev_id		The device identifier.
1211  *
1212  * @return
1213  *   - Size of the asymmetric private data, if successful
1214  *   - 0 if device is invalid or does not have private session
1215  */
1216 unsigned int
1217 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1218 
1219 /**
1220  * Validate if the crypto device index corresponds to a valid attached crypto device.
1221  *
1222  * @param	dev_id	Crypto device index.
1223  *
1224  * @return
1225  *   - If the device index is valid (1) or not (0).
1226  */
1227 unsigned int
1228 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1229 
1230 /**
1231  * Provide driver identifier.
1232  *
1233  * @param name
1234  *   The pointer to a driver name.
1235  * @return
1236  *  The driver type identifier or -1 if no driver found
1237  */
1238 int rte_cryptodev_driver_id_get(const char *name);
1239 
1240 /**
1241  * Provide driver name.
1242  *
1243  * @param driver_id
1244  *   The driver identifier.
1245  * @return
1246  *  The driver name or NULL if no driver found
1247  */
1248 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1249 
1250 /**
1251  * Store user data in a session.
1252  *
1253  * @param	sess		Session pointer allocated by
1254  *				*rte_cryptodev_sym_session_create*.
1255  * @param	data		Pointer to the user data.
1256  * @param	size		Size of the user data.
1257  *
1258  * @return
1259  *  - On success, zero.
1260  *  - On failure, a negative value.
1261  */
1262 int
1263 rte_cryptodev_sym_session_set_user_data(void *sess,
1264 					void *data,
1265 					uint16_t size);
1266 
1267 #define CRYPTO_SESS_OPAQUE_DATA_OFF 0
1268 /**
1269  * Get opaque data from session handle
1270  */
1271 static inline uint64_t
1272 rte_cryptodev_sym_session_opaque_data_get(void *sess)
1273 {
1274 	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
1275 }
1276 
1277 /**
1278  * Set opaque data in session handle
1279  */
1280 static inline void
1281 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1282 {
1283 	uint64_t *data;
1284 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1285 	*data = opaque;
1286 }
1287 
1288 /**
1289  * Get user data stored in a session.
1290  *
1291  * @param	sess		Session pointer allocated by
1292  *				*rte_cryptodev_sym_session_create*.
1293  *
1294  * @return
1295  *  - On success return pointer to user data.
1296  *  - On failure returns NULL.
1297  */
1298 void *
1299 rte_cryptodev_sym_session_get_user_data(void *sess);
1300 
1301 /**
1302  * Store user data in an asymmetric session.
1303  *
1304  * @param	sess		Session pointer allocated by
1305  *				*rte_cryptodev_asym_session_create*.
1306  * @param	data		Pointer to the user data.
1307  * @param	size		Size of the user data.
1308  *
1309  * @return
1310  *  - On success, zero.
1311  *  - -EINVAL if the session pointer is invalid.
1312  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1313  */
1314 int
1315 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1316 
1317 /**
1318  * Get user data stored in an asymmetric session.
1319  *
1320  * @param	sess		Session pointer allocated by
1321  *				*rte_cryptodev_asym_session_create*.
1322  *
1323  * @return
1324  *  - On success return pointer to user data.
1325  *  - On failure returns NULL.
1326  */
1327 void *
1328 rte_cryptodev_asym_session_get_user_data(void *sess);
1329 
1330 /**
1331  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1332  * on user provided data.
1333  *
1334  * @param	dev_id	The device identifier.
1335  * @param	sess	Cryptodev session structure
1336  * @param	ofs	Start and stop offsets for auth and cipher operations
1337  * @param	vec	Vectorized operation descriptor
1338  *
1339  * @return
1340  *  - Returns number of successfully processed packets.
1341  */
1342 uint32_t
1343 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1344 	void *sess, union rte_crypto_sym_ofs ofs,
1345 	struct rte_crypto_sym_vec *vec);
1346 
1347 /**
1348  * Get the size of the raw data-path context buffer.
1349  *
1350  * @param	dev_id		The device identifier.
1351  *
1352  * @return
1353  *   - If the device supports raw data-path APIs, return the context size.
1354  *   - If the device does not support the APIs, return -1.
1355  */
1356 int
1357 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1358 
1359 /**
1360  * Set session event meta data
1361  *
1362  * @param	dev_id		The device identifier.
1363  * @param	sess            Crypto or security session.
1364  * @param	op_type         Operation type.
1365  * @param	sess_type       Session type.
1366  * @param	ev_mdata	Pointer to the event crypto meta data
1367  *				(aka *union rte_event_crypto_metadata*)
1368  * @param	size            Size of ev_mdata.
1369  *
1370  * @return
1371  *  - On success, zero.
1372  *  - On failure, a negative value.
1373  */
1374 int
1375 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1376 	enum rte_crypto_op_type op_type,
1377 	enum rte_crypto_op_sess_type sess_type,
1378 	void *ev_mdata, uint16_t size);
1379 
1380 /**
1381  * Union of different crypto session types, including session-less xform
1382  * pointer.
1383  */
1384 union rte_cryptodev_session_ctx {void *crypto_sess;
1385 	struct rte_crypto_sym_xform *xform;
1386 	struct rte_security_session *sec_sess;
1387 };
1388 
1389 /**
1390  * Enqueue a vectorized operation descriptor into the device queue but the
1391  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1392  * is called.
1393  *
1394  * @param	qp		Driver specific queue pair data.
1395  * @param	drv_ctx		Driver specific context data.
1396  * @param	vec		Vectorized operation descriptor.
1397  * @param	ofs		Start and stop offsets for auth and cipher
1398  *				operations.
1399  * @param	user_data	The array of user data for dequeue later.
1400  * @param	enqueue_status	Driver written value to specify the
1401  *				enqueue status. Possible values:
1402  *				- 1: The number of operations returned are
1403  *				     enqueued successfully.
1404  *				- 0: The number of operations returned are
1405  *				     cached into the queue but are not processed
1406  *				     until rte_cryptodev_raw_enqueue_done() is
1407  *				     called.
1408  *				- negative integer: Error occurred.
1409  * @return
1410  *   - The number of operations in the descriptor successfully enqueued or
1411  *     cached into the queue but not enqueued yet, depends on the
1412  *     "enqueue_status" value.
1413  */
1414 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1415 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1416 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1417 
1418 /**
1419  * Enqueue single raw data vector into the device queue but the driver may or
1420  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1421  *
1422  * @param	qp		Driver specific queue pair data.
1423  * @param	drv_ctx		Driver specific context data.
1424  * @param	data_vec	The buffer data vector.
1425  * @param	n_data_vecs	Number of buffer data vectors.
1426  * @param	ofs		Start and stop offsets for auth and cipher
1427  *				operations.
1428  * @param	iv		IV virtual and IOVA addresses
1429  * @param	digest		digest virtual and IOVA addresses
1430  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1431  *				depends on the algorithm used.
1432  * @param	user_data	The user data.
1433  * @return
1434  *   - 1: The data vector is enqueued successfully.
1435  *   - 0: The data vector is cached into the queue but is not processed
1436  *        until rte_cryptodev_raw_enqueue_done() is called.
1437  *   - negative integer: failure.
1438  */
1439 typedef int (*cryptodev_sym_raw_enqueue_t)(
1440 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1441 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1442 	struct rte_crypto_va_iova_ptr *iv,
1443 	struct rte_crypto_va_iova_ptr *digest,
1444 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1445 	void *user_data);
1446 
1447 /**
1448  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1449  * enqueued/dequeued operations.
1450  *
1451  * @param	qp		Driver specific queue pair data.
1452  * @param	drv_ctx		Driver specific context data.
1453  * @param	n		The total number of processed operations.
1454  * @return
1455  *   - On success return 0.
1456  *   - On failure return negative integer.
1457  */
1458 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1459 	uint32_t n);
1460 
1461 /**
1462  * Typedef of a user-provided function for the driver to get the dequeue count.
1463  * The function may return a fixed number or the number parsed from the user
1464  * data stored in the first processed operation.
1465  *
1466  * @param	user_data	Dequeued user data.
1467  * @return
1468  *  - The number of operations to be dequeued.
1469  */
1470 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1471 
1472 /**
1473  * Typedef of a user-provided function to handle post-dequeue operations, such
1474  * as filling in the status.
1475  *
1476  * @param	user_data	Dequeued user data.
1477  * @param	index		Index number of the processed descriptor.
1478  * @param	is_op_success	Operation status provided by the driver.
1479  */
1480 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1481 	uint32_t index, uint8_t is_op_success);
1482 
1483 /**
1484  * Dequeue a burst of symmetric crypto processing.
1485  *
1486  * @param	qp			Driver specific queue pair data.
1487  * @param	drv_ctx			Driver specific context data.
1488  * @param	get_dequeue_count	User provided callback function to
1489  *					obtain dequeue operation count.
1490  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1491  *					value is used to pass the maximum
1492  *					number of operations to be dequeued.
1493  * @param	post_dequeue		User provided callback function to
1494  *					post-process a dequeued operation.
1495  * @param	out_user_data		User data pointer array to be retrieved
1496  *					from device queue. If
1497  *					*is_user_data_array* is set, there
1498  *					should be enough room to store all
1499  *					user data.
1500  * @param	is_user_data_array	Set 1 if every dequeued user data will
1501  *					be written into out_user_data array.
1502  *					Set 0 if only the first user data will
1503  *					be written into out_user_data array.
1504  * @param	n_success		Driver written value to specify the
1505  *					total successful operations count.
1506  * @param	dequeue_status		Driver written value to specify the
1507  *					dequeue status. Possible values:
1508  *					- 1: Successfully dequeued the number
1509  *					     of operations returned. The user
1510  *					     data previously set during enqueue
1511  *					     is stored in the "out_user_data".
1512  *					- 0: The number of operations returned
1513  *					     are completed and the user data is
1514  *					     stored in the "out_user_data", but
1515  *					     they are not freed from the queue
1516  *					     until
1517  *					     rte_cryptodev_raw_dequeue_done()
1518  *					     is called.
1519  *					- negative integer: Error occurred.
1520  * @return
1521  *   - The number of operations dequeued or completed but not freed from the
1522  *     queue, depends on "dequeue_status" value.
1523  */
1524 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1525 	uint8_t *drv_ctx,
1526 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1527 	uint32_t max_nb_to_dequeue,
1528 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1529 	void **out_user_data, uint8_t is_user_data_array,
1530 	uint32_t *n_success, int *dequeue_status);
1531 
1532 /**
1533  * Dequeue a symmetric crypto processing.
1534  *
1535  * @param	qp			Driver specific queue pair data.
1536  * @param	drv_ctx			Driver specific context data.
1537  * @param	dequeue_status		Driver written value to specify the
1538  *					dequeue status. Possible values:
1539  *					- 1: Successfully dequeued an operation.
1540  *					     The user data is returned.
1541  *					- 0: The first operation in the queue
1542  *					     is completed and the user data
1543  *					     previously set during enqueue is
1544  *					     returned, but it is not freed from
1545  *					     the queue until
1546  *					     rte_cryptodev_raw_dequeue_done() is
1547  *					     called.
1548  *					- negative integer: Error occurred.
1549  * @param	op_status		Driver written value to specify
1550  *					operation status.
1551  * @return
1552  *   - The user data pointer retrieved from device queue or NULL if no
1553  *     operation is ready for dequeue.
1554  */
1555 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1556 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1557 		enum rte_crypto_op_status *op_status);
1558 
1559 /**
1560  * Context data for raw data-path API crypto process. The buffer of this
1561  * structure is to be allocated by the user application with a size equal
1562  * to or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1563  */
1564 struct rte_crypto_raw_dp_ctx {
1565 	void *qp_data;
1566 
1567 	cryptodev_sym_raw_enqueue_t enqueue;
1568 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1569 	cryptodev_sym_raw_operation_done_t enqueue_done;
1570 	cryptodev_sym_raw_dequeue_t dequeue;
1571 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1572 	cryptodev_sym_raw_operation_done_t dequeue_done;
1573 
1574 	/* Driver specific context data */
1575 	uint8_t drv_ctx_data[];
1576 };
1577 
1578 /**
1579  * Configure raw data-path context data.
1580  *
1581  * @param	dev_id		The device identifier.
1582  * @param	qp_id		The index of the queue pair from which to
1583  *				retrieve processed packets. The value must be
1584  *				in the range [0, nb_queue_pair - 1] previously
1585  *				supplied to rte_cryptodev_configure().
1586  * @param	ctx		The raw data-path context data.
1587  * @param	sess_type	Session type.
1588  * @param	session_ctx	Session context data.
1589  * @param	is_update	Set 0 to initialize the ctx.
1590  *				Set 1 if the ctx is already initialized and only the
1591  *				session context data is to be updated.
1592  * @return
1593  *   - On success return 0.
1594  *   - On failure return negative integer.
1595  *     - -EINVAL if input parameters are invalid.
1596  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1597  *        provided session.
1598  */
1599 int
1600 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1601 	struct rte_crypto_raw_dp_ctx *ctx,
1602 	enum rte_crypto_op_sess_type sess_type,
1603 	union rte_cryptodev_session_ctx session_ctx,
1604 	uint8_t is_update);
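
/*
 * A minimal usage sketch for setting up the raw data-path context
 * (illustrative only). It assumes "dev_id", "qp_id" and "sess" (a fully
 * configured symmetric session) are prepared by the application, and that
 * rte_malloc.h is included for rte_zmalloc():
 *
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *
 *	if (size < 0)
 *		return -1;	// raw data-path API not supported
 *	ctx = rte_zmalloc(NULL, size, 0);
 *	if (ctx == NULL || rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id,
 *			ctx, RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
 *		return -1;	// configuration failed
 */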
1605 
1606 /**
1607  * Enqueue a vectorized operation descriptor into the device queue. The
1608  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1609  * is called.
1610  *
1611  * @param	ctx		The initialized raw data-path context data.
1612  * @param	vec		Vectorized operation descriptor.
1613  * @param	ofs		Start and stop offsets for auth and cipher
1614  *				operations.
1615  * @param	user_data	The array of user data for dequeue later.
1616  * @param	enqueue_status	Driver written value to specify the
1617  *				enqueue status. Possible values:
1618  *				- 1: The operations counted by the return
1619  *				     value were enqueued successfully.
1620  *				- 0: The operations counted by the return
1621  *				     value were cached in the queue but will
1622  *				     not be processed until
1623  *				     rte_cryptodev_raw_enqueue_done() is called.
1624  *				- negative integer: Error occurred.
1625  * @return
1626  *   - The number of operations in the descriptor successfully enqueued, or
1627  *     cached in the queue but not yet enqueued, depending on the
1628  *     "enqueue_status" value.
1629  */
1630 uint32_t
1631 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1632 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1633 	void **user_data, int *enqueue_status);
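
/*
 * A minimal usage sketch (illustrative only). "ctx" is a context configured
 * with rte_cryptodev_configure_raw_dp_ctx(); "vec" (struct rte_crypto_sym_vec),
 * "ofs" and the "udata" pointer array are assumed to be filled in by the
 * application:
 *
 *	int enq_status;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			udata, &enq_status);
 *
 *	if (enq_status < 0)
 *		return -1;	// enqueue error
 *	if (enq_status == 0)	// ops only cached, kick the processing
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 */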
1634 
1635 /**
1636  * Enqueue a single raw data vector into the device queue. The driver may or
1637  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1638  *
1639  * @param	ctx		The initialized raw data-path context data.
1640  * @param	data_vec	The buffer data vector.
1641  * @param	n_data_vecs	Number of buffer data vectors.
1642  * @param	ofs		Start and stop offsets for auth and cipher
1643  *				operations.
1644  * @param	iv		IV virtual and IOVA addresses.
1645  * @param	digest		Digest virtual and IOVA addresses.
1646  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1647  *				depending on the algorithm used.
1648  * @param	user_data	The user data.
1649  * @return
1650  *   - 1: The data vector is enqueued successfully.
1651  *   - 0: The data vector is cached in the queue but is not processed
1652  *        until rte_cryptodev_raw_enqueue_done() is called.
1653  *   - negative integer: failure.
1654  */
1655 __rte_experimental
1656 static __rte_always_inline int
1657 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1658 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1659 	union rte_crypto_sym_ofs ofs,
1660 	struct rte_crypto_va_iova_ptr *iv,
1661 	struct rte_crypto_va_iova_ptr *digest,
1662 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1663 	void *user_data)
1664 {
1665 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1666 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1667 }
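
/*
 * A minimal usage sketch for the single-vector variant (illustrative only).
 * "ctx", "data_vec"/"n_data_vecs", "ofs", "iv", "digest", "auth_iv" and the
 * per-operation cookie "op_udata" are assumed to be prepared by the
 * application:
 *
 *	int ret = rte_cryptodev_raw_enqueue(ctx, data_vec, n_data_vecs, ofs,
 *			&iv, &digest, &auth_iv, op_udata);
 *	if (ret < 0)
 *		return ret;	// enqueue error
 *	if (ret == 0)		// cached only, start the processing
 *		rte_cryptodev_raw_enqueue_done(ctx, 1);
 */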
1668 
1669 /**
1670  * Start processing all operations enqueued since the last
1671  * rte_cryptodev_configure_raw_dp_ctx() call.
1672  *
1673  * @param	ctx	The initialized raw data-path context data.
1674  * @param	n	The number of operations cached.
1675  * @return
1676  *   - On success return 0.
1677  *   - On failure return negative integer.
1678  */
1679 int
1680 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1681 		uint32_t n);
1682 
1683 /**
1684  * Dequeue a burst of completed symmetric crypto operations.
1685  *
1686  * @param	ctx			The initialized raw data-path context
1687  *					data.
1688  * @param	get_dequeue_count	User provided callback function to
1689  *					obtain dequeue operation count.
1690  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL, this
1691  *					value is used to pass the maximum
1692  *					number of operations to be dequeued.
1693  * @param	post_dequeue		User provided callback function to
1694  *					post-process a dequeued operation.
1695  * @param	out_user_data		User data pointer array to be retrieved
1696  *					from the device queue. If
1697  *					*is_user_data_array* is set, there
1698  *					should be enough room to store all
1699  *					user data.
1700  * @param	is_user_data_array	Set to 1 if every dequeued user data will
1701  *					be written into the out_user_data array.
1702  *					Set to 0 if only the first user data will
1703  *					be written into the out_user_data array.
1704  * @param	n_success		Driver written value to specify the
1705  *					total count of successful operations.
1706  * @param	dequeue_status		Driver written value to specify the
1707  *					dequeue status. Possible values:
1708  *					- 1: Successfully dequeued the number
1709  *					     of operations returned. The user
1710  *					     data previously set during enqueue
1711  *					     is stored in the "out_user_data".
1712  *					- 0: The operations counted by the
1713  *					     return value are completed and the
1714  *					     user data is stored in
1715  *					     "out_user_data", but they are not
1716  *					     freed from the queue until
1717  *					     rte_cryptodev_raw_dequeue_done()
1718  *					     is called.
1719  *					- negative integer: Error occurred.
1720  * @return
1721  *   - The number of operations dequeued, or completed but not freed from the
1722  *     queue, depending on the "dequeue_status" value.
1723  */
1724 uint32_t
1725 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1726 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1727 	uint32_t max_nb_to_dequeue,
1728 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1729 	void **out_user_data, uint8_t is_user_data_array,
1730 	uint32_t *n_success, int *dequeue_status);
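
/*
 * A minimal usage sketch (illustrative only): dequeue up to BURST completed
 * operations without a count callback (get_dequeue_count == NULL) and collect
 * every user data pointer. "post_deq" is assumed to be an application
 * callback matching rte_cryptodev_raw_post_dequeue_t and BURST an
 * application-chosen constant:
 *
 *	void *out[BURST];
 *	uint32_t n_ok = 0;
 *	int deq_status;
 *	uint32_t n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, BURST,
 *			post_deq, out, 1, &n_ok, &deq_status);
 *
 *	if (deq_status == 0 && n > 0)	// ops not yet freed from the queue
 *		rte_cryptodev_raw_dequeue_done(ctx, n);
 */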
1731 
1732 /**
1733  * Dequeue a single completed symmetric crypto operation.
1734  *
1735  * @param	ctx			The initialized raw data-path context
1736  *					data.
1737  * @param	dequeue_status		Driver written value to specify the
1738  *					dequeue status. Possible values:
1739  *					- 1: Successfully dequeued an operation.
1740  *					     The user data is returned.
1741  *					- 0: The first operation in the queue
1742  *					     is completed and the user data
1743  *					     previously set during enqueue is
1744  *					     returned, but it is not freed from
1745  *					     the queue until
1746  *					     rte_cryptodev_raw_dequeue_done() is
1747  *					     called.
1748  *					- negative integer: Error occurred.
1749  * @param	op_status		Driver written value to specify
1750  *					operation status.
1751  * @return
1752  *   - The user data pointer retrieved from the device queue, or NULL if no
1753  *     operation is ready for dequeue.
1754  */
1755 __rte_experimental
1756 static __rte_always_inline void *
1757 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1758 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1759 {
1760 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1761 			op_status);
1762 }
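
/*
 * A minimal usage sketch (illustrative only): poll one completed operation
 * at a time and acknowledge it so it is freed from the queue:
 *
 *	enum rte_crypto_op_status op_status;
 *	int deq_status;
 *	void *udata = rte_cryptodev_raw_dequeue(ctx, &deq_status, &op_status);
 *
 *	if (udata != NULL) {
 *		if (op_status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *			;	// the operation failed, inspect op_status
 *		if (deq_status == 0)	// not yet freed from the queue
 *			rte_cryptodev_raw_dequeue_done(ctx, 1);
 *	}
 */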
1763 
1764 /**
1765  * Inform the queue pair that dequeue operations are finished.
1766  *
1767  * @param	ctx	The initialized raw data-path context data.
1768  * @param	n	The number of operations.
1769  * @return
1770  *   - On success return 0.
1771  *   - On failure return negative integer.
1772  */
1773 int
1774 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1775 		uint32_t n);
1776 
1777 /**
1778  * Add a user callback for a given crypto device and queue pair which will be
1779  * called on crypto ops enqueue.
1780  *
1781  * This API configures a function to be called for each burst of crypto ops
1782  * received on a given crypto device queue pair. The return value is a pointer
1783  * that can be used later to remove the callback using
1784  * rte_cryptodev_remove_enq_callback().
1785  *
1786  * Callbacks registered by the application would not survive
1787  * rte_cryptodev_configure() as it reinitializes the callback list.
1788  * It is the user's responsibility to remove all installed callbacks before
1789  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1790  * The application is expected to call the add API after rte_cryptodev_configure().
1791  *
1792  * Multiple functions can be registered per queue pair and they are called
1793  * in the order they were added. The API does not restrict the maximum number
1794  * of callbacks.
1795  *
1796  * @param	dev_id		The identifier of the device.
1797  * @param	qp_id		The index of the queue pair on which ops are
1798  *				to be enqueued for processing. The value
1799  *				must be in the range [0, nb_queue_pairs - 1]
1800  *				previously supplied to
1801  *				*rte_cryptodev_configure*.
1802  * @param	cb_fn		The callback function
1803  * @param	cb_arg		A generic pointer parameter which will be passed
1804  *				to each invocation of the callback function on
1805  *				this crypto device and queue pair.
1806  *
1807  * @return
1808  *  - NULL on error, and rte_errno will contain the error code.
1809  *  - On success, a pointer value which can later be used to remove the
1810  *    callback.
1811  */
1812 struct rte_cryptodev_cb *
1813 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1814 			       uint16_t qp_id,
1815 			       rte_cryptodev_callback_fn cb_fn,
1816 			       void *cb_arg);
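
/*
 * A minimal usage sketch (illustrative only). "stats_cb" is assumed to be an
 * application function matching rte_cryptodev_callback_fn and "stats" the
 * state it updates:
 *
 *	struct rte_cryptodev_cb *cb = rte_cryptodev_add_enq_callback(dev_id,
 *			qp_id, stats_cb, &stats);
 *	if (cb == NULL)
 *		return -rte_errno;	// registration failed
 *	// ... enqueue traffic, the callback runs on every burst ...
 *	rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 */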
1817 
1818 /**
1819  * Remove a user callback function for given crypto device and queue pair.
1820  *
1821  * This function is used to remove enqueue callbacks that were added to a
1822  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1823  *
1826  * @param	dev_id		The identifier of the device.
1827  * @param	qp_id		The index of the queue pair on which ops are
1828  *				to be enqueued. The value must be in the
1829  *				range [0, nb_queue_pairs - 1] previously
1830  *				supplied to *rte_cryptodev_configure*.
1831  * @param	cb		Pointer to user supplied callback created via
1832  *				rte_cryptodev_add_enq_callback().
1833  *
1834  * @return
1835  *   -  0: Success. Callback was removed.
1836  *   - <0: The dev_id or the qp_id is out of range, or the callback
1837  *         is NULL or not found for the crypto device queue pair.
1838  */
1839 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1840 				      uint16_t qp_id,
1841 				      struct rte_cryptodev_cb *cb);
1842 
1843 /**
1844  * Add a user callback for a given crypto device and queue pair which will be
1845  * called on crypto ops dequeue.
1846  *
1847  * This API configures a function to be called for each burst of crypto ops
1848  * received on a given crypto device queue pair. The return value is a pointer
1849  * that can be used later to remove the callback using
1850  * rte_cryptodev_remove_deq_callback().
1851  *
1852  * Callbacks registered by the application would not survive
1853  * rte_cryptodev_configure() as it reinitializes the callback list.
1854  * It is the user's responsibility to remove all installed callbacks before
1855  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1856  * The application is expected to call the add API after rte_cryptodev_configure().
1857  *
1858  * Multiple functions can be registered per queue pair and they are called
1859  * in the order they were added. The API does not restrict the maximum number
1860  * of callbacks.
1861  *
1862  * @param	dev_id		The identifier of the device.
1863  * @param	qp_id		The index of the queue pair on which ops are
1864  *				to be dequeued. The value must be in the
1865  *				range [0, nb_queue_pairs - 1] previously
1866  *				supplied to *rte_cryptodev_configure*.
1867  * @param	cb_fn		The callback function
1868  * @param	cb_arg		A generic pointer parameter which will be passed
1869  *				to each invocation of the callback function on
1870  *				this crypto device and queue pair.
1871  *
1872  * @return
1873  *   - NULL on error, and rte_errno will contain the error code.
1874  *   - On success, a pointer value which can later be used to remove the
1875  *     callback.
1876  */
1877 struct rte_cryptodev_cb *
1878 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1879 			       uint16_t qp_id,
1880 			       rte_cryptodev_callback_fn cb_fn,
1881 			       void *cb_arg);
1882 
1883 /**
1884  * Remove a user callback function for a given crypto device and queue pair.
1885  *
1886  * This function is used to remove dequeue callbacks that were added to a
1887  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1888  *
1891  * @param	dev_id		The identifier of the device.
1892  * @param	qp_id		The index of the queue pair on which ops are
1893  *				to be dequeued. The value must be in the
1894  *				range [0, nb_queue_pairs - 1] previously
1895  *				supplied to *rte_cryptodev_configure*.
1896  * @param	cb		Pointer to user supplied callback created via
1897  *				rte_cryptodev_add_deq_callback().
1898  *
1899  * @return
1900  *   -  0: Success. Callback was removed.
1901  *   - <0: The dev_id or the qp_id is out of range, or the callback
1902  *         is NULL or not found for the crypto device queue pair.
1903  */
1904 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1905 				      uint16_t qp_id,
1906 				      struct rte_cryptodev_cb *cb);
1907 
1908 #include <rte_cryptodev_core.h>
1909 
1910 #ifdef __cplusplus
1911 extern "C" {
1912 #endif
1913 /**
1914  *
1915  * Dequeue a burst of processed crypto operations from a queue on the crypto
1916  * device. The dequeued operations are stored in *rte_crypto_op* structures
1917  * whose pointers are supplied in the *ops* array.
1918  *
1919  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1920  * actually dequeued, which is the number of *rte_crypto_op* data structures
1921  * effectively supplied into the *ops* array.
1922  *
1923  * A return value equal to *nb_ops* indicates that the queue contained
1924  * at least *nb_ops* operations, and this is likely to signify that other
1925  * processed operations remain in the device's output queue. Applications
1926  * implementing a "retrieve as many processed operations as possible" policy
1927  * can check this specific case and keep invoking the
1928  * rte_cryptodev_dequeue_burst() function until a value less than
1929  * *nb_ops* is returned.
1930  *
1931  * The rte_cryptodev_dequeue_burst() function does not provide any error
1932  * notification to avoid the corresponding overhead.
1933  *
1934  * @param	dev_id		The symmetric crypto device identifier
1935  * @param	qp_id		The index of the queue pair from which to
1936  *				retrieve processed packets. The value must be
1937  *				in the range [0, nb_queue_pair - 1] previously
1938  *				supplied to rte_cryptodev_configure().
1939  * @param	ops		The address of an array of pointers to
1940  *				*rte_crypto_op* structures that must be
1941  *				large enough to store *nb_ops* pointers in it.
1942  * @param	nb_ops		The maximum number of operations to dequeue.
1943  *
1944  * @return
1945  *   - The number of operations actually dequeued, which is the number
1946  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1947  *   *ops* array.
1948  */
1949 static inline uint16_t
1950 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1951 		struct rte_crypto_op **ops, uint16_t nb_ops)
1952 {
1953 	const struct rte_crypto_fp_ops *fp_ops;
1954 	void *qp;
1955 
1956 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1957 
1958 	fp_ops = &rte_crypto_fp_ops[dev_id];
1959 	qp = fp_ops->qp.data[qp_id];
1960 
1961 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1962 
1963 #ifdef RTE_CRYPTO_CALLBACKS
1964 	if (unlikely(fp_ops->qp.deq_cb[qp_id].next != NULL)) {
1965 		struct rte_cryptodev_cb_rcu *list;
1966 		struct rte_cryptodev_cb *cb;
1967 
1968 		/* rte_memory_order_release memory order was used when the
1969 		 * callback was inserted into the list.
1970 		 * Since there is a clear dependency between loading
1971 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1972 		 * not required.
1973 		 */
1974 		list = &fp_ops->qp.deq_cb[qp_id];
1975 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1976 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1977 
1978 		while (cb != NULL) {
1979 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1980 					cb->arg);
1981 			cb = cb->next;
1982 		}
1983 
1984 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1985 	}
1986 #endif
1987 	return nb_ops;
1988 }
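
/*
 * A minimal sketch of the "retrieve as many processed operations as possible"
 * policy described above (illustrative only; BURST and the handling of the
 * dequeued operations are application specific):
 *
 *	struct rte_crypto_op *deq_ops[BURST];
 *	uint16_t n;
 *
 *	do {
 *		n = rte_cryptodev_dequeue_burst(dev_id, qp_id, deq_ops, BURST);
 *		// process and free the n dequeued operations here
 *	} while (n == BURST);
 */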
1989 
1990 /**
1991  * Enqueue a burst of operations for processing on a crypto device.
1992  *
1993  * The rte_cryptodev_enqueue_burst() function is invoked to place
1994  * crypto operations on the queue *qp_id* of the device designated by
1995  * its *dev_id*.
1996  *
1997  * The *nb_ops* parameter is the number of operations to process which are
1998  * supplied in the *ops* array of *rte_crypto_op* structures.
1999  *
2000  * The rte_cryptodev_enqueue_burst() function returns the number of
2001  * operations it actually enqueued for processing. A return value equal to
2002  * *nb_ops* means that all packets have been enqueued.
2003  *
2004  * @param	dev_id		The identifier of the device.
2005  * @param	qp_id		The index of the queue pair which packets are
2006  *				to be enqueued for processing. The value
2007  *				must be in the range [0, nb_queue_pairs - 1]
2008  *				previously supplied to
2009  *				 *rte_cryptodev_configure*.
2010  * @param	ops		The address of an array of *nb_ops* pointers
2011  *				to *rte_crypto_op* structures which contain
2012  *				the crypto operations to be processed.
2013  * @param	nb_ops		The number of operations to process.
2014  *
2015  * @return
2016  * The number of operations actually enqueued on the crypto device. The return
2017  * value can be less than the value of the *nb_ops* parameter when the
2018  * crypto device's queue is full or if invalid parameters are specified in
2019  * a *rte_crypto_op*.
2020  */
2021 static inline uint16_t
2022 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
2023 		struct rte_crypto_op **ops, uint16_t nb_ops)
2024 {
2025 	const struct rte_crypto_fp_ops *fp_ops;
2026 	void *qp;
2027 
2028 	fp_ops = &rte_crypto_fp_ops[dev_id];
2029 	qp = fp_ops->qp.data[qp_id];
2030 #ifdef RTE_CRYPTO_CALLBACKS
2031 	if (unlikely(fp_ops->qp.enq_cb[qp_id].next != NULL)) {
2032 		struct rte_cryptodev_cb_rcu *list;
2033 		struct rte_cryptodev_cb *cb;
2034 
2035 		/* rte_memory_order_release memory order was used when the
2036 		 * callback was inserted into the list.
2037 		 * Since there is a clear dependency between loading
2038 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
2039 		 * not required.
2040 		 */
2041 		list = &fp_ops->qp.enq_cb[qp_id];
2042 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
2043 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
2044 
2045 		while (cb != NULL) {
2046 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
2047 					cb->arg);
2048 			cb = cb->next;
2049 		}
2050 
2051 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2052 	}
2053 #endif
2054 
2055 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2056 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2057 }
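
/*
 * A minimal sketch of a common enqueue pattern (illustrative only): keep
 * retrying the operations that were not accepted, e.g. because the queue was
 * full. Real applications typically bound the retries or drop the remainder:
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_ops)
 *		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *				&enq_ops[sent], nb_ops - sent);
 */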
2058 
2059 /**
2060  * @warning
2061  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2062  *
2063  * Get the number of used descriptors or depth of a cryptodev queue pair.
2064  *
2065  * This function retrieves the number of used descriptors in a crypto queue.
2066  * Applications can use this API in the fast path to inspect QP occupancy and
2067  * take appropriate action.
2068  *
2069  * Since it is a fast-path function, no check is performed on dev_id and qp_id.
2070  * Caller must therefore ensure that the device is enabled and queue pair is setup.
2071  *
2072  * @param	dev_id		The identifier of the device.
2073  * @param	qp_id		The index of the queue pair for which used descriptor
2074  *				count is to be retrieved. The value
2075  *				must be in the range [0, nb_queue_pairs - 1]
2076  *				previously supplied to *rte_cryptodev_configure*.
2077  *
2078  * @return
2079  *  The number of used descriptors on the specified queue pair, or:
2080  *   - (-ENOTSUP) if the device does not support this function.
2081  */
2083 __rte_experimental
2084 static inline int
2085 rte_cryptodev_qp_depth_used(uint8_t dev_id, uint16_t qp_id)
2086 {
2087 	const struct rte_crypto_fp_ops *fp_ops;
2088 	void *qp;
2089 	int rc;
2090 
2091 	fp_ops = &rte_crypto_fp_ops[dev_id];
2092 	qp = fp_ops->qp.data[qp_id];
2093 
2094 	if (fp_ops->qp_depth_used == NULL) {
2095 		rc = -ENOTSUP;
2096 		goto out;
2097 	}
2098 
2099 	rc = fp_ops->qp_depth_used(qp);
2100 out:
2101 	rte_cryptodev_trace_qp_depth_used(dev_id, qp_id);
2102 	return rc;
2103 }
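
/*
 * A minimal usage sketch (illustrative only): back off when the queue pair
 * occupancy crosses an application-chosen threshold HIGH_WATERMARK:
 *
 *	int depth = rte_cryptodev_qp_depth_used(dev_id, qp_id);
 *
 *	if (depth == -ENOTSUP)
 *		;	// the PMD does not report queue depth
 *	else if (depth > HIGH_WATERMARK)
 *		;	// e.g. dequeue completed ops before enqueuing more
 */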
2104 
2105 
2106 #ifdef __cplusplus
2107 }
2108 #endif
2109 
2110 #endif /* _RTE_CRYPTODEV_H_ */
2111