xref: /dpdk/lib/cryptodev/rte_cryptodev.h (revision 3a80d7fb2ecdd6e8e48e56e3726b26980fa2a089)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include <rte_compat.h>
22 #include "rte_kvargs.h"
23 #include "rte_crypto.h"
24 #include <rte_common.h>
25 #include <rte_rcu_qsbr.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
29 extern const char **rte_cyptodev_names;
30 
31 /* Logging Macros */
32 
33 #define CDEV_LOG_ERR(...) \
34 	RTE_LOG(ERR, CRYPTODEV, \
35 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
36 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
37 
38 #define CDEV_LOG_INFO(...) \
39 	RTE_LOG(INFO, CRYPTODEV, \
40 		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
41 			RTE_FMT_TAIL(__VA_ARGS__,)))
42 
43 #define CDEV_LOG_DEBUG(...) \
44 	RTE_LOG(DEBUG, CRYPTODEV, \
45 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
46 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
47 
48 #define CDEV_PMD_TRACE(...) \
49 	RTE_LOG(DEBUG, CRYPTODEV, \
50 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
51 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
52 
53 /**
54  * A macro that points to an offset from the start
55  * of the crypto operation structure (rte_crypto_op)
56  *
57  * The returned pointer is cast to type t.
58  *
59  * @param c
60  *   The crypto operation.
61  * @param o
62  *   The offset from the start of the crypto operation.
63  * @param t
64  *   The type to cast the result into.
65  */
66 #define rte_crypto_op_ctod_offset(c, t, o)	\
67 	((t)((char *)(c) + (o)))
68 
69 /**
70  * A macro that returns the physical address that points
71  * to an offset from the start of the crypto operation
72  * (rte_crypto_op)
73  *
74  * @param c
75  *   The crypto operation.
76  * @param o
77  *   The offset from the start of the crypto operation
78  *   to calculate address from.
79  */
80 #define rte_crypto_op_ctophys_offset(c, o)	\
81 	(rte_iova_t)((c)->phys_addr + (o))
82 
83 /**
84  * Crypto parameters range description
85  */
86 struct rte_crypto_param_range {
87 	uint16_t min;	/**< minimum size */
88 	uint16_t max;	/**< maximum size */
89 	uint16_t increment;
90 	/**< if a range of sizes are supported,
91 	 * this parameter is used to indicate
92 	 * increments in byte size that are supported
93 	 * between the minimum and maximum
94 	 */
95 };
96 
97 /**
98  * Data-unit supported lengths of cipher algorithms.
99  * A bit can represent any set of data-unit sizes
100  * (single size, multiple size, range, etc).
101  */
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
105 
106 /**
107  * Symmetric Crypto Capability
108  */
109 struct rte_cryptodev_symmetric_capability {
110 	enum rte_crypto_sym_xform_type xform_type;
111 	/**< Transform type : Authentication / Cipher / AEAD */
112 	RTE_STD_C11
113 	union {
114 		struct {
115 			enum rte_crypto_auth_algorithm algo;
116 			/**< authentication algorithm */
117 			uint16_t block_size;
118 			/**< algorithm block size */
119 			struct rte_crypto_param_range key_size;
120 			/**< auth key size range */
121 			struct rte_crypto_param_range digest_size;
122 			/**< digest size range */
123 			struct rte_crypto_param_range aad_size;
124 			/**< Additional authentication data size range */
125 			struct rte_crypto_param_range iv_size;
126 			/**< Initialisation vector data size range */
127 		} auth;
128 		/**< Symmetric Authentication transform capabilities */
129 		struct {
130 			enum rte_crypto_cipher_algorithm algo;
131 			/**< cipher algorithm */
132 			uint16_t block_size;
133 			/**< algorithm block size */
134 			struct rte_crypto_param_range key_size;
135 			/**< cipher key size range */
136 			struct rte_crypto_param_range iv_size;
137 			/**< Initialisation vector data size range */
138 			uint32_t dataunit_set;
139 			/**<
140 			 * Supported data-unit lengths:
141 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
142 			 * or 0 for lengths defined in the algorithm standard.
143 			 */
144 		} cipher;
145 		/**< Symmetric Cipher transform capabilities */
146 		struct {
147 			enum rte_crypto_aead_algorithm algo;
148 			/**< AEAD algorithm */
149 			uint16_t block_size;
150 			/**< algorithm block size */
151 			struct rte_crypto_param_range key_size;
152 			/**< AEAD key size range */
153 			struct rte_crypto_param_range digest_size;
154 			/**< digest size range */
155 			struct rte_crypto_param_range aad_size;
156 			/**< Additional authentication data size range */
157 			struct rte_crypto_param_range iv_size;
158 			/**< Initialisation vector data size range */
159 		} aead;
160 	};
161 };
162 
163 /**
164  * Asymmetric Xform Crypto Capability
165  *
166  */
167 struct rte_cryptodev_asymmetric_xform_capability {
168 	enum rte_crypto_asym_xform_type xform_type;
169 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
170 
171 	uint32_t op_types;
172 	/**<
173 	 * Bitmask for supported rte_crypto_asym_op_type or
174 	 * rte_crypto_asym_ke_type. Which enum is used is determined
175 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
176 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
177 	 * it is rte_crypto_asym_op_type.
178 	 */
179 
180 	__extension__
181 	union {
182 		struct rte_crypto_param_range modlen;
183 		/**< Range of modulus length supported by modulus based xform.
184 		 * Value 0 mean implementation default
185 		 */
186 	};
187 };
188 
189 /**
190  * Asymmetric Crypto Capability
191  *
192  */
193 struct rte_cryptodev_asymmetric_capability {
194 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
195 };
196 
197 
198 /** Structure used to capture a capability of a crypto device */
199 struct rte_cryptodev_capabilities {
200 	enum rte_crypto_op_type op;
201 	/**< Operation type */
202 
203 	RTE_STD_C11
204 	union {
205 		struct rte_cryptodev_symmetric_capability sym;
206 		/**< Symmetric operation capability parameters */
207 		struct rte_cryptodev_asymmetric_capability asym;
208 		/**< Asymmetric operation capability parameters */
209 	};
210 };
211 
212 /** Structure used to describe crypto algorithms */
213 struct rte_cryptodev_sym_capability_idx {
214 	enum rte_crypto_sym_xform_type type;
215 	union {
216 		enum rte_crypto_cipher_algorithm cipher;
217 		enum rte_crypto_auth_algorithm auth;
218 		enum rte_crypto_aead_algorithm aead;
219 	} algo;
220 };
221 
222 /**
223  * Structure used to describe asymmetric crypto xforms
224  * Each xform maps to one asym algorithm.
225  *
226  */
227 struct rte_cryptodev_asym_capability_idx {
228 	enum rte_crypto_asym_xform_type type;
229 	/**< Asymmetric xform (algo) type */
230 };
231 
232 /**
233  * Provide capabilities available for defined device and algorithm
234  *
235  * @param	dev_id		The identifier of the device.
236  * @param	idx		Description of crypto algorithms.
237  *
238  * @return
239  *   - Return description of the symmetric crypto capability if exist.
240  *   - Return NULL if the capability not exist.
241  */
242 const struct rte_cryptodev_symmetric_capability *
243 rte_cryptodev_sym_capability_get(uint8_t dev_id,
244 		const struct rte_cryptodev_sym_capability_idx *idx);
245 
246 /**
247  *  Provide capabilities available for defined device and xform
248  *
249  * @param	dev_id		The identifier of the device.
250  * @param	idx		Description of asym crypto xform.
251  *
252  * @return
253  *   - Return description of the asymmetric crypto capability if exist.
254  *   - Return NULL if the capability not exist.
255  */
256 __rte_experimental
257 const struct rte_cryptodev_asymmetric_xform_capability *
258 rte_cryptodev_asym_capability_get(uint8_t dev_id,
259 		const struct rte_cryptodev_asym_capability_idx *idx);
260 
261 /**
262  * Check if key size and initial vector are supported
263  * in crypto cipher capability
264  *
265  * @param	capability	Description of the symmetric crypto capability.
266  * @param	key_size	Cipher key size.
267  * @param	iv_size		Cipher initial vector size.
268  *
269  * @return
270  *   - Return 0 if the parameters are in range of the capability.
271  *   - Return -1 if the parameters are out of range of the capability.
272  */
273 int
274 rte_cryptodev_sym_capability_check_cipher(
275 		const struct rte_cryptodev_symmetric_capability *capability,
276 		uint16_t key_size, uint16_t iv_size);
277 
278 /**
279  * Check if key size and initial vector are supported
280  * in crypto auth capability
281  *
282  * @param	capability	Description of the symmetric crypto capability.
283  * @param	key_size	Auth key size.
284  * @param	digest_size	Auth digest size.
285  * @param	iv_size		Auth initial vector size.
286  *
287  * @return
288  *   - Return 0 if the parameters are in range of the capability.
289  *   - Return -1 if the parameters are out of range of the capability.
290  */
291 int
292 rte_cryptodev_sym_capability_check_auth(
293 		const struct rte_cryptodev_symmetric_capability *capability,
294 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
295 
296 /**
297  * Check if key, digest, AAD and initial vector sizes are supported
298  * in crypto AEAD capability
299  *
300  * @param	capability	Description of the symmetric crypto capability.
301  * @param	key_size	AEAD key size.
302  * @param	digest_size	AEAD digest size.
303  * @param	aad_size	AEAD AAD size.
304  * @param	iv_size		AEAD IV size.
305  *
306  * @return
307  *   - Return 0 if the parameters are in range of the capability.
308  *   - Return -1 if the parameters are out of range of the capability.
309  */
310 int
311 rte_cryptodev_sym_capability_check_aead(
312 		const struct rte_cryptodev_symmetric_capability *capability,
313 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
314 		uint16_t iv_size);
315 
316 /**
317  * Check if op type is supported
318  *
319  * @param	capability	Description of the asymmetric crypto capability.
320  * @param	op_type		op type
321  *
322  * @return
323  *   - Return 1 if the op type is supported
324  *   - Return 0 if unsupported
325  */
326 __rte_experimental
327 int
328 rte_cryptodev_asym_xform_capability_check_optype(
329 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
330 		enum rte_crypto_asym_op_type op_type);
331 
332 /**
333  * Check if modulus length is in supported range
334  *
335  * @param	capability	Description of the asymmetric crypto capability.
336  * @param	modlen		modulus length.
337  *
338  * @return
339  *   - Return 0 if the parameters are in range of the capability.
340  *   - Return -1 if the parameters are out of range of the capability.
341  */
342 __rte_experimental
343 int
344 rte_cryptodev_asym_xform_capability_check_modlen(
345 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
346 		uint16_t modlen);
347 
348 /**
349  * Provide the cipher algorithm enum, given an algorithm string
350  *
351  * @param	algo_enum	A pointer to the cipher algorithm
352  *				enum to be filled
353  * @param	algo_string	Authentication algo string
354  *
355  * @return
356  * - Return -1 if string is not valid
357  * - Return 0 is the string is valid
358  */
359 int
360 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
361 		const char *algo_string);
362 
363 /**
364  * Provide the authentication algorithm enum, given an algorithm string
365  *
366  * @param	algo_enum	A pointer to the authentication algorithm
367  *				enum to be filled
368  * @param	algo_string	Authentication algo string
369  *
370  * @return
371  * - Return -1 if string is not valid
372  * - Return 0 is the string is valid
373  */
374 int
375 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
376 		const char *algo_string);
377 
378 /**
379  * Provide the AEAD algorithm enum, given an algorithm string
380  *
381  * @param	algo_enum	A pointer to the AEAD algorithm
382  *				enum to be filled
383  * @param	algo_string	AEAD algorithm string
384  *
385  * @return
386  * - Return -1 if string is not valid
387  * - Return 0 is the string is valid
388  */
389 int
390 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
391 		const char *algo_string);
392 
393 /**
394  * Provide the Asymmetric xform enum, given an xform string
395  *
396  * @param	xform_enum	A pointer to the xform type
397  *				enum to be filled
398  * @param	xform_string	xform string
399  *
400  * @return
401  * - Return -1 if string is not valid
402  * - Return 0 if the string is valid
403  */
404 __rte_experimental
405 int
406 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
407 		const char *xform_string);
408 
409 /**
410  * Provide the cipher algorithm string, given an algorithm enum.
411  *
412  * @param	algo_enum	cipher algorithm enum
413  *
414  * @return
415  * - Return NULL if enum is not valid
416  * - Return algo_string corresponding to enum
417  */
418 __rte_experimental
419 const char *
420 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
421 
422 /**
423  * Provide the authentication algorithm string, given an algorithm enum.
424  *
425  * @param	algo_enum	auth algorithm enum
426  *
427  * @return
428  * - Return NULL if enum is not valid
429  * - Return algo_string corresponding to enum
430  */
431 __rte_experimental
432 const char *
433 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
434 
435 /**
436  * Provide the AEAD algorithm string, given an algorithm enum.
437  *
438  * @param	algo_enum	AEAD algorithm enum
439  *
440  * @return
441  * - Return NULL if enum is not valid
442  * - Return algo_string corresponding to enum
443  */
444 __rte_experimental
445 const char *
446 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
447 
448 /**
449  * Provide the Asymmetric xform string, given an xform enum.
450  *
451  * @param	xform_enum	xform type enum
452  *
453  * @return
454  * - Return NULL, if enum is not valid.
455  * - Return xform string, for valid enum.
456  */
457 __rte_experimental
458 const char *
459 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
460 
461 
462 /** Macro used at end of crypto PMD list */
463 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
464 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
465 
466 
467 /**
468  * Crypto device supported feature flags
469  *
470  * Note:
471  * New features flags should be added to the end of the list
472  *
473  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
474  */
475 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
476 /**< Symmetric crypto operations are supported */
477 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
478 /**< Asymmetric crypto operations are supported */
479 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
480 /**< Chaining symmetric crypto operations are supported */
481 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
482 /**< Utilises CPU SIMD SSE instructions */
483 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
484 /**< Utilises CPU SIMD AVX instructions */
485 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
486 /**< Utilises CPU SIMD AVX2 instructions */
487 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
488 /**< Utilises CPU AES-NI instructions */
489 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
490 /**< Operations are off-loaded to an
491  * external hardware accelerator
492  */
493 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
494 /**< Utilises CPU SIMD AVX512 instructions */
495 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
496 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
497  * are supported
498  */
499 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
500 /**< Out-of-place Scatter-gather (SGL) buffers are
501  * supported in input and output
502  */
503 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
504 /**< Out-of-place Scatter-gather (SGL) buffers are supported
505  * in input, combined with linear buffers (LB), with a
506  * single segment in output
507  */
508 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
509 /**< Out-of-place Scatter-gather (SGL) buffers are supported
510  * in output, combined with linear buffers (LB) in input
511  */
512 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
513 /**< Out-of-place linear buffers (LB) are supported in input and output */
514 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
515 /**< Utilises CPU NEON instructions */
516 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
517 /**< Utilises ARM CPU Cryptographic Extensions */
518 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
519 /**< Support Security Protocol Processing */
520 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
521 /**< Support RSA Private Key OP with exponent */
522 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
523 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
524 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
525 /**< Support encrypted-digest operations where digest is appended to data */
526 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
527 /**< Support asymmetric session-less operations */
528 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
529 /**< Support symmetric cpu-crypto processing */
530 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
531 /**< Support symmetric session-less operations */
532 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
533 /**< Support operations on data which is not byte aligned */
534 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
535 /**< Support accelerator specific symmetric raw data-path APIs */
536 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
537 /**< Support operations on multiple data-units message */
538 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
539 /**< Support wrapped key in cipher xform  */
540 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
541 /**< Support inner checksum computation/verification */
542 
543 /**
544  * Get the name of a crypto device feature flag
545  *
546  * @param	flag	The mask describing the flag.
547  *
548  * @return
549  *   The name of this flag, or NULL if it's not a valid feature flag.
550  */
551 
552 extern const char *
553 rte_cryptodev_get_feature_name(uint64_t flag);
554 
555 /**  Crypto device information */
556 struct rte_cryptodev_info {
557 	const char *driver_name;	/**< Driver name. */
558 	uint8_t driver_id;		/**< Driver identifier */
559 	struct rte_device *device;	/**< Generic device information. */
560 
561 	uint64_t feature_flags;
562 	/**< Feature flags exposes HW/SW features for the given device */
563 
564 	const struct rte_cryptodev_capabilities *capabilities;
565 	/**< Array of devices supported capabilities */
566 
567 	unsigned max_nb_queue_pairs;
568 	/**< Maximum number of queues pairs supported by device. */
569 
570 	uint16_t min_mbuf_headroom_req;
571 	/**< Minimum mbuf headroom required by device */
572 
573 	uint16_t min_mbuf_tailroom_req;
574 	/**< Minimum mbuf tailroom required by device */
575 
576 	struct {
577 		unsigned max_nb_sessions;
578 		/**< Maximum number of sessions supported by device.
579 		 * If 0, the device does not have any limitation in
580 		 * number of sessions that can be used.
581 		 */
582 	} sym;
583 };
584 
585 #define RTE_CRYPTODEV_DETACHED  (0)
586 #define RTE_CRYPTODEV_ATTACHED  (1)
587 
588 /** Definitions of Crypto device event types */
589 enum rte_cryptodev_event_type {
590 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
591 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
592 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
593 };
594 
595 /** Crypto device queue pair configuration structure. */
596 struct rte_cryptodev_qp_conf {
597 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
598 	struct rte_mempool *mp_session;
599 	/**< The mempool for creating session in sessionless mode */
600 };
601 
602 /**
603  * Function type used for processing crypto ops when enqueue/dequeue burst is
604  * called.
605  *
606  * The callback function is called on enqueue/dequeue burst immediately.
607  *
608  * @param	dev_id		The identifier of the device.
609  * @param	qp_id		The index of the queue pair on which ops are
610  *				enqueued/dequeued. The value must be in the
611  *				range [0, nb_queue_pairs - 1] previously
612  *				supplied to *rte_cryptodev_configure*.
613  * @param	ops		The address of an array of *nb_ops* pointers
614  *				to *rte_crypto_op* structures which contain
615  *				the crypto operations to be processed.
616  * @param	nb_ops		The number of operations to process.
617  * @param	user_param	The arbitrary user parameter passed in by the
618  *				application when the callback was originally
619  *				registered.
620  * @return			The number of ops to be enqueued to the
621  *				crypto device.
622  */
623 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
624 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
625 
626 /**
627  * Typedef for application callback function to be registered by application
628  * software for notification of device events
629  *
630  * @param	dev_id	Crypto device identifier
631  * @param	event	Crypto device event to register for notification of.
632  * @param	cb_arg	User specified parameter to be passed as to passed to
633  *			users callback function.
634  */
635 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
636 		enum rte_cryptodev_event_type event, void *cb_arg);
637 
638 
639 /** Crypto Device statistics */
640 struct rte_cryptodev_stats {
641 	uint64_t enqueued_count;
642 	/**< Count of all operations enqueued */
643 	uint64_t dequeued_count;
644 	/**< Count of all operations dequeued */
645 
646 	uint64_t enqueue_err_count;
647 	/**< Total error count on operations enqueued */
648 	uint64_t dequeue_err_count;
649 	/**< Total error count on operations dequeued */
650 };
651 
652 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
653 /**< Max length of name of crypto PMD */
654 
655 /**
656  * Get the device identifier for the named crypto device.
657  *
658  * @param	name	device name to select the device structure.
659  *
660  * @return
661  *   - Returns crypto device identifier on success.
662  *   - Return -1 on failure to find named crypto device.
663  */
664 extern int
665 rte_cryptodev_get_dev_id(const char *name);
666 
667 /**
668  * Get the crypto device name given a device identifier.
669  *
670  * @param dev_id
671  *   The identifier of the device
672  *
673  * @return
674  *   - Returns crypto device name.
675  *   - Returns NULL if crypto device is not present.
676  */
677 extern const char *
678 rte_cryptodev_name_get(uint8_t dev_id);
679 
680 /**
681  * Get the total number of crypto devices that have been successfully
682  * initialised.
683  *
684  * @return
685  *   - The total number of usable crypto devices.
686  */
687 extern uint8_t
688 rte_cryptodev_count(void);
689 
690 /**
691  * Get number of crypto device defined type.
692  *
693  * @param	driver_id	driver identifier.
694  *
695  * @return
696  *   Returns number of crypto device.
697  */
698 extern uint8_t
699 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
700 
701 /**
702  * Get number and identifiers of attached crypto devices that
703  * use the same crypto driver.
704  *
705  * @param	driver_name	driver name.
706  * @param	devices		output devices identifiers.
707  * @param	nb_devices	maximal number of devices.
708  *
709  * @return
710  *   Returns number of attached crypto device.
711  */
712 uint8_t
713 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
714 		uint8_t nb_devices);
715 /*
716  * Return the NUMA socket to which a device is connected
717  *
718  * @param dev_id
719  *   The identifier of the device
720  * @return
721  *   The NUMA socket id to which the device is connected or
722  *   a default of zero if the socket could not be determined.
723  *   -1 if returned is the dev_id value is out of range.
724  */
725 extern int
726 rte_cryptodev_socket_id(uint8_t dev_id);
727 
728 /** Crypto device configuration structure */
729 struct rte_cryptodev_config {
730 	int socket_id;			/**< Socket to allocate resources on */
731 	uint16_t nb_queue_pairs;
732 	/**< Number of queue pairs to configure on device */
733 	uint64_t ff_disable;
734 	/**< Feature flags to be disabled. Only the following features are
735 	 * allowed to be disabled,
736 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
737 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
738 	 *  - RTE_CRYTPODEV_FF_SECURITY
739 	 */
740 };
741 
742 /**
743  * Configure a device.
744  *
745  * This function must be invoked first before any other function in the
746  * API. This function can also be re-invoked when a device is in the
747  * stopped state.
748  *
749  * @param	dev_id		The identifier of the device to configure.
750  * @param	config		The crypto device configuration structure.
751  *
752  * @return
753  *   - 0: Success, device configured.
754  *   - <0: Error code returned by the driver configuration function.
755  */
756 extern int
757 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
758 
759 /**
760  * Start an device.
761  *
762  * The device start step is the last one and consists of setting the configured
763  * offload features and in starting the transmit and the receive units of the
764  * device.
765  * On success, all basic functions exported by the API (link status,
766  * receive/transmit, and so on) can be invoked.
767  *
768  * @param dev_id
769  *   The identifier of the device.
770  * @return
771  *   - 0: Success, device started.
772  *   - <0: Error code of the driver device start function.
773  */
774 extern int
775 rte_cryptodev_start(uint8_t dev_id);
776 
777 /**
778  * Stop an device. The device can be restarted with a call to
779  * rte_cryptodev_start()
780  *
781  * @param	dev_id		The identifier of the device.
782  */
783 extern void
784 rte_cryptodev_stop(uint8_t dev_id);
785 
786 /**
787  * Close an device. The device cannot be restarted!
788  *
789  * @param	dev_id		The identifier of the device.
790  *
791  * @return
792  *  - 0 on successfully closing device
793  *  - <0 on failure to close device
794  */
795 extern int
796 rte_cryptodev_close(uint8_t dev_id);
797 
798 /**
799  * Allocate and set up a receive queue pair for a device.
800  *
801  *
802  * @param	dev_id		The identifier of the device.
803  * @param	queue_pair_id	The index of the queue pairs to set up. The
804  *				value must be in the range [0, nb_queue_pair
805  *				- 1] previously supplied to
806  *				rte_cryptodev_configure().
807  * @param	qp_conf		The pointer to the configuration data to be
808  *				used for the queue pair.
809  * @param	socket_id	The *socket_id* argument is the socket
810  *				identifier in case of NUMA. The value can be
811  *				*SOCKET_ID_ANY* if there is no NUMA constraint
812  *				for the DMA memory allocated for the receive
813  *				queue pair.
814  *
815  * @return
816  *   - 0: Success, queue pair correctly set up.
817  *   - <0: Queue pair configuration failed
818  */
819 extern int
820 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
821 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
822 
823 /**
824  * Get the status of queue pairs setup on a specific crypto device
825  *
826  * @param	dev_id		Crypto device identifier.
827  * @param	queue_pair_id	The index of the queue pairs to set up. The
828  *				value must be in the range [0, nb_queue_pair
829  *				- 1] previously supplied to
830  *				rte_cryptodev_configure().
831  * @return
832  *   - 0: qp was not configured
833  *	 - 1: qp was configured
834  *	 - -EINVAL: device was not configured
835  */
836 __rte_experimental
837 int
838 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
839 
840 /**
841  * Get the number of queue pairs on a specific crypto device
842  *
843  * @param	dev_id		Crypto device identifier.
844  * @return
845  *   - The number of configured queue pairs.
846  */
847 extern uint16_t
848 rte_cryptodev_queue_pair_count(uint8_t dev_id);
849 
850 
851 /**
852  * Retrieve the general I/O statistics of a device.
853  *
854  * @param	dev_id		The identifier of the device.
855  * @param	stats		A pointer to a structure of type
856  *				*rte_cryptodev_stats* to be filled with the
857  *				values of device counters.
858  * @return
859  *   - Zero if successful.
860  *   - Non-zero otherwise.
861  */
862 extern int
863 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
864 
865 /**
866  * Reset the general I/O statistics of a device.
867  *
868  * @param	dev_id		The identifier of the device.
869  */
870 extern void
871 rte_cryptodev_stats_reset(uint8_t dev_id);
872 
873 /**
874  * Retrieve the contextual information of a device.
875  *
876  * @param	dev_id		The identifier of the device.
877  * @param	dev_info	A pointer to a structure of type
878  *				*rte_cryptodev_info* to be filled with the
879  *				contextual information of the device.
880  *
881  * @note The capabilities field of dev_info is set to point to the first
882  * element of an array of struct rte_cryptodev_capabilities. The element after
883  * the last valid element has it's op field set to
884  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
885  */
886 extern void
887 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
888 
889 
890 /**
891  * Register a callback function for specific device id.
892  *
893  * @param	dev_id		Device id.
894  * @param	event		Event interested.
895  * @param	cb_fn		User supplied callback function to be called.
896  * @param	cb_arg		Pointer to the parameters for the registered
897  *				callback.
898  *
899  * @return
900  *  - On success, zero.
901  *  - On failure, a negative value.
902  */
903 extern int
904 rte_cryptodev_callback_register(uint8_t dev_id,
905 		enum rte_cryptodev_event_type event,
906 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
907 
908 /**
909  * Unregister a callback function for specific device id.
910  *
911  * @param	dev_id		The device identifier.
912  * @param	event		Event interested.
913  * @param	cb_fn		User supplied callback function to be called.
914  * @param	cb_arg		Pointer to the parameters for the registered
915  *				callback.
916  *
917  * @return
918  *  - On success, zero.
919  *  - On failure, a negative value.
920  */
921 extern int
922 rte_cryptodev_callback_unregister(uint8_t dev_id,
923 		enum rte_cryptodev_event_type event,
924 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
925 
926 struct rte_cryptodev_callback;
927 
928 /** Structure to keep track of registered callbacks */
929 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
930 
931 /**
932  * Structure used to hold information about the callbacks to be called for a
933  * queue pair on enqueue/dequeue.
934  */
935 struct rte_cryptodev_cb {
936 	struct rte_cryptodev_cb *next;
937 	/**< Pointer to next callback */
938 	rte_cryptodev_callback_fn fn;
939 	/**< Pointer to callback function */
940 	void *arg;
941 	/**< Pointer to argument */
942 };
943 
944 /**
945  * @internal
946  * Structure used to hold information about the RCU for a queue pair.
947  */
948 struct rte_cryptodev_cb_rcu {
949 	struct rte_cryptodev_cb *next;
950 	/**< Pointer to next callback */
951 	struct rte_rcu_qsbr *qsbr;
952 	/**< RCU QSBR variable per queue pair */
953 };
954 
955 void *
956 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
957 
958 /**
959  * Create a symmetric session mempool.
960  *
961  * @param name
962  *   The unique mempool name.
963  * @param nb_elts
964  *   The number of elements in the mempool.
965  * @param elt_size
966  *   The size of the element. This value will be ignored if it is smaller than
967  *   the minimum session header size required for the system. For the user who
968  *   want to use the same mempool for sym session and session private data it
969  *   can be the maximum value of all existing devices' private data and session
970  *   header sizes.
971  * @param cache_size
972  *   The number of per-lcore cache elements
973  * @param priv_size
974  *   The private data size of each session.
975  * @param socket_id
976  *   The *socket_id* argument is the socket identifier in the case of
977  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
978  *   constraint for the reserved zone.
979  *
980  * @return
981  *  - On success return size of the session
982  *  - On failure returns 0
983  */
984 __rte_experimental
985 struct rte_mempool *
986 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
987 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
988 	int socket_id);
989 
990 
991 /**
992  * Create an asymmetric session mempool.
993  *
994  * @param name
995  *   The unique mempool name.
996  * @param nb_elts
997  *   The number of elements in the mempool.
998  * @param cache_size
999  *   The number of per-lcore cache elements
1000  * @param user_data_size
1001  *   The size of user data to be placed after session private data.
1002  * @param socket_id
1003  *   The *socket_id* argument is the socket identifier in the case of
1004  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1005  *   constraint for the reserved zone.
1006  *
1007  * @return
1008  *  - On success return mempool
1009  *  - On failure returns NULL
1010  */
1011 __rte_experimental
1012 struct rte_mempool *
1013 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1014 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1015 
1016 /**
1017  * Create symmetric crypto session and fill out private data for the device id,
1018  * based on its device type.
1019  *
1020  * @param   dev_id   ID of device that we want the session to be used on
1021  * @param   xforms   Symmetric crypto transform operations to apply on flow
1022  *                   processed with this session
1023  * @param   mp       Mempool where the private data is allocated.
1024  *
1025  * @return
1026  *  - On success return pointer to sym-session.
1027  *  - On failure returns NULL.
1028  */
1029 void *
1030 rte_cryptodev_sym_session_create(uint8_t dev_id,
1031 		struct rte_crypto_sym_xform *xforms,
1032 		struct rte_mempool *mp);
1033 /**
1034  * Create and initialise an asymmetric crypto session structure.
1035  * Calls the PMD to configure the private session data.
1036  *
1037  * @param   dev_id   ID of device that we want the session to be used on
1038  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1039  *                   processed with this session
1040  * @param   mp       mempool to allocate asymmetric session
1041  *                   objects from
1042  * @param   session  void ** for session to be used
1043  *
1044  * @return
1045  *  - 0 on success.
1046  *  - -EINVAL on invalid arguments.
1047  *  - -ENOMEM on memory error for session allocation.
1048  *  - -ENOTSUP if device doesn't support session configuration.
1049  */
1050 __rte_experimental
1051 int
1052 rte_cryptodev_asym_session_create(uint8_t dev_id,
1053 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1054 		void **session);
1055 
1056 /**
1057  * Frees session for the device id and returning it to its mempool.
1058  * It is the application's responsibility to ensure that the session
1059  * is not still in-flight operations using it.
1060  *
1061  * @param   dev_id   ID of device that uses the session.
1062  * @param   sess     Session header to be freed.
1063  *
1064  * @return
1065  *  - 0 if successful.
1066  *  - -EINVAL if session is NULL or the mismatched device ids.
1067  */
1068 int
1069 rte_cryptodev_sym_session_free(uint8_t dev_id,
1070 	void *sess);
1071 
1072 /**
1073  * Clears and frees asymmetric crypto session header and private data,
1074  * returning it to its original mempool.
1075  *
1076  * @param   dev_id   ID of device that uses the asymmetric session.
1077  * @param   sess     Session header to be freed.
1078  *
1079  * @return
1080  *  - 0 if successful.
1081  *  - -EINVAL if device is invalid or session is NULL.
1082  */
1083 __rte_experimental
1084 int
1085 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1086 
1087 /**
1088  * Get the size of the asymmetric session header.
1089  *
1090  * @return
1091  *   Size of the asymmetric header session.
1092  */
1093 __rte_experimental
1094 unsigned int
1095 rte_cryptodev_asym_get_header_session_size(void);
1096 
1097 /**
1098  * Get the size of the private symmetric session data
1099  * for a device.
1100  *
1101  * @param	dev_id		The device identifier.
1102  *
1103  * @return
1104  *   - Size of the private data, if successful
1105  *   - 0 if device is invalid or does not have private
1106  *   symmetric session
1107  */
1108 unsigned int
1109 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1110 
1111 /**
1112  * Get the size of the private data for asymmetric session
1113  * on device
1114  *
1115  * @param	dev_id		The device identifier.
1116  *
1117  * @return
1118  *   - Size of the asymmetric private data, if successful
1119  *   - 0 if device is invalid or does not have private session
1120  */
1121 __rte_experimental
1122 unsigned int
1123 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1124 
1125 /**
1126  * Validate if the crypto device index is valid attached crypto device.
1127  *
1128  * @param	dev_id	Crypto device index.
1129  *
1130  * @return
1131  *   - If the device index is valid (1) or not (0).
1132  */
1133 unsigned int
1134 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1135 
1136 /**
1137  * Provide driver identifier.
1138  *
1139  * @param name
1140  *   The pointer to a driver name.
1141  * @return
1142  *  The driver type identifier or -1 if no driver found
1143  */
1144 int rte_cryptodev_driver_id_get(const char *name);
1145 
1146 /**
1147  * Provide driver name.
1148  *
1149  * @param driver_id
1150  *   The driver identifier.
1151  * @return
1152  *  The driver name or null if no driver found
1153  */
1154 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1155 
1156 /**
1157  * Store user data in a session.
1158  *
1159  * @param	sess		Session pointer allocated by
1160  *				*rte_cryptodev_sym_session_create*.
1161  * @param	data		Pointer to the user data.
1162  * @param	size		Size of the user data.
1163  *
1164  * @return
1165  *  - On success, zero.
1166  *  - On failure, a negative value.
1167  */
1168 __rte_experimental
1169 int
1170 rte_cryptodev_sym_session_set_user_data(void *sess,
1171 					void *data,
1172 					uint16_t size);
1173 
1174 #define CRYPTO_SESS_OPAQUE_DATA_OFF 0
1175 /**
1176  * Get opaque data from session handle
1177  */
1178 static inline uint64_t
1179 rte_cryptodev_sym_session_opaque_data_get(void *sess)
1180 {
1181 	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
1182 }
1183 
1184 /**
1185  * Set opaque data in session handle
1186  */
1187 static inline void
1188 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1189 {
1190 	uint64_t *data;
1191 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1192 	*data = opaque;
1193 }
1194 
1195 /**
1196  * Get user data stored in a session.
1197  *
1198  * @param	sess		Session pointer allocated by
1199  *				*rte_cryptodev_sym_session_create*.
1200  *
1201  * @return
1202  *  - On success return pointer to user data.
1203  *  - On failure returns NULL.
1204  */
1205 __rte_experimental
1206 void *
1207 rte_cryptodev_sym_session_get_user_data(void *sess);
1208 
1209 /**
1210  * Store user data in an asymmetric session.
1211  *
1212  * @param	sess		Session pointer allocated by
1213  *				*rte_cryptodev_asym_session_create*.
1214  * @param	data		Pointer to the user data.
1215  * @param	size		Size of the user data.
1216  *
1217  * @return
1218  *  - On success, zero.
1219  *  - -EINVAL if the session pointer is invalid.
1220  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1221  */
1222 __rte_experimental
1223 int
1224 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1225 
1226 /**
1227  * Get user data stored in an asymmetric session.
1228  *
1229  * @param	sess		Session pointer allocated by
1230  *				*rte_cryptodev_asym_session_create*.
1231  *
1232  * @return
1233  *  - On success return pointer to user data.
1234  *  - On failure returns NULL.
1235  */
1236 __rte_experimental
1237 void *
1238 rte_cryptodev_asym_session_get_user_data(void *sess);
1239 
1240 /**
1241  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1242  * on user provided data.
1243  *
1244  * @param	dev_id	The device identifier.
1245  * @param	sess	Cryptodev session structure
1246  * @param	ofs	Start and stop offsets for auth and cipher operations
1247  * @param	vec	Vectorized operation descriptor
1248  *
1249  * @return
1250  *  - Returns number of successfully processed packets.
1251  */
1252 __rte_experimental
1253 uint32_t
1254 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1255 	void *sess, union rte_crypto_sym_ofs ofs,
1256 	struct rte_crypto_sym_vec *vec);
1257 
1258 /**
1259  * Get the size of the raw data-path context buffer.
1260  *
1261  * @param	dev_id		The device identifier.
1262  *
1263  * @return
1264  *   - If the device supports raw data-path APIs, return the context size.
1265  *   - If the device does not support the APIs, return -1.
1266  */
1267 __rte_experimental
1268 int
1269 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1270 
1271 /**
1272  * Set session event meta data
1273  *
1274  * @param	dev_id		The device identifier.
1275  * @param	sess            Crypto or security session.
1276  * @param	op_type         Operation type.
1277  * @param	sess_type       Session type.
1278  * @param	ev_mdata	Pointer to the event crypto meta data
1279  *				(aka *union rte_event_crypto_metadata*)
1280  * @param	size            Size of ev_mdata.
1281  *
1282  * @return
1283  *  - On success, zero.
1284  *  - On failure, a negative value.
1285  */
1286 __rte_experimental
1287 int
1288 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1289 	enum rte_crypto_op_type op_type,
1290 	enum rte_crypto_op_sess_type sess_type,
1291 	void *ev_mdata, uint16_t size);
1292 
1293 /**
1294  * Union of different crypto session types, including session-less xform
1295  * pointer.
1296  */
1297 union rte_cryptodev_session_ctx {void *crypto_sess;
1298 	struct rte_crypto_sym_xform *xform;
1299 	struct rte_security_session *sec_sess;
1300 };
1301 
1302 /**
1303  * Enqueue a vectorized operation descriptor into the device queue but the
1304  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1305  * is called.
1306  *
1307  * @param	qp		Driver specific queue pair data.
1308  * @param	drv_ctx		Driver specific context data.
1309  * @param	vec		Vectorized operation descriptor.
1310  * @param	ofs		Start and stop offsets for auth and cipher
1311  *				operations.
1312  * @param	user_data	The array of user data for dequeue later.
1313  * @param	enqueue_status	Driver written value to specify the
1314  *				enqueue status. Possible values:
1315  *				- 1: The number of operations returned are
1316  *				     enqueued successfully.
1317  *				- 0: The number of operations returned are
1318  *				     cached into the queue but are not processed
1319  *				     until rte_cryptodev_raw_enqueue_done() is
1320  *				     called.
1321  *				- negative integer: Error occurred.
1322  * @return
1323  *   - The number of operations in the descriptor successfully enqueued or
1324  *     cached into the queue but not enqueued yet, depends on the
1325  *     "enqueue_status" value.
1326  */
1327 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1328 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1329 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1330 
1331 /**
1332  * Enqueue single raw data vector into the device queue but the driver may or
1333  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1334  *
1335  * @param	qp		Driver specific queue pair data.
1336  * @param	drv_ctx		Driver specific context data.
1337  * @param	data_vec	The buffer data vector.
1338  * @param	n_data_vecs	Number of buffer data vectors.
1339  * @param	ofs		Start and stop offsets for auth and cipher
1340  *				operations.
1341  * @param	iv		IV virtual and IOVA addresses
1342  * @param	digest		digest virtual and IOVA addresses
1343  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1344  *				depends on the algorithm used.
1345  * @param	user_data	The user data.
1346  * @return
1347  *   - 1: The data vector is enqueued successfully.
1348  *   - 0: The data vector is cached into the queue but is not processed
1349  *        until rte_cryptodev_raw_enqueue_done() is called.
1350  *   - negative integer: failure.
1351  */
1352 typedef int (*cryptodev_sym_raw_enqueue_t)(
1353 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1354 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1355 	struct rte_crypto_va_iova_ptr *iv,
1356 	struct rte_crypto_va_iova_ptr *digest,
1357 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1358 	void *user_data);
1359 
1360 /**
1361  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1362  * enqueued/dequeued operations.
1363  *
1364  * @param	qp		Driver specific queue pair data.
1365  * @param	drv_ctx		Driver specific context data.
1366  * @param	n		The total number of processed operations.
1367  * @return
1368  *   - On success return 0.
1369  *   - On failure return negative integer.
1370  */
1371 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1372 	uint32_t n);
1373 
1374 /**
1375  * Typedef that the user provided for the driver to get the dequeue count.
1376  * The function may return a fixed number or the number parsed from the user
1377  * data stored in the first processed operation.
1378  *
1379  * @param	user_data	Dequeued user data.
1380  * @return
1381  *  - The number of operations to be dequeued.
1382  **/
1383 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1384 
1385 /**
1386  * Typedef that the user provided to deal with post dequeue operation, such
1387  * as filling status.
1388  *
1389  * @param	user_data	Dequeued user data.
1390  * @param	index		Index number of the processed descriptor.
1391  * @param	is_op_success	Operation status provided by the driver.
1392  **/
1393 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1394 	uint32_t index, uint8_t is_op_success);
1395 
1396 /**
1397  * Dequeue a burst of symmetric crypto processing.
1398  *
1399  * @param	qp			Driver specific queue pair data.
1400  * @param	drv_ctx			Driver specific context data.
1401  * @param	get_dequeue_count	User provided callback function to
1402  *					obtain dequeue operation count.
1403  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1404  *					value is used to pass the maximum
1405  *					number of operations to be dequeued.
1406  * @param	post_dequeue		User provided callback function to
1407  *					post-process a dequeued operation.
1408  * @param	out_user_data		User data pointer array to be retrieve
1409  *					from device queue. In case of
1410  *					*is_user_data_array* is set there
1411  *					should be enough room to store all
1412  *					user data.
1413  * @param	is_user_data_array	Set 1 if every dequeued user data will
1414  *					be written into out_user_data array.
1415  *					Set 0 if only the first user data will
1416  *					be written into out_user_data array.
1417  * @param	n_success		Driver written value to specific the
1418  *					total successful operations count.
1419  * @param	dequeue_status		Driver written value to specify the
1420  *					dequeue status. Possible values:
1421  *					- 1: Successfully dequeued the number
1422  *					     of operations returned. The user
1423  *					     data previously set during enqueue
1424  *					     is stored in the "out_user_data".
1425  *					- 0: The number of operations returned
1426  *					     are completed and the user data is
1427  *					     stored in the "out_user_data", but
1428  *					     they are not freed from the queue
1429  *					     until
1430  *					     rte_cryptodev_raw_dequeue_done()
1431  *					     is called.
1432  *					- negative integer: Error occurred.
1433  * @return
1434  *   - The number of operations dequeued or completed but not freed from the
1435  *     queue, depends on "dequeue_status" value.
1436  */
1437 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1438 	uint8_t *drv_ctx,
1439 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1440 	uint32_t max_nb_to_dequeue,
1441 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1442 	void **out_user_data, uint8_t is_user_data_array,
1443 	uint32_t *n_success, int *dequeue_status);
1444 
1445 /**
1446  * Dequeue a symmetric crypto processing.
1447  *
1448  * @param	qp			Driver specific queue pair data.
1449  * @param	drv_ctx			Driver specific context data.
1450  * @param	dequeue_status		Driver written value to specify the
1451  *					dequeue status. Possible values:
1452  *					- 1: Successfully dequeued a operation.
1453  *					     The user data is returned.
1454  *					- 0: The first operation in the queue
1455  *					     is completed and the user data
1456  *					     previously set during enqueue is
1457  *					     returned, but it is not freed from
1458  *					     the queue until
1459  *					     rte_cryptodev_raw_dequeue_done() is
1460  *					     called.
1461  *					- negative integer: Error occurred.
1462  * @param	op_status		Driver written value to specify
1463  *					operation status.
1464  * @return
1465  *   - The user data pointer retrieved from device queue or NULL if no
1466  *     operation is ready for dequeue.
1467  */
1468 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1469 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1470 		enum rte_crypto_op_status *op_status);
1471 
1472 /**
1473  * Context data for raw data-path API crypto process. The buffer of this
1474  * structure is to be allocated by the user application with the size equal
1475  * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value.
1476  */
1477 struct rte_crypto_raw_dp_ctx {
1478 	void *qp_data;
1479 
1480 	cryptodev_sym_raw_enqueue_t enqueue;
1481 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1482 	cryptodev_sym_raw_operation_done_t enqueue_done;
1483 	cryptodev_sym_raw_dequeue_t dequeue;
1484 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1485 	cryptodev_sym_raw_operation_done_t dequeue_done;
1486 
1487 	/* Driver specific context data */
1488 	__extension__ uint8_t drv_ctx_data[];
1489 };
1490 
1491 /**
1492  * Configure raw data-path context data.
1493  *
1494  * NOTE:
1495  * After the context data is configured, the user should call
1496  * rte_cryptodev_raw_attach_session() before using it in
1497  * rte_cryptodev_raw_enqueue/dequeue function call.
1498  *
1499  * @param	dev_id		The device identifier.
1500  * @param	qp_id		The index of the queue pair from which to
1501  *				retrieve processed packets. The value must be
1502  *				in the range [0, nb_queue_pair - 1] previously
1503  *				supplied to rte_cryptodev_configure().
1504  * @param	ctx		The raw data-path context data.
1505  * @param	sess_type	session type.
1506  * @param	session_ctx	Session context data.
1507  * @param	is_update	Set 0 if it is to initialize the ctx.
1508  *				Set 1 if ctx is initialized and only to update
1509  *				session context data.
1510  * @return
1511  *   - On success return 0.
1512  *   - On failure return negative integer.
1513  */
1514 __rte_experimental
1515 int
1516 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1517 	struct rte_crypto_raw_dp_ctx *ctx,
1518 	enum rte_crypto_op_sess_type sess_type,
1519 	union rte_cryptodev_session_ctx session_ctx,
1520 	uint8_t is_update);
1521 
1522 /**
1523  * Enqueue a vectorized operation descriptor into the device queue but the
1524  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1525  * is called.
1526  *
1527  * @param	ctx		The initialized raw data-path context data.
1528  * @param	vec		Vectorized operation descriptor.
1529  * @param	ofs		Start and stop offsets for auth and cipher
1530  *				operations.
1531  * @param	user_data	The array of user data for dequeue later.
1532  * @param	enqueue_status	Driver written value to specify the
1533  *				enqueue status. Possible values:
1534  *				- 1: The number of operations returned are
1535  *				     enqueued successfully.
1536  *				- 0: The number of operations returned are
1537  *				     cached into the queue but are not processed
1538  *				     until rte_cryptodev_raw_enqueue_done() is
1539  *				     called.
1540  *				- negative integer: Error occurred.
1541  * @return
1542  *   - The number of operations in the descriptor successfully enqueued or
1543  *     cached into the queue but not enqueued yet, depends on the
1544  *     "enqueue_status" value.
1545  */
1546 __rte_experimental
1547 uint32_t
1548 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1549 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1550 	void **user_data, int *enqueue_status);
1551 
1552 /**
1553  * Enqueue single raw data vector into the device queue but the driver may or
1554  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1555  *
1556  * @param	ctx		The initialized raw data-path context data.
1557  * @param	data_vec	The buffer data vector.
1558  * @param	n_data_vecs	Number of buffer data vectors.
1559  * @param	ofs		Start and stop offsets for auth and cipher
1560  *				operations.
1561  * @param	iv		IV virtual and IOVA addresses
1562  * @param	digest		digest virtual and IOVA addresses
1563  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1564  *				depends on the algorithm used.
1565  * @param	user_data	The user data.
1566  * @return
1567  *   - 1: The data vector is enqueued successfully.
1568  *   - 0: The data vector is cached into the queue but is not processed
1569  *        until rte_cryptodev_raw_enqueue_done() is called.
1570  *   - negative integer: failure.
1571  */
1572 __rte_experimental
1573 static __rte_always_inline int
1574 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1575 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1576 	union rte_crypto_sym_ofs ofs,
1577 	struct rte_crypto_va_iova_ptr *iv,
1578 	struct rte_crypto_va_iova_ptr *digest,
1579 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1580 	void *user_data)
1581 {
1582 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1583 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1584 }
1585 
1586 /**
1587  * Start processing all enqueued operations from last
1588  * rte_cryptodev_configure_raw_dp_ctx() call.
1589  *
1590  * @param	ctx	The initialized raw data-path context data.
1591  * @param	n	The number of operations cached.
1592  * @return
1593  *   - On success return 0.
1594  *   - On failure return negative integer.
1595  */
1596 __rte_experimental
1597 int
1598 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1599 		uint32_t n);
1600 
1601 /**
1602  * Dequeue a burst of symmetric crypto processing.
1603  *
1604  * @param	ctx			The initialized raw data-path context
1605  *					data.
1606  * @param	get_dequeue_count	User provided callback function to
1607  *					obtain dequeue operation count.
1608  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1609  *					value is used to pass the maximum
1610  *					number of operations to be dequeued.
1611  * @param	post_dequeue		User provided callback function to
1612  *					post-process a dequeued operation.
1613  * @param	out_user_data		User data pointer array to be retrieve
1614  *					from device queue. In case of
1615  *					*is_user_data_array* is set there
1616  *					should be enough room to store all
1617  *					user data.
1618  * @param	is_user_data_array	Set 1 if every dequeued user data will
1619  *					be written into out_user_data array.
1620  *					Set 0 if only the first user data will
1621  *					be written into out_user_data array.
1622  * @param	n_success		Driver written value to specific the
1623  *					total successful operations count.
1624  * @param	dequeue_status		Driver written value to specify the
1625  *					dequeue status. Possible values:
1626  *					- 1: Successfully dequeued the number
1627  *					     of operations returned. The user
1628  *					     data previously set during enqueue
1629  *					     is stored in the "out_user_data".
1630  *					- 0: The number of operations returned
1631  *					     are completed and the user data is
1632  *					     stored in the "out_user_data", but
1633  *					     they are not freed from the queue
1634  *					     until
1635  *					     rte_cryptodev_raw_dequeue_done()
1636  *					     is called.
1637  *					- negative integer: Error occurred.
1638  * @return
1639  *   - The number of operations dequeued or completed but not freed from the
1640  *     queue, depends on "dequeue_status" value.
1641  */
1642 __rte_experimental
1643 uint32_t
1644 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1645 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1646 	uint32_t max_nb_to_dequeue,
1647 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1648 	void **out_user_data, uint8_t is_user_data_array,
1649 	uint32_t *n_success, int *dequeue_status);
1650 
1651 /**
1652  * Dequeue a symmetric crypto processing.
1653  *
1654  * @param	ctx			The initialized raw data-path context
1655  *					data.
1656  * @param	dequeue_status		Driver written value to specify the
1657  *					dequeue status. Possible values:
1658  *					- 1: Successfully dequeued a operation.
1659  *					     The user data is returned.
1660  *					- 0: The first operation in the queue
1661  *					     is completed and the user data
1662  *					     previously set during enqueue is
1663  *					     returned, but it is not freed from
1664  *					     the queue until
1665  *					     rte_cryptodev_raw_dequeue_done() is
1666  *					     called.
1667  *					- negative integer: Error occurred.
1668  * @param	op_status		Driver written value to specify
1669  *					operation status.
1670  * @return
1671  *   - The user data pointer retrieved from device queue or NULL if no
1672  *     operation is ready for dequeue.
1673  */
1674 __rte_experimental
1675 static __rte_always_inline void *
1676 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1677 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1678 {
1679 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1680 			op_status);
1681 }
1682 
1683 /**
1684  * Inform the queue pair dequeue operations is finished.
1685  *
1686  * @param	ctx	The initialized raw data-path context data.
1687  * @param	n	The number of operations.
1688  * @return
1689  *   - On success return 0.
1690  *   - On failure return negative integer.
1691  */
1692 __rte_experimental
1693 int
1694 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1695 		uint32_t n);
1696 
1697 /**
1698  * Add a user callback for a given crypto device and queue pair which will be
1699  * called on crypto ops enqueue.
1700  *
1701  * This API configures a function to be called for each burst of crypto ops
1702  * received on a given crypto device queue pair. The return value is a pointer
1703  * that can be used later to remove the callback using
1704  * rte_cryptodev_remove_enq_callback().
1705  *
1706  * Callbacks registered by application would not survive
1707  * rte_cryptodev_configure() as it reinitializes the callback list.
1708  * It is user responsibility to remove all installed callbacks before
1709  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1710  * Application is expected to call add API after rte_cryptodev_configure().
1711  *
1712  * Multiple functions can be registered per queue pair & they are called
1713  * in the order they were added. The API does not restrict on maximum number
1714  * of callbacks.
1715  *
1716  * @param	dev_id		The identifier of the device.
1717  * @param	qp_id		The index of the queue pair on which ops are
1718  *				to be enqueued for processing. The value
1719  *				must be in the range [0, nb_queue_pairs - 1]
1720  *				previously supplied to
1721  *				*rte_cryptodev_configure*.
1722  * @param	cb_fn		The callback function
1723  * @param	cb_arg		A generic pointer parameter which will be passed
1724  *				to each invocation of the callback function on
1725  *				this crypto device and queue pair.
1726  *
1727  * @return
1728  *  - NULL on error & rte_errno will contain the error code.
1729  *  - On success, a pointer value which can later be used to remove the
1730  *    callback.
1731  */
1732 
1733 __rte_experimental
1734 struct rte_cryptodev_cb *
1735 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1736 			       uint16_t qp_id,
1737 			       rte_cryptodev_callback_fn cb_fn,
1738 			       void *cb_arg);
1739 
1740 /**
1741  * Remove a user callback function for given crypto device and queue pair.
1742  *
1743  * This function is used to remove enqueue callbacks that were added to a
1744  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1745  *
1746  *
1747  *
1748  * @param	dev_id		The identifier of the device.
1749  * @param	qp_id		The index of the queue pair on which ops are
1750  *				to be enqueued. The value must be in the
1751  *				range [0, nb_queue_pairs - 1] previously
1752  *				supplied to *rte_cryptodev_configure*.
1753  * @param	cb		Pointer to user supplied callback created via
1754  *				rte_cryptodev_add_enq_callback().
1755  *
1756  * @return
1757  *   -  0: Success. Callback was removed.
1758  *   - <0: The dev_id or the qp_id is out of range, or the callback
1759  *         is NULL or not found for the crypto device queue pair.
1760  */
1761 
1762 __rte_experimental
1763 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1764 				      uint16_t qp_id,
1765 				      struct rte_cryptodev_cb *cb);
1766 
1767 /**
1768  * Add a user callback for a given crypto device and queue pair which will be
1769  * called on crypto ops dequeue.
1770  *
1771  * This API configures a function to be called for each burst of crypto ops
1772  * received on a given crypto device queue pair. The return value is a pointer
1773  * that can be used later to remove the callback using
1774  * rte_cryptodev_remove_deq_callback().
1775  *
1776  * Callbacks registered by application would not survive
1777  * rte_cryptodev_configure() as it reinitializes the callback list.
1778  * It is user responsibility to remove all installed callbacks before
1779  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1780  * Application is expected to call add API after rte_cryptodev_configure().
1781  *
1782  * Multiple functions can be registered per queue pair & they are called
1783  * in the order they were added. The API does not restrict on maximum number
1784  * of callbacks.
1785  *
1786  * @param	dev_id		The identifier of the device.
1787  * @param	qp_id		The index of the queue pair on which ops are
1788  *				to be dequeued. The value must be in the
1789  *				range [0, nb_queue_pairs - 1] previously
1790  *				supplied to *rte_cryptodev_configure*.
1791  * @param	cb_fn		The callback function
1792  * @param	cb_arg		A generic pointer parameter which will be passed
1793  *				to each invocation of the callback function on
1794  *				this crypto device and queue pair.
1795  *
1796  * @return
1797  *   - NULL on error & rte_errno will contain the error code.
1798  *   - On success, a pointer value which can later be used to remove the
1799  *     callback.
1800  */
1801 
1802 __rte_experimental
1803 struct rte_cryptodev_cb *
1804 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1805 			       uint16_t qp_id,
1806 			       rte_cryptodev_callback_fn cb_fn,
1807 			       void *cb_arg);
1808 
1809 /**
1810  * Remove a user callback function for given crypto device and queue pair.
1811  *
1812  * This function is used to remove dequeue callbacks that were added to a
1813  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1814  *
1815  *
1816  *
1817  * @param	dev_id		The identifier of the device.
1818  * @param	qp_id		The index of the queue pair on which ops are
1819  *				to be dequeued. The value must be in the
1820  *				range [0, nb_queue_pairs - 1] previously
1821  *				supplied to *rte_cryptodev_configure*.
1822  * @param	cb		Pointer to user supplied callback created via
1823  *				rte_cryptodev_add_deq_callback().
1824  *
1825  * @return
1826  *   -  0: Success. Callback was removed.
1827  *   - <0: The dev_id or the qp_id is out of range, or the callback
1828  *         is NULL or not found for the crypto device queue pair.
1829  */
1830 __rte_experimental
1831 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1832 				      uint16_t qp_id,
1833 				      struct rte_cryptodev_cb *cb);
1834 
1835 #include <rte_cryptodev_core.h>
1836 /**
1837  *
1838  * Dequeue a burst of processed crypto operations from a queue on the crypto
1839  * device. The dequeued operation are stored in *rte_crypto_op* structures
1840  * whose pointers are supplied in the *ops* array.
1841  *
1842  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1843  * actually dequeued, which is the number of *rte_crypto_op* data structures
1844  * effectively supplied into the *ops* array.
1845  *
1846  * A return value equal to *nb_ops* indicates that the queue contained
1847  * at least *nb_ops* operations, and this is likely to signify that other
1848  * processed operations remain in the devices output queue. Applications
1849  * implementing a "retrieve as many processed operations as possible" policy
1850  * can check this specific case and keep invoking the
1851  * rte_cryptodev_dequeue_burst() function until a value less than
1852  * *nb_ops* is returned.
1853  *
1854  * The rte_cryptodev_dequeue_burst() function does not provide any error
1855  * notification to avoid the corresponding overhead.
1856  *
1857  * @param	dev_id		The symmetric crypto device identifier
1858  * @param	qp_id		The index of the queue pair from which to
1859  *				retrieve processed packets. The value must be
1860  *				in the range [0, nb_queue_pair - 1] previously
1861  *				supplied to rte_cryptodev_configure().
1862  * @param	ops		The address of an array of pointers to
1863  *				*rte_crypto_op* structures that must be
1864  *				large enough to store *nb_ops* pointers in it.
1865  * @param	nb_ops		The maximum number of operations to dequeue.
1866  *
1867  * @return
1868  *   - The number of operations actually dequeued, which is the number
1869  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1870  *   *ops* array.
1871  */
1872 static inline uint16_t
1873 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1874 		struct rte_crypto_op **ops, uint16_t nb_ops)
1875 {
1876 	const struct rte_crypto_fp_ops *fp_ops;
1877 	void *qp;
1878 
1879 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1880 
1881 	fp_ops = &rte_crypto_fp_ops[dev_id];
1882 	qp = fp_ops->qp.data[qp_id];
1883 
1884 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1885 
1886 #ifdef RTE_CRYPTO_CALLBACKS
1887 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1888 		struct rte_cryptodev_cb_rcu *list;
1889 		struct rte_cryptodev_cb *cb;
1890 
1891 		/* __ATOMIC_RELEASE memory order was used when the
1892 		 * call back was inserted into the list.
1893 		 * Since there is a clear dependency between loading
1894 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1895 		 * not required.
1896 		 */
1897 		list = &fp_ops->qp.deq_cb[qp_id];
1898 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1899 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1900 
1901 		while (cb != NULL) {
1902 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1903 					cb->arg);
1904 			cb = cb->next;
1905 		};
1906 
1907 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1908 	}
1909 #endif
1910 	return nb_ops;
1911 }
1912 
1913 /**
1914  * Enqueue a burst of operations for processing on a crypto device.
1915  *
1916  * The rte_cryptodev_enqueue_burst() function is invoked to place
1917  * crypto operations on the queue *qp_id* of the device designated by
1918  * its *dev_id*.
1919  *
1920  * The *nb_ops* parameter is the number of operations to process which are
1921  * supplied in the *ops* array of *rte_crypto_op* structures.
1922  *
1923  * The rte_cryptodev_enqueue_burst() function returns the number of
1924  * operations it actually enqueued for processing. A return value equal to
1925  * *nb_ops* means that all packets have been enqueued.
1926  *
1927  * @param	dev_id		The identifier of the device.
1928  * @param	qp_id		The index of the queue pair which packets are
1929  *				to be enqueued for processing. The value
1930  *				must be in the range [0, nb_queue_pairs - 1]
1931  *				previously supplied to
1932  *				 *rte_cryptodev_configure*.
1933  * @param	ops		The address of an array of *nb_ops* pointers
1934  *				to *rte_crypto_op* structures which contain
1935  *				the crypto operations to be processed.
1936  * @param	nb_ops		The number of operations to process.
1937  *
1938  * @return
1939  * The number of operations actually enqueued on the crypto device. The return
1940  * value can be less than the value of the *nb_ops* parameter when the
1941  * crypto devices queue is full or if invalid parameters are specified in
1942  * a *rte_crypto_op*.
1943  */
1944 static inline uint16_t
1945 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1946 		struct rte_crypto_op **ops, uint16_t nb_ops)
1947 {
1948 	const struct rte_crypto_fp_ops *fp_ops;
1949 	void *qp;
1950 
1951 	fp_ops = &rte_crypto_fp_ops[dev_id];
1952 	qp = fp_ops->qp.data[qp_id];
1953 #ifdef RTE_CRYPTO_CALLBACKS
1954 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1955 		struct rte_cryptodev_cb_rcu *list;
1956 		struct rte_cryptodev_cb *cb;
1957 
1958 		/* __ATOMIC_RELEASE memory order was used when the
1959 		 * call back was inserted into the list.
1960 		 * Since there is a clear dependency between loading
1961 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1962 		 * not required.
1963 		 */
1964 		list = &fp_ops->qp.enq_cb[qp_id];
1965 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1966 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1967 
1968 		while (cb != NULL) {
1969 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1970 					cb->arg);
1971 			cb = cb->next;
1972 		};
1973 
1974 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1975 	}
1976 #endif
1977 
1978 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1979 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
1980 }
1981 
1982 
1983 
1984 #ifdef __cplusplus
1985 }
1986 #endif
1987 
1988 #endif /* _RTE_CRYPTODEV_H_ */
1989