xref: /dpdk/lib/cryptodev/rte_cryptodev.h (revision 8f1d23ece06adff5eae9f1b4365bdbbd3abee2b2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include "rte_kvargs.h"
22 #include "rte_crypto.h"
23 #include <rte_common.h>
24 #include <rte_rcu_qsbr.h>
25 
26 #include "rte_cryptodev_trace_fp.h"
27 
/* NOTE(review): "cyptodev" is misspelled (missing the 'r'), but this is the
 * exported symbol name; presumably kept for API/ABI compatibility — confirm
 * before renaming.
 */
28 extern const char **rte_cyptodev_names;
29 
30 /* Logging Macros */
31 
32 #define CDEV_LOG_ERR(...) \
33 	RTE_LOG(ERR, CRYPTODEV, \
34 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
35 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
36 
37 #define CDEV_LOG_INFO(...) \
38 	RTE_LOG(INFO, CRYPTODEV, \
39 		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
40 			RTE_FMT_TAIL(__VA_ARGS__,)))
41 
42 #define CDEV_LOG_DEBUG(...) \
43 	RTE_LOG(DEBUG, CRYPTODEV, \
44 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
45 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
46 
47 #define CDEV_PMD_TRACE(...) \
48 	RTE_LOG(DEBUG, CRYPTODEV, \
49 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
50 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
51 
52 /**
53  * A macro that points to an offset from the start
54  * of the crypto operation structure (rte_crypto_op)
55  *
56  * The returned pointer is cast to type t.
57  *
58  * @param c
59  *   The crypto operation.
 * @param t
 *   The type to cast the result into.
 * @param o
 *   The offset from the start of the crypto operation.
64  */
65 #define rte_crypto_op_ctod_offset(c, t, o)	\
66 	((t)((char *)(c) + (o)))
67 
68 /**
69  * A macro that returns the physical address that points
70  * to an offset from the start of the crypto operation
71  * (rte_crypto_op)
72  *
73  * @param c
74  *   The crypto operation.
75  * @param o
76  *   The offset from the start of the crypto operation
77  *   to calculate address from.
 * @return
 *   The physical address, as rte_iova_t, computed as
 *   (c)->phys_addr + (o).
78  */
79 #define rte_crypto_op_ctophys_offset(c, o)	\
80 	(rte_iova_t)((c)->phys_addr + (o))
81 
82 /**
83  * Crypto parameters range description
84  */
85 struct rte_crypto_param_range {
86 	uint16_t min;	/**< minimum size */
87 	uint16_t max;	/**< maximum size */
88 	uint16_t increment;
89 	/**< if a range of sizes are supported,
90 	 * this parameter is used to indicate
91 	 * increments in byte size that are supported
92 	 * between the minimum and maximum
93 	 */
94 };
95 
96 /**
97  * Data-unit supported lengths of cipher algorithms.
98  * A bit can represent any set of data-unit sizes
99  * (single size, multiple size, range, etc).
100  */
101 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
104 
105 /**
106  * Symmetric Crypto Capability
107  */
108 struct rte_cryptodev_symmetric_capability {
109 	enum rte_crypto_sym_xform_type xform_type;
110 	/**< Transform type : Authentication / Cipher / AEAD */
111 	RTE_STD_C11
112 	union {
113 		struct {
114 			enum rte_crypto_auth_algorithm algo;
115 			/**< authentication algorithm */
116 			uint16_t block_size;
117 			/**< algorithm block size */
118 			struct rte_crypto_param_range key_size;
119 			/**< auth key size range */
120 			struct rte_crypto_param_range digest_size;
121 			/**< digest size range */
122 			struct rte_crypto_param_range aad_size;
123 			/**< Additional authentication data size range */
124 			struct rte_crypto_param_range iv_size;
125 			/**< Initialisation vector data size range */
126 		} auth;
127 		/**< Symmetric Authentication transform capabilities */
128 		struct {
129 			enum rte_crypto_cipher_algorithm algo;
130 			/**< cipher algorithm */
131 			uint16_t block_size;
132 			/**< algorithm block size */
133 			struct rte_crypto_param_range key_size;
134 			/**< cipher key size range */
135 			struct rte_crypto_param_range iv_size;
136 			/**< Initialisation vector data size range */
137 			uint32_t dataunit_set;
138 			/**<
139 			 * Supported data-unit lengths:
140 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
141 			 * or 0 for lengths defined in the algorithm standard.
142 			 */
143 		} cipher;
144 		/**< Symmetric Cipher transform capabilities */
145 		struct {
146 			enum rte_crypto_aead_algorithm algo;
147 			/**< AEAD algorithm */
148 			uint16_t block_size;
149 			/**< algorithm block size */
150 			struct rte_crypto_param_range key_size;
151 			/**< AEAD key size range */
152 			struct rte_crypto_param_range digest_size;
153 			/**< digest size range */
154 			struct rte_crypto_param_range aad_size;
155 			/**< Additional authentication data size range */
156 			struct rte_crypto_param_range iv_size;
157 			/**< Initialisation vector data size range */
158 		} aead;
159 	};
160 };
161 
162 /**
163  * Asymmetric Xform Crypto Capability
164  *
165  */
166 struct rte_cryptodev_asymmetric_xform_capability {
167 	enum rte_crypto_asym_xform_type xform_type;
168 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
169 
170 	uint32_t op_types;
171 	/**<
172 	 * Bitmask for supported rte_crypto_asym_op_type or
173 	 * rte_crypto_asym_ke_type. Which enum is used is determined
174 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
175 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
176 	 * it is rte_crypto_asym_op_type.
177 	 */
178 
179 	__extension__
180 	union {
181 		struct rte_crypto_param_range modlen;
182 		/**< Range of modulus length supported by modulus based xform.
183 		 * Value 0 means implementation default.
184 		 */
185 	};
186 };
187 
188 /**
189  * Asymmetric Crypto Capability
190  *
191  */
192 struct rte_cryptodev_asymmetric_capability {
193 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
194 };
195 
196 
197 /** Structure used to capture a capability of a crypto device */
198 struct rte_cryptodev_capabilities {
199 	enum rte_crypto_op_type op;
200 	/**< Operation type */
201 
202 	RTE_STD_C11
203 	union {
204 		struct rte_cryptodev_symmetric_capability sym;
205 		/**< Symmetric operation capability parameters */
206 		struct rte_cryptodev_asymmetric_capability asym;
207 		/**< Asymmetric operation capability parameters */
208 	};
209 };
210 
211 /** Structure used to describe crypto algorithms */
212 struct rte_cryptodev_sym_capability_idx {
213 	enum rte_crypto_sym_xform_type type;
214 	union {
215 		enum rte_crypto_cipher_algorithm cipher;
216 		enum rte_crypto_auth_algorithm auth;
217 		enum rte_crypto_aead_algorithm aead;
218 	} algo;
219 };
220 
221 /**
222  * Structure used to describe asymmetric crypto xforms
223  * Each xform maps to one asym algorithm.
224  *
225  */
226 struct rte_cryptodev_asym_capability_idx {
227 	enum rte_crypto_asym_xform_type type;
228 	/**< Asymmetric xform (algo) type */
229 };
230 
231 /**
232  * Provide capabilities available for defined device and algorithm
233  *
234  * @param	dev_id		The identifier of the device.
235  * @param	idx		Description of crypto algorithms.
236  *
237  * @return
238  *   - Return description of the symmetric crypto capability if it exists.
239  *   - Return NULL if the capability does not exist.
240  */
241 const struct rte_cryptodev_symmetric_capability *
242 rte_cryptodev_sym_capability_get(uint8_t dev_id,
243 		const struct rte_cryptodev_sym_capability_idx *idx);
244 
245 /**
246  *  Provide capabilities available for defined device and xform
247  *
248  * @param	dev_id		The identifier of the device.
249  * @param	idx		Description of asym crypto xform.
250  *
251  * @return
252  *   - Return description of the asymmetric crypto capability if it exists.
253  *   - Return NULL if the capability does not exist.
254  */
255 __rte_experimental
256 const struct rte_cryptodev_asymmetric_xform_capability *
257 rte_cryptodev_asym_capability_get(uint8_t dev_id,
258 		const struct rte_cryptodev_asym_capability_idx *idx);
259 
260 /**
261  * Check if key size and initial vector are supported
262  * in crypto cipher capability
263  *
264  * @param	capability	Description of the symmetric crypto capability.
265  * @param	key_size	Cipher key size.
266  * @param	iv_size		Cipher initial vector size.
267  *
268  * @return
269  *   - Return 0 if the parameters are in range of the capability.
270  *   - Return -1 if the parameters are out of range of the capability.
271  */
272 int
273 rte_cryptodev_sym_capability_check_cipher(
274 		const struct rte_cryptodev_symmetric_capability *capability,
275 		uint16_t key_size, uint16_t iv_size);
276 
277 /**
278  * Check if key size and initial vector are supported
279  * in crypto auth capability
280  *
281  * @param	capability	Description of the symmetric crypto capability.
282  * @param	key_size	Auth key size.
283  * @param	digest_size	Auth digest size.
284  * @param	iv_size		Auth initial vector size.
285  *
286  * @return
287  *   - Return 0 if the parameters are in range of the capability.
288  *   - Return -1 if the parameters are out of range of the capability.
289  */
290 int
291 rte_cryptodev_sym_capability_check_auth(
292 		const struct rte_cryptodev_symmetric_capability *capability,
293 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
294 
295 /**
296  * Check if key, digest, AAD and initial vector sizes are supported
297  * in crypto AEAD capability
298  *
299  * @param	capability	Description of the symmetric crypto capability.
300  * @param	key_size	AEAD key size.
301  * @param	digest_size	AEAD digest size.
302  * @param	aad_size	AEAD AAD size.
303  * @param	iv_size		AEAD IV size.
304  *
305  * @return
306  *   - Return 0 if the parameters are in range of the capability.
307  *   - Return -1 if the parameters are out of range of the capability.
308  */
309 int
310 rte_cryptodev_sym_capability_check_aead(
311 		const struct rte_cryptodev_symmetric_capability *capability,
312 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
313 		uint16_t iv_size);
314 
315 /**
316  * Check if op type is supported
317  *
318  * @param	capability	Description of the asymmetric crypto capability.
319  * @param	op_type		op type
320  *
321  * @return
322  *   - Return 1 if the op type is supported
323  *   - Return 0 if unsupported
324  */
325 __rte_experimental
326 int
327 rte_cryptodev_asym_xform_capability_check_optype(
328 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
329 		enum rte_crypto_asym_op_type op_type);
330 
331 /**
332  * Check if modulus length is in supported range
333  *
334  * @param	capability	Description of the asymmetric crypto capability.
335  * @param	modlen		modulus length.
336  *
337  * @return
338  *   - Return 0 if the parameters are in range of the capability.
339  *   - Return -1 if the parameters are out of range of the capability.
340  */
341 __rte_experimental
342 int
343 rte_cryptodev_asym_xform_capability_check_modlen(
344 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
345 		uint16_t modlen);
346 
347 /**
348  * Provide the cipher algorithm enum, given an algorithm string
349  *
350  * @param	algo_enum	A pointer to the cipher algorithm
351  *				enum to be filled
352  * @param	algo_string	Cipher algo string
353  *
354  * @return
355  * - Return -1 if string is not valid
356  * - Return 0 if the string is valid
357  */
358 int
359 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
360 		const char *algo_string);
361 
362 /**
363  * Provide the authentication algorithm enum, given an algorithm string
364  *
365  * @param	algo_enum	A pointer to the authentication algorithm
366  *				enum to be filled
367  * @param	algo_string	Authentication algo string
368  *
369  * @return
370  * - Return -1 if string is not valid
371  * - Return 0 if the string is valid
372  */
373 int
374 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
375 		const char *algo_string);
376 
377 /**
378  * Provide the AEAD algorithm enum, given an algorithm string
379  *
380  * @param	algo_enum	A pointer to the AEAD algorithm
381  *				enum to be filled
382  * @param	algo_string	AEAD algorithm string
383  *
384  * @return
385  * - Return -1 if string is not valid
386  * - Return 0 if the string is valid
387  */
388 int
389 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
390 		const char *algo_string);
391 
392 /**
393  * Provide the Asymmetric xform enum, given an xform string
394  *
395  * @param	xform_enum	A pointer to the xform type
396  *				enum to be filled
397  * @param	xform_string	xform string
398  *
399  * @return
400  * - Return -1 if string is not valid
401  * - Return 0 if the string is valid
402  */
403 __rte_experimental
404 int
405 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
406 		const char *xform_string);
407 
408 
409 /** Macro used at end of crypto PMD list */
410 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
411 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
412 
413 
414 /**
415  * Crypto device supported feature flags
416  *
417  * Note:
418  * New features flags should be added to the end of the list
419  *
420  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
421  */
422 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
423 /**< Symmetric crypto operations are supported */
424 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
425 /**< Asymmetric crypto operations are supported */
426 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
427 /**< Chaining symmetric crypto operations are supported */
428 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
429 /**< Utilises CPU SIMD SSE instructions */
430 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
431 /**< Utilises CPU SIMD AVX instructions */
432 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
433 /**< Utilises CPU SIMD AVX2 instructions */
434 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
435 /**< Utilises CPU AES-NI instructions */
436 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
437 /**< Operations are off-loaded to an
438  * external hardware accelerator
439  */
440 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
441 /**< Utilises CPU SIMD AVX512 instructions */
442 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
443 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
444  * are supported
445  */
446 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
447 /**< Out-of-place Scatter-gather (SGL) buffers are
448  * supported in input and output
449  */
450 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
451 /**< Out-of-place Scatter-gather (SGL) buffers are supported
452  * in input, combined with linear buffers (LB), with a
453  * single segment in output
454  */
455 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
456 /**< Out-of-place Scatter-gather (SGL) buffers are supported
457  * in output, combined with linear buffers (LB) in input
458  */
459 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
460 /**< Out-of-place linear buffers (LB) are supported in input and output */
461 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
462 /**< Utilises CPU NEON instructions */
463 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
464 /**< Utilises ARM CPU Cryptographic Extensions */
465 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
466 /**< Support Security Protocol Processing */
467 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
468 /**< Support RSA Private Key OP with exponent */
469 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
470 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
471 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
472 /**< Support encrypted-digest operations where digest is appended to data */
473 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
474 /**< Support asymmetric session-less operations */
475 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
476 /**< Support symmetric cpu-crypto processing */
477 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
478 /**< Support symmetric session-less operations */
479 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
480 /**< Support operations on data which is not byte aligned */
481 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
482 /**< Support accelerator specific symmetric raw data-path APIs */
483 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
484 /**< Support operations on multiple data-units message */
485 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
486 /**< Support wrapped key in cipher xform  */
487 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
488 /**< Support inner checksum computation/verification */
489 
490 /**
491  * Get the name of a crypto device feature flag
492  *
493  * @param	flag	The mask describing the flag.
494  *
495  * @return
496  *   The name of this flag, or NULL if it's not a valid feature flag.
497  */
498 
499 extern const char *
500 rte_cryptodev_get_feature_name(uint64_t flag);
501 
502 /**  Crypto device information */
503 struct rte_cryptodev_info {
504 	const char *driver_name;	/**< Driver name. */
505 	uint8_t driver_id;		/**< Driver identifier */
506 	struct rte_device *device;	/**< Generic device information. */
507 
508 	uint64_t feature_flags;
509 	/**< Feature flags exposes HW/SW features for the given device */
510 
511 	const struct rte_cryptodev_capabilities *capabilities;
512 	/**< Array of devices supported capabilities */
513 
514 	unsigned max_nb_queue_pairs;
515 	/**< Maximum number of queues pairs supported by device. */
516 
517 	uint16_t min_mbuf_headroom_req;
518 	/**< Minimum mbuf headroom required by device */
519 
520 	uint16_t min_mbuf_tailroom_req;
521 	/**< Minimum mbuf tailroom required by device */
522 
523 	struct {
524 		unsigned max_nb_sessions;
525 		/**< Maximum number of sessions supported by device.
526 		 * If 0, the device does not have any limitation in
527 		 * number of sessions that can be used.
528 		 */
529 	} sym;
530 };
531 
532 #define RTE_CRYPTODEV_DETACHED  (0)
533 #define RTE_CRYPTODEV_ATTACHED  (1)
534 
535 /** Definitions of Crypto device event types */
536 enum rte_cryptodev_event_type {
537 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
538 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
539 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
540 };
541 
542 /** Crypto device queue pair configuration structure. */
543 struct rte_cryptodev_qp_conf {
544 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
545 	struct rte_mempool *mp_session;
546 	/**< The mempool for creating session in sessionless mode */
547 	struct rte_mempool *mp_session_private;
548 	/**< The mempool for creating sess private data in sessionless mode */
549 };
550 
551 /**
552  * Function type used for processing crypto ops when enqueue/dequeue burst is
553  * called.
554  *
555  * The callback function is called on enqueue/dequeue burst immediately.
556  *
557  * @param	dev_id		The identifier of the device.
558  * @param	qp_id		The index of the queue pair on which ops are
559  *				enqueued/dequeued. The value must be in the
560  *				range [0, nb_queue_pairs - 1] previously
561  *				supplied to *rte_cryptodev_configure*.
562  * @param	ops		The address of an array of *nb_ops* pointers
563  *				to *rte_crypto_op* structures which contain
564  *				the crypto operations to be processed.
565  * @param	nb_ops		The number of operations to process.
566  * @param	user_param	The arbitrary user parameter passed in by the
567  *				application when the callback was originally
568  *				registered.
569  * @return			The number of ops to be enqueued to the
570  *				crypto device.
571  */
572 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
573 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
574 
575 /**
576  * Typedef for application callback function to be registered by application
577  * software for notification of device events
578  *
579  * @param	dev_id	Crypto device identifier
580  * @param	event	Crypto device event to register for notification of.
581  * @param	cb_arg	User specified parameter to be passed to the
582  *			user's callback function.
583  */
584 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
585 		enum rte_cryptodev_event_type event, void *cb_arg);
586 
587 
588 /** Crypto Device statistics */
589 struct rte_cryptodev_stats {
590 	uint64_t enqueued_count;
591 	/**< Count of all operations enqueued */
592 	uint64_t dequeued_count;
593 	/**< Count of all operations dequeued */
594 
595 	uint64_t enqueue_err_count;
596 	/**< Total error count on operations enqueued */
597 	uint64_t dequeue_err_count;
598 	/**< Total error count on operations dequeued */
599 };
600 
601 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
602 /**< Max length of name of crypto PMD */
603 
604 /**
605  * Get the device identifier for the named crypto device.
606  *
607  * @param	name	device name to select the device structure.
608  *
609  * @return
610  *   - Returns crypto device identifier on success.
611  *   - Return -1 on failure to find named crypto device.
612  */
613 extern int
614 rte_cryptodev_get_dev_id(const char *name);
615 
616 /**
617  * Get the crypto device name given a device identifier.
618  *
619  * @param dev_id
620  *   The identifier of the device
621  *
622  * @return
623  *   - Returns crypto device name.
624  *   - Returns NULL if crypto device is not present.
625  */
626 extern const char *
627 rte_cryptodev_name_get(uint8_t dev_id);
628 
629 /**
630  * Get the total number of crypto devices that have been successfully
631  * initialised.
632  *
633  * @return
634  *   - The total number of usable crypto devices.
635  */
636 extern uint8_t
637 rte_cryptodev_count(void);
638 
639 /**
640  * Get number of crypto device defined type.
641  *
642  * @param	driver_id	driver identifier.
643  *
644  * @return
645  *   Returns number of crypto device.
646  */
647 extern uint8_t
648 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
649 
650 /**
651  * Get number and identifiers of attached crypto devices that
652  * use the same crypto driver.
653  *
654  * @param	driver_name	driver name.
655  * @param	devices		output devices identifiers.
656  * @param	nb_devices	maximal number of devices.
657  *
658  * @return
659  *   Returns number of attached crypto device.
660  */
661 uint8_t
662 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
663 		uint8_t nb_devices);
664 /**
665  * Return the NUMA socket to which a device is connected
666  *
667  * @param dev_id
668  *   The identifier of the device
669  * @return
670  *   The NUMA socket id to which the device is connected or
671  *   a default of zero if the socket could not be determined.
672  *   Returns -1 if the dev_id value is out of range.
673  */
674 extern int
675 rte_cryptodev_socket_id(uint8_t dev_id);
676 
677 /** Crypto device configuration structure */
678 struct rte_cryptodev_config {
679 	int socket_id;			/**< Socket to allocate resources on */
680 	uint16_t nb_queue_pairs;
681 	/**< Number of queue pairs to configure on device */
682 	uint64_t ff_disable;
683 	/**< Feature flags to be disabled. Only the following features are
684 	 * allowed to be disabled,
685 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
686 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
687 	 *  - RTE_CRYPTODEV_FF_SECURITY
688 	 */
689 };
690 
691 /**
692  * Configure a device.
693  *
694  * This function must be invoked first before any other function in the
695  * API. This function can also be re-invoked when a device is in the
696  * stopped state.
697  *
698  * @param	dev_id		The identifier of the device to configure.
699  * @param	config		The crypto device configuration structure.
700  *
701  * @return
702  *   - 0: Success, device configured.
703  *   - <0: Error code returned by the driver configuration function.
704  */
705 extern int
706 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
707 
708 /**
709  * Start a device.
710  *
711  * The device start step is the last one and consists of setting the configured
712  * offload features and in starting the transmit and the receive units of the
713  * device.
714  * On success, all basic functions exported by the API (link status,
715  * receive/transmit, and so on) can be invoked.
716  *
717  * @param dev_id
718  *   The identifier of the device.
719  * @return
720  *   - 0: Success, device started.
721  *   - <0: Error code of the driver device start function.
722  */
723 extern int
724 rte_cryptodev_start(uint8_t dev_id);
725 
726 /**
727  * Stop a device. The device can be restarted with a call to
728  * rte_cryptodev_start()
729  *
730  * @param	dev_id		The identifier of the device.
731  */
732 extern void
733 rte_cryptodev_stop(uint8_t dev_id);
734 
735 /**
736  * Close a device. The device cannot be restarted!
737  *
738  * @param	dev_id		The identifier of the device.
739  *
740  * @return
741  *  - 0 on successfully closing device
742  *  - <0 on failure to close device
743  */
744 extern int
745 rte_cryptodev_close(uint8_t dev_id);
746 
747 /**
748  * Allocate and set up a receive queue pair for a device.
749  *
750  *
751  * @param	dev_id		The identifier of the device.
752  * @param	queue_pair_id	The index of the queue pairs to set up. The
753  *				value must be in the range [0, nb_queue_pair
754  *				- 1] previously supplied to
755  *				rte_cryptodev_configure().
756  * @param	qp_conf		The pointer to the configuration data to be
757  *				used for the queue pair.
758  * @param	socket_id	The *socket_id* argument is the socket
759  *				identifier in case of NUMA. The value can be
760  *				*SOCKET_ID_ANY* if there is no NUMA constraint
761  *				for the DMA memory allocated for the receive
762  *				queue pair.
763  *
764  * @return
765  *   - 0: Success, queue pair correctly set up.
766  *   - <0: Queue pair configuration failed
767  */
768 extern int
769 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
770 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
771 
772 /**
773  * Get the status of queue pairs setup on a specific crypto device
774  *
775  * @param	dev_id		Crypto device identifier.
776  * @param	queue_pair_id	The index of the queue pairs to set up. The
777  *				value must be in the range [0, nb_queue_pair
778  *				- 1] previously supplied to
779  *				rte_cryptodev_configure().
780  * @return
781  *   - 0: qp was not configured
782  *	 - 1: qp was configured
783  *	 - -EINVAL: device was not configured
784  */
785 __rte_experimental
786 int
787 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
788 
789 /**
790  * Get the number of queue pairs on a specific crypto device
791  *
792  * @param	dev_id		Crypto device identifier.
793  * @return
794  *   - The number of configured queue pairs.
795  */
796 extern uint16_t
797 rte_cryptodev_queue_pair_count(uint8_t dev_id);
798 
799 
800 /**
801  * Retrieve the general I/O statistics of a device.
802  *
803  * @param	dev_id		The identifier of the device.
804  * @param	stats		A pointer to a structure of type
805  *				*rte_cryptodev_stats* to be filled with the
806  *				values of device counters.
807  * @return
808  *   - Zero if successful.
809  *   - Non-zero otherwise.
810  */
811 extern int
812 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
813 
814 /**
815  * Reset the general I/O statistics of a device.
816  *
817  * @param	dev_id		The identifier of the device.
818  */
819 extern void
820 rte_cryptodev_stats_reset(uint8_t dev_id);
821 
822 /**
823  * Retrieve the contextual information of a device.
824  *
825  * @param	dev_id		The identifier of the device.
826  * @param	dev_info	A pointer to a structure of type
827  *				*rte_cryptodev_info* to be filled with the
828  *				contextual information of the device.
829  *
830  * @note The capabilities field of dev_info is set to point to the first
831  * element of an array of struct rte_cryptodev_capabilities. The element after
832  * the last valid element has its op field set to
833  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
834  */
835 extern void
836 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
837 
838 
839 /**
840  * Register a callback function for specific device id.
841  *
842  * @param	dev_id		Device id.
843  * @param	event		Event interested.
844  * @param	cb_fn		User supplied callback function to be called.
845  * @param	cb_arg		Pointer to the parameters for the registered
846  *				callback.
847  *
848  * @return
849  *  - On success, zero.
850  *  - On failure, a negative value.
851  */
852 extern int
853 rte_cryptodev_callback_register(uint8_t dev_id,
854 		enum rte_cryptodev_event_type event,
855 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
856 
857 /**
858  * Unregister a callback function for specific device id.
859  *
860  * @param	dev_id		The device identifier.
861  * @param	event		Event interested.
862  * @param	cb_fn		User supplied callback function to be called.
863  * @param	cb_arg		Pointer to the parameters for the registered
864  *				callback.
865  *
866  * @return
867  *  - On success, zero.
868  *  - On failure, a negative value.
869  */
870 extern int
871 rte_cryptodev_callback_unregister(uint8_t dev_id,
872 		enum rte_cryptodev_event_type event,
873 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
874 
875 struct rte_cryptodev_callback;
876 
877 /** Structure to keep track of registered callbacks */
878 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
879 
880 /**
881  * Structure used to hold information about the callbacks to be called for a
882  * queue pair on enqueue/dequeue.
883  */
884 struct rte_cryptodev_cb {
885 	struct rte_cryptodev_cb *next;
886 	/**< Pointer to next callback */
887 	rte_cryptodev_callback_fn fn;
888 	/**< Pointer to callback function */
889 	void *arg;
890 	/**< Pointer to argument */
891 };
892 
893 /**
894  * @internal
895  * Structure used to hold information about the RCU for a queue pair.
896  */
897 struct rte_cryptodev_cb_rcu {
898 	struct rte_cryptodev_cb *next;
899 	/**< Pointer to next callback */
900 	struct rte_rcu_qsbr *qsbr;
901 	/**< RCU QSBR variable per queue pair */
902 };
903 
/**
 * Get the security context pointer associated with a crypto device.
 *
 * @param	dev_id		The identifier of the device.
 *
 * @return
 *   NOTE(review): an opaque context pointer — presumably the device's
 *   rte_security context, or NULL if unavailable; confirm against the
 *   implementation in the corresponding .c file.
 */
904 void *
905 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
906 
907 /** Cryptodev symmetric crypto session
908  * Each session is derived from a fixed xform chain. Therefore each session
909  * has a fixed algo, key, op-type, digest_len etc.
910  */
911 struct rte_cryptodev_sym_session {
912 	uint64_t opaque_data;
913 	/**< Can be used for external metadata */
914 	uint16_t nb_drivers;
915 	/**< number of elements in sess_data array */
916 	uint16_t user_data_sz;
917 	/**< session user data will be placed after sess_data */
918 	__extension__ struct {
919 		void *data;
920 		uint16_t refcnt;
921 	} sess_data[];
922 	/**< Driver specific session material, variable size */
923 };
924 
925 /**
926  * Create a symmetric session mempool.
927  *
928  * @param name
929  *   The unique mempool name.
930  * @param nb_elts
931  *   The number of elements in the mempool.
932  * @param elt_size
933  *   The size of the element. This value will be ignored if it is smaller than
934  *   the minimum session header size required for the system. For the user who
935  *   want to use the same mempool for sym session and session private data it
936  *   can be the maximum value of all existing devices' private data and session
937  *   header sizes.
938  * @param cache_size
939  *   The number of per-lcore cache elements
940  * @param priv_size
941  *   The private data size of each session.
942  * @param socket_id
943  *   The *socket_id* argument is the socket identifier in the case of
944  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
945  *   constraint for the reserved zone.
946  *
 * @return
 *  - On success returns the created session mempool
 *  - On failure returns NULL
950  */
951 __rte_experimental
952 struct rte_mempool *
953 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
954 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
955 	int socket_id);
956 
957 /**
958  * Create an asymmetric session mempool.
959  *
960  * @param name
961  *   The unique mempool name.
962  * @param nb_elts
963  *   The number of elements in the mempool.
964  * @param cache_size
965  *   The number of per-lcore cache elements
966  * @param user_data_size
967  *   The size of user data to be placed after session private data.
968  * @param socket_id
969  *   The *socket_id* argument is the socket identifier in the case of
970  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
971  *   constraint for the reserved zone.
972  *
973  * @return
974  *  - On success return mempool
975  *  - On failure returns NULL
976  */
977 __rte_experimental
978 struct rte_mempool *
979 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
980 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
981 
982 /**
983  * Create symmetric crypto session header (generic with no private data)
984  *
985  * @param   mempool    Symmetric session mempool to allocate session
986  *                     objects from
987  * @return
988  *  - On success return pointer to sym-session
989  *  - On failure returns NULL
990  */
991 struct rte_cryptodev_sym_session *
992 rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
993 
994 /**
995  * Create and initialise an asymmetric crypto session structure.
996  * Calls the PMD to configure the private session data.
997  *
998  * @param   dev_id   ID of device that we want the session to be used on
999  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1000  *                   processed with this session
1001  * @param   mp       mempool to allocate asymmetric session
1002  *                   objects from
1003  * @param   session  void ** for session to be used
1004  *
1005  * @return
1006  *  - 0 on success.
1007  *  - -EINVAL on invalid arguments.
1008  *  - -ENOMEM on memory error for session allocation.
1009  *  - -ENOTSUP if device doesn't support session configuration.
1010  */
1011 __rte_experimental
1012 int
1013 rte_cryptodev_asym_session_create(uint8_t dev_id,
1014 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1015 		void **session);
1016 
1017 /**
1018  * Frees symmetric crypto session header, after checking that all
1019  * the device private data has been freed, returning it
1020  * to its original mempool.
1021  *
1022  * @param   sess     Session header to be freed.
1023  *
1024  * @return
1025  *  - 0 if successful.
1026  *  - -EINVAL if session is NULL.
1027  *  - -EBUSY if not all device private data has been freed.
1028  */
1029 int
1030 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
1031 
1032 /**
1033  * Clears and frees asymmetric crypto session header and private data,
1034  * returning it to its original mempool.
1035  *
1036  * @param   dev_id   ID of device that uses the asymmetric session.
1037  * @param   sess     Session header to be freed.
1038  *
1039  * @return
1040  *  - 0 if successful.
1041  *  - -EINVAL if device is invalid or session is NULL.
1042  */
1043 __rte_experimental
1044 int
1045 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1046 
1047 /**
1048  * Fill out private data for the device id, based on its device type.
1049  *
1050  * @param   dev_id   ID of device that we want the session to be used on
1051  * @param   sess     Session where the private data will be attached to
1052  * @param   xforms   Symmetric crypto transform operations to apply on flow
1053  *                   processed with this session
1054  * @param   mempool  Mempool where the private data is allocated.
1055  *
1056  * @return
1057  *  - On success, zero.
1058  *  - -EINVAL if input parameters are invalid.
1059  *  - -ENOTSUP if crypto device does not support the crypto transform or
1060  *    does not support symmetric operations.
1061  *  - -ENOMEM if the private session could not be allocated.
1062  */
1063 int
1064 rte_cryptodev_sym_session_init(uint8_t dev_id,
1065 			struct rte_cryptodev_sym_session *sess,
1066 			struct rte_crypto_sym_xform *xforms,
1067 			struct rte_mempool *mempool);
1068 
1069 /**
1070  * Frees private data for the device id, based on its device type,
1071  * returning it to its mempool. It is the application's responsibility
1072  * to ensure that private session data is not cleared while there are
1073  * still in-flight operations using it.
1074  *
1075  * @param   dev_id   ID of device that uses the session.
1076  * @param   sess     Session containing the reference to the private data
1077  *
1078  * @return
1079  *  - 0 if successful.
1080  *  - -EINVAL if device is invalid or session is NULL.
1081  *  - -ENOTSUP if crypto device does not support symmetric operations.
1082  */
1083 int
1084 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1085 			struct rte_cryptodev_sym_session *sess);
1086 
1087 /**
1088  * Get the size of the header session, for all registered drivers excluding
1089  * the user data size.
1090  *
1091  * @return
1092  *   Size of the symmetric header session.
1093  */
1094 unsigned int
1095 rte_cryptodev_sym_get_header_session_size(void);
1096 
1097 /**
1098  * Get the size of the header session from created session.
1099  *
1100  * @param sess
1101  *   The sym cryptodev session pointer
1102  *
1103  * @return
1104  *   - If sess is not NULL, return the size of the header session including
1105  *   the private data size defined within sess.
1106  *   - If sess is NULL, return 0.
1107  */
1108 __rte_experimental
1109 unsigned int
1110 rte_cryptodev_sym_get_existing_header_session_size(
1111 		struct rte_cryptodev_sym_session *sess);
1112 
1113 /**
1114  * Get the size of the asymmetric session header.
1115  *
1116  * @return
1117  *   Size of the asymmetric header session.
1118  */
1119 __rte_experimental
1120 unsigned int
1121 rte_cryptodev_asym_get_header_session_size(void);
1122 
1123 /**
1124  * Get the size of the private symmetric session data
1125  * for a device.
1126  *
1127  * @param	dev_id		The device identifier.
1128  *
1129  * @return
1130  *   - Size of the private data, if successful
1131  *   - 0 if device is invalid or does not have private
1132  *   symmetric session
1133  */
1134 unsigned int
1135 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1136 
1137 /**
1138  * Get the size of the private data for asymmetric session
1139  * on device
1140  *
1141  * @param	dev_id		The device identifier.
1142  *
1143  * @return
1144  *   - Size of the asymmetric private data, if successful
1145  *   - 0 if device is invalid or does not have private session
1146  */
1147 __rte_experimental
1148 unsigned int
1149 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1150 
1151 /**
1152  * Validate if the crypto device index is valid attached crypto device.
1153  *
1154  * @param	dev_id	Crypto device index.
1155  *
1156  * @return
1157  *   - If the device index is valid (1) or not (0).
1158  */
1159 unsigned int
1160 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1161 
1162 /**
1163  * Provide driver identifier.
1164  *
1165  * @param name
1166  *   The pointer to a driver name.
1167  * @return
1168  *  The driver type identifier or -1 if no driver found
1169  */
1170 int rte_cryptodev_driver_id_get(const char *name);
1171 
1172 /**
1173  * Provide driver name.
1174  *
1175  * @param driver_id
1176  *   The driver identifier.
1177  * @return
1178  *  The driver name or null if no driver found
1179  */
1180 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1181 
1182 /**
1183  * Store user data in a session.
1184  *
1185  * @param	sess		Session pointer allocated by
1186  *				*rte_cryptodev_sym_session_create*.
1187  * @param	data		Pointer to the user data.
1188  * @param	size		Size of the user data.
1189  *
1190  * @return
1191  *  - On success, zero.
1192  *  - On failure, a negative value.
1193  */
1194 __rte_experimental
1195 int
1196 rte_cryptodev_sym_session_set_user_data(
1197 					struct rte_cryptodev_sym_session *sess,
1198 					void *data,
1199 					uint16_t size);
1200 
1201 /**
1202  * Get user data stored in a session.
1203  *
1204  * @param	sess		Session pointer allocated by
1205  *				*rte_cryptodev_sym_session_create*.
1206  *
1207  * @return
1208  *  - On success return pointer to user data.
1209  *  - On failure returns NULL.
1210  */
1211 __rte_experimental
1212 void *
1213 rte_cryptodev_sym_session_get_user_data(
1214 					struct rte_cryptodev_sym_session *sess);
1215 
1216 /**
1217  * Store user data in an asymmetric session.
1218  *
1219  * @param	sess		Session pointer allocated by
1220  *				*rte_cryptodev_asym_session_create*.
1221  * @param	data		Pointer to the user data.
1222  * @param	size		Size of the user data.
1223  *
1224  * @return
1225  *  - On success, zero.
1226  *  - -EINVAL if the session pointer is invalid.
1227  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1228  */
1229 __rte_experimental
1230 int
1231 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1232 
1233 /**
1234  * Get user data stored in an asymmetric session.
1235  *
1236  * @param	sess		Session pointer allocated by
1237  *				*rte_cryptodev_asym_session_create*.
1238  *
1239  * @return
1240  *  - On success return pointer to user data.
1241  *  - On failure returns NULL.
1242  */
1243 __rte_experimental
1244 void *
1245 rte_cryptodev_asym_session_get_user_data(void *sess);
1246 
1247 /**
1248  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1249  * on user provided data.
1250  *
1251  * @param	dev_id	The device identifier.
1252  * @param	sess	Cryptodev session structure
1253  * @param	ofs	Start and stop offsets for auth and cipher operations
1254  * @param	vec	Vectorized operation descriptor
1255  *
1256  * @return
1257  *  - Returns number of successfully processed packets.
1258  */
1259 __rte_experimental
1260 uint32_t
1261 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1262 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1263 	struct rte_crypto_sym_vec *vec);
1264 
1265 /**
1266  * Get the size of the raw data-path context buffer.
1267  *
1268  * @param	dev_id		The device identifier.
1269  *
1270  * @return
1271  *   - If the device supports raw data-path APIs, return the context size.
1272  *   - If the device does not support the APIs, return -1.
1273  */
1274 __rte_experimental
1275 int
1276 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1277 
1278 /**
1279  * Set session event meta data
1280  *
1281  * @param	dev_id		The device identifier.
1282  * @param	sess            Crypto or security session.
1283  * @param	op_type         Operation type.
1284  * @param	sess_type       Session type.
1285  * @param	ev_mdata	Pointer to the event crypto meta data
1286  *				(aka *union rte_event_crypto_metadata*)
1287  * @param	size            Size of ev_mdata.
1288  *
1289  * @return
1290  *  - On success, zero.
1291  *  - On failure, a negative value.
1292  */
1293 __rte_experimental
1294 int
1295 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1296 	enum rte_crypto_op_type op_type,
1297 	enum rte_crypto_op_sess_type sess_type,
1298 	void *ev_mdata, uint16_t size);
1299 
1300 /**
1301  * Union of different crypto session types, including session-less xform
1302  * pointer.
1303  */
1304 union rte_cryptodev_session_ctx {
1305 	struct rte_cryptodev_sym_session *crypto_sess;
1306 	struct rte_crypto_sym_xform *xform;
1307 	struct rte_security_session *sec_sess;
1308 };
1309 
1310 /**
1311  * Enqueue a vectorized operation descriptor into the device queue but the
1312  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1313  * is called.
1314  *
1315  * @param	qp		Driver specific queue pair data.
1316  * @param	drv_ctx		Driver specific context data.
1317  * @param	vec		Vectorized operation descriptor.
1318  * @param	ofs		Start and stop offsets for auth and cipher
1319  *				operations.
1320  * @param	user_data	The array of user data for dequeue later.
1321  * @param	enqueue_status	Driver written value to specify the
1322  *				enqueue status. Possible values:
1323  *				- 1: The number of operations returned are
1324  *				     enqueued successfully.
1325  *				- 0: The number of operations returned are
1326  *				     cached into the queue but are not processed
1327  *				     until rte_cryptodev_raw_enqueue_done() is
1328  *				     called.
1329  *				- negative integer: Error occurred.
1330  * @return
1331  *   - The number of operations in the descriptor successfully enqueued or
1332  *     cached into the queue but not enqueued yet, depends on the
1333  *     "enqueue_status" value.
1334  */
1335 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1336 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1337 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1338 
1339 /**
1340  * Enqueue single raw data vector into the device queue but the driver may or
1341  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1342  *
1343  * @param	qp		Driver specific queue pair data.
1344  * @param	drv_ctx		Driver specific context data.
1345  * @param	data_vec	The buffer data vector.
1346  * @param	n_data_vecs	Number of buffer data vectors.
1347  * @param	ofs		Start and stop offsets for auth and cipher
1348  *				operations.
1349  * @param	iv		IV virtual and IOVA addresses
1350  * @param	digest		digest virtual and IOVA addresses
1351  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1352  *				depends on the algorithm used.
1353  * @param	user_data	The user data.
1354  * @return
1355  *   - 1: The data vector is enqueued successfully.
1356  *   - 0: The data vector is cached into the queue but is not processed
1357  *        until rte_cryptodev_raw_enqueue_done() is called.
1358  *   - negative integer: failure.
1359  */
1360 typedef int (*cryptodev_sym_raw_enqueue_t)(
1361 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1362 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1363 	struct rte_crypto_va_iova_ptr *iv,
1364 	struct rte_crypto_va_iova_ptr *digest,
1365 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1366 	void *user_data);
1367 
1368 /**
1369  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1370  * enqueued/dequeued operations.
1371  *
1372  * @param	qp		Driver specific queue pair data.
1373  * @param	drv_ctx		Driver specific context data.
1374  * @param	n		The total number of processed operations.
1375  * @return
1376  *   - On success return 0.
1377  *   - On failure return negative integer.
1378  */
1379 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1380 	uint32_t n);
1381 
1382 /**
 * Typedef that the user provides for the driver to get the dequeue count.
1384  * The function may return a fixed number or the number parsed from the user
1385  * data stored in the first processed operation.
1386  *
1387  * @param	user_data	Dequeued user data.
1388  * @return
1389  *  - The number of operations to be dequeued.
1390  **/
1391 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1392 
1393 /**
 * Typedef that the user provides to handle a post-dequeue operation, such
 * as filling status.
1396  *
1397  * @param	user_data	Dequeued user data.
1398  * @param	index		Index number of the processed descriptor.
1399  * @param	is_op_success	Operation status provided by the driver.
1400  **/
1401 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1402 	uint32_t index, uint8_t is_op_success);
1403 
1404 /**
1405  * Dequeue a burst of symmetric crypto processing.
1406  *
1407  * @param	qp			Driver specific queue pair data.
1408  * @param	drv_ctx			Driver specific context data.
1409  * @param	get_dequeue_count	User provided callback function to
1410  *					obtain dequeue operation count.
1411  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1412  *					value is used to pass the maximum
1413  *					number of operations to be dequeued.
1414  * @param	post_dequeue		User provided callback function to
1415  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from device queue. When
 *					*is_user_data_array* is set there
 *					should be enough room to store all
 *					user data.
1421  * @param	is_user_data_array	Set 1 if every dequeued user data will
1422  *					be written into out_user_data array.
1423  *					Set 0 if only the first user data will
1424  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1426  *					total successful operations count.
1427  * @param	dequeue_status		Driver written value to specify the
1428  *					dequeue status. Possible values:
1429  *					- 1: Successfully dequeued the number
1430  *					     of operations returned. The user
1431  *					     data previously set during enqueue
1432  *					     is stored in the "out_user_data".
1433  *					- 0: The number of operations returned
1434  *					     are completed and the user data is
1435  *					     stored in the "out_user_data", but
1436  *					     they are not freed from the queue
1437  *					     until
1438  *					     rte_cryptodev_raw_dequeue_done()
1439  *					     is called.
1440  *					- negative integer: Error occurred.
1441  * @return
1442  *   - The number of operations dequeued or completed but not freed from the
1443  *     queue, depends on "dequeue_status" value.
1444  */
1445 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1446 	uint8_t *drv_ctx,
1447 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1448 	uint32_t max_nb_to_dequeue,
1449 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1450 	void **out_user_data, uint8_t is_user_data_array,
1451 	uint32_t *n_success, int *dequeue_status);
1452 
1453 /**
1454  * Dequeue a symmetric crypto processing.
1455  *
1456  * @param	qp			Driver specific queue pair data.
1457  * @param	drv_ctx			Driver specific context data.
1458  * @param	dequeue_status		Driver written value to specify the
1459  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1461  *					     The user data is returned.
1462  *					- 0: The first operation in the queue
1463  *					     is completed and the user data
1464  *					     previously set during enqueue is
1465  *					     returned, but it is not freed from
1466  *					     the queue until
1467  *					     rte_cryptodev_raw_dequeue_done() is
1468  *					     called.
1469  *					- negative integer: Error occurred.
1470  * @param	op_status		Driver written value to specify
1471  *					operation status.
1472  * @return
1473  *   - The user data pointer retrieved from device queue or NULL if no
1474  *     operation is ready for dequeue.
1475  */
1476 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1477 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1478 		enum rte_crypto_op_status *op_status);
1479 
1480 /**
1481  * Context data for raw data-path API crypto process. The buffer of this
1482  * structure is to be allocated by the user application with the size equal
1483  * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value.
1484  */
1485 struct rte_crypto_raw_dp_ctx {
1486 	void *qp_data;
1487 
1488 	cryptodev_sym_raw_enqueue_t enqueue;
1489 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1490 	cryptodev_sym_raw_operation_done_t enqueue_done;
1491 	cryptodev_sym_raw_dequeue_t dequeue;
1492 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1493 	cryptodev_sym_raw_operation_done_t dequeue_done;
1494 
1495 	/* Driver specific context data */
1496 	__extension__ uint8_t drv_ctx_data[];
1497 };
1498 
1499 /**
1500  * Configure raw data-path context data.
1501  *
1502  * NOTE:
1503  * After the context data is configured, the user should call
1504  * rte_cryptodev_raw_attach_session() before using it in
1505  * rte_cryptodev_raw_enqueue/dequeue function call.
1506  *
1507  * @param	dev_id		The device identifier.
1508  * @param	qp_id		The index of the queue pair from which to
1509  *				retrieve processed packets. The value must be
1510  *				in the range [0, nb_queue_pair - 1] previously
1511  *				supplied to rte_cryptodev_configure().
1512  * @param	ctx		The raw data-path context data.
1513  * @param	sess_type	session type.
1514  * @param	session_ctx	Session context data.
1515  * @param	is_update	Set 0 if it is to initialize the ctx.
1516  *				Set 1 if ctx is initialized and only to update
1517  *				session context data.
1518  * @return
1519  *   - On success return 0.
1520  *   - On failure return negative integer.
1521  */
1522 __rte_experimental
1523 int
1524 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1525 	struct rte_crypto_raw_dp_ctx *ctx,
1526 	enum rte_crypto_op_sess_type sess_type,
1527 	union rte_cryptodev_session_ctx session_ctx,
1528 	uint8_t is_update);
1529 
1530 /**
1531  * Enqueue a vectorized operation descriptor into the device queue but the
1532  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1533  * is called.
1534  *
1535  * @param	ctx		The initialized raw data-path context data.
1536  * @param	vec		Vectorized operation descriptor.
1537  * @param	ofs		Start and stop offsets for auth and cipher
1538  *				operations.
1539  * @param	user_data	The array of user data for dequeue later.
1540  * @param	enqueue_status	Driver written value to specify the
1541  *				enqueue status. Possible values:
1542  *				- 1: The number of operations returned are
1543  *				     enqueued successfully.
1544  *				- 0: The number of operations returned are
1545  *				     cached into the queue but are not processed
1546  *				     until rte_cryptodev_raw_enqueue_done() is
1547  *				     called.
1548  *				- negative integer: Error occurred.
1549  * @return
1550  *   - The number of operations in the descriptor successfully enqueued or
1551  *     cached into the queue but not enqueued yet, depends on the
1552  *     "enqueue_status" value.
1553  */
1554 __rte_experimental
1555 uint32_t
1556 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1557 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1558 	void **user_data, int *enqueue_status);
1559 
1560 /**
1561  * Enqueue single raw data vector into the device queue but the driver may or
1562  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1563  *
1564  * @param	ctx		The initialized raw data-path context data.
1565  * @param	data_vec	The buffer data vector.
1566  * @param	n_data_vecs	Number of buffer data vectors.
1567  * @param	ofs		Start and stop offsets for auth and cipher
1568  *				operations.
1569  * @param	iv		IV virtual and IOVA addresses
1570  * @param	digest		digest virtual and IOVA addresses
1571  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1572  *				depends on the algorithm used.
1573  * @param	user_data	The user data.
1574  * @return
1575  *   - 1: The data vector is enqueued successfully.
1576  *   - 0: The data vector is cached into the queue but is not processed
1577  *        until rte_cryptodev_raw_enqueue_done() is called.
1578  *   - negative integer: failure.
1579  */
1580 __rte_experimental
1581 static __rte_always_inline int
1582 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1583 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1584 	union rte_crypto_sym_ofs ofs,
1585 	struct rte_crypto_va_iova_ptr *iv,
1586 	struct rte_crypto_va_iova_ptr *digest,
1587 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1588 	void *user_data)
1589 {
1590 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1591 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1592 }
1593 
1594 /**
1595  * Start processing all enqueued operations from last
1596  * rte_cryptodev_configure_raw_dp_ctx() call.
1597  *
1598  * @param	ctx	The initialized raw data-path context data.
1599  * @param	n	The number of operations cached.
1600  * @return
1601  *   - On success return 0.
1602  *   - On failure return negative integer.
1603  */
1604 __rte_experimental
1605 int
1606 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1607 		uint32_t n);
1608 
1609 /**
1610  * Dequeue a burst of symmetric crypto processing.
1611  *
1612  * @param	ctx			The initialized raw data-path context
1613  *					data.
1614  * @param	get_dequeue_count	User provided callback function to
1615  *					obtain dequeue operation count.
1616  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1617  *					value is used to pass the maximum
1618  *					number of operations to be dequeued.
1619  * @param	post_dequeue		User provided callback function to
1620  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from device queue. When
 *					*is_user_data_array* is set there
 *					should be enough room to store all
 *					user data.
1626  * @param	is_user_data_array	Set 1 if every dequeued user data will
1627  *					be written into out_user_data array.
1628  *					Set 0 if only the first user data will
1629  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1631  *					total successful operations count.
1632  * @param	dequeue_status		Driver written value to specify the
1633  *					dequeue status. Possible values:
1634  *					- 1: Successfully dequeued the number
1635  *					     of operations returned. The user
1636  *					     data previously set during enqueue
1637  *					     is stored in the "out_user_data".
1638  *					- 0: The number of operations returned
1639  *					     are completed and the user data is
1640  *					     stored in the "out_user_data", but
1641  *					     they are not freed from the queue
1642  *					     until
1643  *					     rte_cryptodev_raw_dequeue_done()
1644  *					     is called.
1645  *					- negative integer: Error occurred.
1646  * @return
1647  *   - The number of operations dequeued or completed but not freed from the
1648  *     queue, depends on "dequeue_status" value.
1649  */
1650 __rte_experimental
1651 uint32_t
1652 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1653 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1654 	uint32_t max_nb_to_dequeue,
1655 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1656 	void **out_user_data, uint8_t is_user_data_array,
1657 	uint32_t *n_success, int *dequeue_status);
1658 
1659 /**
1660  * Dequeue a symmetric crypto processing.
1661  *
1662  * @param	ctx			The initialized raw data-path context
1663  *					data.
1664  * @param	dequeue_status		Driver written value to specify the
1665  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1667  *					     The user data is returned.
1668  *					- 0: The first operation in the queue
1669  *					     is completed and the user data
1670  *					     previously set during enqueue is
1671  *					     returned, but it is not freed from
1672  *					     the queue until
1673  *					     rte_cryptodev_raw_dequeue_done() is
1674  *					     called.
1675  *					- negative integer: Error occurred.
1676  * @param	op_status		Driver written value to specify
1677  *					operation status.
1678  * @return
1679  *   - The user data pointer retrieved from device queue or NULL if no
1680  *     operation is ready for dequeue.
1681  */
1682 __rte_experimental
1683 static __rte_always_inline void *
1684 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1685 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1686 {
1687 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1688 			op_status);
1689 }
1690 
1691 /**
1692  * Inform the queue pair dequeue operations is finished.
1693  *
1694  * @param	ctx	The initialized raw data-path context data.
1695  * @param	n	The number of operations.
1696  * @return
1697  *   - On success return 0.
1698  *   - On failure return negative integer.
1699  */
1700 __rte_experimental
1701 int
1702 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1703 		uint32_t n);
1704 
1705 /**
1706  * Add a user callback for a given crypto device and queue pair which will be
1707  * called on crypto ops enqueue.
1708  *
1709  * This API configures a function to be called for each burst of crypto ops
1710  * received on a given crypto device queue pair. The return value is a pointer
1711  * that can be used later to remove the callback using
1712  * rte_cryptodev_remove_enq_callback().
1713  *
1714  * Callbacks registered by application would not survive
1715  * rte_cryptodev_configure() as it reinitializes the callback list.
1716  * It is user responsibility to remove all installed callbacks before
1717  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1718  * Application is expected to call add API after rte_cryptodev_configure().
1719  *
1720  * Multiple functions can be registered per queue pair & they are called
1721  * in the order they were added. The API does not restrict on maximum number
1722  * of callbacks.
1723  *
1724  * @param	dev_id		The identifier of the device.
1725  * @param	qp_id		The index of the queue pair on which ops are
1726  *				to be enqueued for processing. The value
1727  *				must be in the range [0, nb_queue_pairs - 1]
1728  *				previously supplied to
1729  *				*rte_cryptodev_configure*.
1730  * @param	cb_fn		The callback function
1731  * @param	cb_arg		A generic pointer parameter which will be passed
1732  *				to each invocation of the callback function on
1733  *				this crypto device and queue pair.
1734  *
1735  * @return
1736  *  - NULL on error & rte_errno will contain the error code.
1737  *  - On success, a pointer value which can later be used to remove the
1738  *    callback.
1739  */
1740 
1741 __rte_experimental
1742 struct rte_cryptodev_cb *
1743 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1744 			       uint16_t qp_id,
1745 			       rte_cryptodev_callback_fn cb_fn,
1746 			       void *cb_arg);
1747 
1748 /**
1749  * Remove a user callback function for given crypto device and queue pair.
1750  *
1751  * This function is used to remove enqueue callbacks that were added to a
1752  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1753  *
1754  *
1755  *
1756  * @param	dev_id		The identifier of the device.
1757  * @param	qp_id		The index of the queue pair on which ops are
1758  *				to be enqueued. The value must be in the
1759  *				range [0, nb_queue_pairs - 1] previously
1760  *				supplied to *rte_cryptodev_configure*.
1761  * @param	cb		Pointer to user supplied callback created via
1762  *				rte_cryptodev_add_enq_callback().
1763  *
1764  * @return
1765  *   -  0: Success. Callback was removed.
1766  *   - <0: The dev_id or the qp_id is out of range, or the callback
1767  *         is NULL or not found for the crypto device queue pair.
1768  */
1769 
1770 __rte_experimental
1771 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1772 				      uint16_t qp_id,
1773 				      struct rte_cryptodev_cb *cb);
1774 
1775 /**
1776  * Add a user callback for a given crypto device and queue pair which will be
1777  * called on crypto ops dequeue.
1778  *
1779  * This API configures a function to be called for each burst of crypto ops
1780  * received on a given crypto device queue pair. The return value is a pointer
1781  * that can be used later to remove the callback using
1782  * rte_cryptodev_remove_deq_callback().
1783  *
1784  * Callbacks registered by application would not survive
1785  * rte_cryptodev_configure() as it reinitializes the callback list.
1786  * It is user responsibility to remove all installed callbacks before
1787  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1788  * Application is expected to call add API after rte_cryptodev_configure().
1789  *
1790  * Multiple functions can be registered per queue pair & they are called
1791  * in the order they were added. The API does not restrict on maximum number
1792  * of callbacks.
1793  *
1794  * @param	dev_id		The identifier of the device.
1795  * @param	qp_id		The index of the queue pair on which ops are
1796  *				to be dequeued. The value must be in the
1797  *				range [0, nb_queue_pairs - 1] previously
1798  *				supplied to *rte_cryptodev_configure*.
1799  * @param	cb_fn		The callback function
1800  * @param	cb_arg		A generic pointer parameter which will be passed
1801  *				to each invocation of the callback function on
1802  *				this crypto device and queue pair.
1803  *
1804  * @return
1805  *   - NULL on error & rte_errno will contain the error code.
1806  *   - On success, a pointer value which can later be used to remove the
1807  *     callback.
1808  */
1809 
1810 __rte_experimental
1811 struct rte_cryptodev_cb *
1812 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1813 			       uint16_t qp_id,
1814 			       rte_cryptodev_callback_fn cb_fn,
1815 			       void *cb_arg);
1816 
1817 /**
1818  * Remove a user callback function for given crypto device and queue pair.
1819  *
1820  * This function is used to remove dequeue callbacks that were added to a
1821  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1822  *
1823  *
1824  *
1825  * @param	dev_id		The identifier of the device.
1826  * @param	qp_id		The index of the queue pair on which ops are
1827  *				to be dequeued. The value must be in the
1828  *				range [0, nb_queue_pairs - 1] previously
1829  *				supplied to *rte_cryptodev_configure*.
1830  * @param	cb		Pointer to user supplied callback created via
1831  *				rte_cryptodev_add_deq_callback().
1832  *
1833  * @return
1834  *   -  0: Success. Callback was removed.
1835  *   - <0: The dev_id or the qp_id is out of range, or the callback
1836  *         is NULL or not found for the crypto device queue pair.
1837  */
1838 __rte_experimental
1839 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1840 				      uint16_t qp_id,
1841 				      struct rte_cryptodev_cb *cb);
1842 
1843 #include <rte_cryptodev_core.h>
1844 /**
1845  *
1846  * Dequeue a burst of processed crypto operations from a queue on the crypto
1847  * device. The dequeued operation are stored in *rte_crypto_op* structures
1848  * whose pointers are supplied in the *ops* array.
1849  *
1850  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1851  * actually dequeued, which is the number of *rte_crypto_op* data structures
1852  * effectively supplied into the *ops* array.
1853  *
1854  * A return value equal to *nb_ops* indicates that the queue contained
1855  * at least *nb_ops* operations, and this is likely to signify that other
1856  * processed operations remain in the devices output queue. Applications
1857  * implementing a "retrieve as many processed operations as possible" policy
1858  * can check this specific case and keep invoking the
1859  * rte_cryptodev_dequeue_burst() function until a value less than
1860  * *nb_ops* is returned.
1861  *
1862  * The rte_cryptodev_dequeue_burst() function does not provide any error
1863  * notification to avoid the corresponding overhead.
1864  *
1865  * @param	dev_id		The symmetric crypto device identifier
1866  * @param	qp_id		The index of the queue pair from which to
1867  *				retrieve processed packets. The value must be
1868  *				in the range [0, nb_queue_pair - 1] previously
1869  *				supplied to rte_cryptodev_configure().
1870  * @param	ops		The address of an array of pointers to
1871  *				*rte_crypto_op* structures that must be
1872  *				large enough to store *nb_ops* pointers in it.
1873  * @param	nb_ops		The maximum number of operations to dequeue.
1874  *
1875  * @return
1876  *   - The number of operations actually dequeued, which is the number
1877  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1878  *   *ops* array.
1879  */
1880 static inline uint16_t
1881 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1882 		struct rte_crypto_op **ops, uint16_t nb_ops)
1883 {
1884 	const struct rte_crypto_fp_ops *fp_ops;
1885 	void *qp;
1886 
1887 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1888 
1889 	fp_ops = &rte_crypto_fp_ops[dev_id];
1890 	qp = fp_ops->qp.data[qp_id];
1891 
1892 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1893 
1894 #ifdef RTE_CRYPTO_CALLBACKS
1895 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1896 		struct rte_cryptodev_cb_rcu *list;
1897 		struct rte_cryptodev_cb *cb;
1898 
1899 		/* __ATOMIC_RELEASE memory order was used when the
1900 		 * call back was inserted into the list.
1901 		 * Since there is a clear dependency between loading
1902 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1903 		 * not required.
1904 		 */
1905 		list = &fp_ops->qp.deq_cb[qp_id];
1906 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1907 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1908 
1909 		while (cb != NULL) {
1910 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1911 					cb->arg);
1912 			cb = cb->next;
1913 		};
1914 
1915 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1916 	}
1917 #endif
1918 	return nb_ops;
1919 }
1920 
1921 /**
1922  * Enqueue a burst of operations for processing on a crypto device.
1923  *
1924  * The rte_cryptodev_enqueue_burst() function is invoked to place
1925  * crypto operations on the queue *qp_id* of the device designated by
1926  * its *dev_id*.
1927  *
1928  * The *nb_ops* parameter is the number of operations to process which are
1929  * supplied in the *ops* array of *rte_crypto_op* structures.
1930  *
1931  * The rte_cryptodev_enqueue_burst() function returns the number of
1932  * operations it actually enqueued for processing. A return value equal to
1933  * *nb_ops* means that all packets have been enqueued.
1934  *
1935  * @param	dev_id		The identifier of the device.
1936  * @param	qp_id		The index of the queue pair which packets are
1937  *				to be enqueued for processing. The value
1938  *				must be in the range [0, nb_queue_pairs - 1]
1939  *				previously supplied to
1940  *				 *rte_cryptodev_configure*.
1941  * @param	ops		The address of an array of *nb_ops* pointers
1942  *				to *rte_crypto_op* structures which contain
1943  *				the crypto operations to be processed.
1944  * @param	nb_ops		The number of operations to process.
1945  *
1946  * @return
1947  * The number of operations actually enqueued on the crypto device. The return
1948  * value can be less than the value of the *nb_ops* parameter when the
1949  * crypto devices queue is full or if invalid parameters are specified in
1950  * a *rte_crypto_op*.
1951  */
1952 static inline uint16_t
1953 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1954 		struct rte_crypto_op **ops, uint16_t nb_ops)
1955 {
1956 	const struct rte_crypto_fp_ops *fp_ops;
1957 	void *qp;
1958 
1959 	fp_ops = &rte_crypto_fp_ops[dev_id];
1960 	qp = fp_ops->qp.data[qp_id];
1961 #ifdef RTE_CRYPTO_CALLBACKS
1962 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1963 		struct rte_cryptodev_cb_rcu *list;
1964 		struct rte_cryptodev_cb *cb;
1965 
1966 		/* __ATOMIC_RELEASE memory order was used when the
1967 		 * call back was inserted into the list.
1968 		 * Since there is a clear dependency between loading
1969 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1970 		 * not required.
1971 		 */
1972 		list = &fp_ops->qp.enq_cb[qp_id];
1973 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1974 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1975 
1976 		while (cb != NULL) {
1977 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1978 					cb->arg);
1979 			cb = cb->next;
1980 		};
1981 
1982 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1983 	}
1984 #endif
1985 
1986 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1987 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
1988 }
1989 
1990 
1991 
1992 #ifdef __cplusplus
1993 }
1994 #endif
1995 
1996 #endif /* _RTE_CRYPTODEV_H_ */
1997