1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include <rte_compat.h>
22 #include "rte_kvargs.h"
23 #include "rte_crypto.h"
24 #include <rte_common.h>
25 #include <rte_rcu_qsbr.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
29 extern const char **rte_cyptodev_names;
30 
31 /* Logging Macros */
32 
33 #define CDEV_LOG_ERR(...) \
34 	RTE_LOG(ERR, CRYPTODEV, \
35 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
36 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
37 
38 #define CDEV_LOG_INFO(...) \
39 	RTE_LOG(INFO, CRYPTODEV, \
40 		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
41 			RTE_FMT_TAIL(__VA_ARGS__,)))
42 
43 #define CDEV_LOG_DEBUG(...) \
44 	RTE_LOG(DEBUG, CRYPTODEV, \
45 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
46 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
47 
48 #define CDEV_PMD_TRACE(...) \
49 	RTE_LOG(DEBUG, CRYPTODEV, \
50 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
51 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
52 
53 /**
54  * A macro that points to an offset from the start
55  * of the crypto operation structure (rte_crypto_op)
56  *
57  * The returned pointer is cast to type t.
58  *
59  * @param c
60  *   The crypto operation.
61  * @param t
62  *   The type to cast the result into.
63  * @param o
64  *   The offset from the start of the crypto operation.
65  */
66 #define rte_crypto_op_ctod_offset(c, t, o)	\
67 	((t)((char *)(c) + (o)))
68 
69 /**
70  * A macro that returns the physical address that points
71  * to an offset from the start of the crypto operation
72  * (rte_crypto_op)
73  *
74  * @param c
75  *   The crypto operation.
76  * @param o
77  *   The offset from the start of the crypto operation
78  *   to calculate address from.
79  */
80 #define rte_crypto_op_ctophys_offset(c, o)	\
81 	(rte_iova_t)((c)->phys_addr + (o))
82 
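/*
 * Usage sketch for the two macros above (illustrative only): reach
 * per-operation data, such as a cipher IV, that the application has placed
 * in the private area following the crypto operation. IV_OFFSET is an
 * application-chosen offset, shown here as the space right after the
 * symmetric operation structure.
 *
 *	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 *			sizeof(struct rte_crypto_sym_op))
 *
 *	uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *	rte_iova_t iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
 */
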
83 /**
84  * Crypto parameters range description
85  */
86 struct rte_crypto_param_range {
87 	uint16_t min;	/**< minimum size */
88 	uint16_t max;	/**< maximum size */
89 	uint16_t increment;
90 	/**< if a range of sizes are supported,
91 	 * this parameter is used to indicate
92 	 * increments in byte size that are supported
93 	 * between the minimum and maximum
94 	 */
95 };
96 
97 /**
98  * Data-unit supported lengths of cipher algorithms.
99  * A bit can represent any set of data-unit sizes
100  * (single size, multiple size, range, etc).
101  */
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
105 
106 /**
107  * Symmetric Crypto Capability
108  */
109 struct rte_cryptodev_symmetric_capability {
110 	enum rte_crypto_sym_xform_type xform_type;
111 	/**< Transform type : Authentication / Cipher / AEAD */
112 	RTE_STD_C11
113 	union {
114 		struct {
115 			enum rte_crypto_auth_algorithm algo;
116 			/**< authentication algorithm */
117 			uint16_t block_size;
118 			/**< algorithm block size */
119 			struct rte_crypto_param_range key_size;
120 			/**< auth key size range */
121 			struct rte_crypto_param_range digest_size;
122 			/**< digest size range */
123 			struct rte_crypto_param_range aad_size;
124 			/**< Additional authentication data size range */
125 			struct rte_crypto_param_range iv_size;
126 			/**< Initialisation vector data size range */
127 		} auth;
128 		/**< Symmetric Authentication transform capabilities */
129 		struct {
130 			enum rte_crypto_cipher_algorithm algo;
131 			/**< cipher algorithm */
132 			uint16_t block_size;
133 			/**< algorithm block size */
134 			struct rte_crypto_param_range key_size;
135 			/**< cipher key size range */
136 			struct rte_crypto_param_range iv_size;
137 			/**< Initialisation vector data size range */
138 			uint32_t dataunit_set;
139 			/**<
140 			 * Supported data-unit lengths:
141 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
142 			 * or 0 for lengths defined in the algorithm standard.
143 			 */
144 		} cipher;
145 		/**< Symmetric Cipher transform capabilities */
146 		struct {
147 			enum rte_crypto_aead_algorithm algo;
148 			/**< AEAD algorithm */
149 			uint16_t block_size;
150 			/**< algorithm block size */
151 			struct rte_crypto_param_range key_size;
152 			/**< AEAD key size range */
153 			struct rte_crypto_param_range digest_size;
154 			/**< digest size range */
155 			struct rte_crypto_param_range aad_size;
156 			/**< Additional authentication data size range */
157 			struct rte_crypto_param_range iv_size;
158 			/**< Initialisation vector data size range */
159 		} aead;
160 	};
161 };
162 
163 /**
164  * Asymmetric Xform Crypto Capability
165  *
166  */
167 struct rte_cryptodev_asymmetric_xform_capability {
168 	enum rte_crypto_asym_xform_type xform_type;
169 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
170 
171 	uint32_t op_types;
172 	/**<
173 	 * Bitmask for supported rte_crypto_asym_op_type or
174 	 * rte_crypto_asym_ke_type. Which enum is used is determined
175 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
176 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
177 	 * it is rte_crypto_asym_op_type.
178 	 */
179 
180 	__extension__
181 	union {
182 		struct rte_crypto_param_range modlen;
183 		/**< Range of modulus length supported by modulus based xform.
184 		 * Value 0 means implementation default
185 		 */
186 	};
187 };
188 
189 /**
190  * Asymmetric Crypto Capability
191  *
192  */
193 struct rte_cryptodev_asymmetric_capability {
194 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
195 };
196 
197 
198 /** Structure used to capture a capability of a crypto device */
199 struct rte_cryptodev_capabilities {
200 	enum rte_crypto_op_type op;
201 	/**< Operation type */
202 
203 	RTE_STD_C11
204 	union {
205 		struct rte_cryptodev_symmetric_capability sym;
206 		/**< Symmetric operation capability parameters */
207 		struct rte_cryptodev_asymmetric_capability asym;
208 		/**< Asymmetric operation capability parameters */
209 	};
210 };
211 
212 /** Structure used to describe crypto algorithms */
213 struct rte_cryptodev_sym_capability_idx {
214 	enum rte_crypto_sym_xform_type type;
215 	union {
216 		enum rte_crypto_cipher_algorithm cipher;
217 		enum rte_crypto_auth_algorithm auth;
218 		enum rte_crypto_aead_algorithm aead;
219 	} algo;
220 };
221 
222 /**
223  * Structure used to describe asymmetric crypto xforms
224  * Each xform maps to one asym algorithm.
225  *
226  */
227 struct rte_cryptodev_asym_capability_idx {
228 	enum rte_crypto_asym_xform_type type;
229 	/**< Asymmetric xform (algo) type */
230 };
231 
232 /**
233  * Provide capabilities available for defined device and algorithm
234  *
235  * @param	dev_id		The identifier of the device.
236  * @param	idx		Description of crypto algorithms.
237  *
238  * @return
239  *   - Return description of the symmetric crypto capability if it exists.
240  *   - Return NULL if the capability does not exist.
241  */
242 const struct rte_cryptodev_symmetric_capability *
243 rte_cryptodev_sym_capability_get(uint8_t dev_id,
244 		const struct rte_cryptodev_sym_capability_idx *idx);
245 
246 /**
247  *  Provide capabilities available for defined device and xform
248  *
249  * @param	dev_id		The identifier of the device.
250  * @param	idx		Description of asym crypto xform.
251  *
252  * @return
253  *   - Return description of the asymmetric crypto capability if it exists.
254  *   - Return NULL if the capability does not exist.
255  */
256 __rte_experimental
257 const struct rte_cryptodev_asymmetric_xform_capability *
258 rte_cryptodev_asym_capability_get(uint8_t dev_id,
259 		const struct rte_cryptodev_asym_capability_idx *idx);
260 
261 /**
262  * Check if key size and initial vector are supported
263  * in crypto cipher capability
264  *
265  * @param	capability	Description of the symmetric crypto capability.
266  * @param	key_size	Cipher key size.
267  * @param	iv_size		Cipher initial vector size.
268  *
269  * @return
270  *   - Return 0 if the parameters are in range of the capability.
271  *   - Return -1 if the parameters are out of range of the capability.
272  */
273 int
274 rte_cryptodev_sym_capability_check_cipher(
275 		const struct rte_cryptodev_symmetric_capability *capability,
276 		uint16_t key_size, uint16_t iv_size);
277 
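/*
 * Usage sketch (illustrative only): query the AES-CBC capability of a
 * device and verify that a 16-byte key and 16-byte IV are supported before
 * building a session. dev_id is assumed to identify an attached device.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) < 0)
 *		return -ENOTSUP;
 */
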
278 /**
279  * Check if key size, digest size and initial vector are supported
280  * in crypto auth capability
281  *
282  * @param	capability	Description of the symmetric crypto capability.
283  * @param	key_size	Auth key size.
284  * @param	digest_size	Auth digest size.
285  * @param	iv_size		Auth initial vector size.
286  *
287  * @return
288  *   - Return 0 if the parameters are in range of the capability.
289  *   - Return -1 if the parameters are out of range of the capability.
290  */
291 int
292 rte_cryptodev_sym_capability_check_auth(
293 		const struct rte_cryptodev_symmetric_capability *capability,
294 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
295 
296 /**
297  * Check if key, digest, AAD and initial vector sizes are supported
298  * in crypto AEAD capability
299  *
300  * @param	capability	Description of the symmetric crypto capability.
301  * @param	key_size	AEAD key size.
302  * @param	digest_size	AEAD digest size.
303  * @param	aad_size	AEAD AAD size.
304  * @param	iv_size		AEAD IV size.
305  *
306  * @return
307  *   - Return 0 if the parameters are in range of the capability.
308  *   - Return -1 if the parameters are out of range of the capability.
309  */
310 int
311 rte_cryptodev_sym_capability_check_aead(
312 		const struct rte_cryptodev_symmetric_capability *capability,
313 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
314 		uint16_t iv_size);
315 
316 /**
317  * Check if op type is supported
318  *
319  * @param	capability	Description of the asymmetric crypto capability.
320  * @param	op_type		op type
321  *
322  * @return
323  *   - Return 1 if the op type is supported
324  *   - Return 0 if unsupported
325  */
326 __rte_experimental
327 int
328 rte_cryptodev_asym_xform_capability_check_optype(
329 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
330 		enum rte_crypto_asym_op_type op_type);
331 
332 /**
333  * Check if modulus length is in supported range
334  *
335  * @param	capability	Description of the asymmetric crypto capability.
336  * @param	modlen		modulus length.
337  *
338  * @return
339  *   - Return 0 if the parameters are in range of the capability.
340  *   - Return -1 if the parameters are out of range of the capability.
341  */
342 __rte_experimental
343 int
344 rte_cryptodev_asym_xform_capability_check_modlen(
345 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
346 		uint16_t modlen);
347 
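/*
 * Usage sketch (illustrative only): check whether a device supports modular
 * exponentiation with a 2048-bit (256-byte) modulus. The xform type name
 * follows the rte_crypto_asym_xform_type enum.
 *
 *	struct rte_cryptodev_asym_capability_idx idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *cap =
 *		rte_cryptodev_asym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL ||
 *	    rte_cryptodev_asym_xform_capability_check_modlen(cap, 256) < 0)
 *		return -ENOTSUP;
 */
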
348 /**
349  * Provide the cipher algorithm enum, given an algorithm string
350  *
351  * @param	algo_enum	A pointer to the cipher algorithm
352  *				enum to be filled
353  * @param	algo_string	Cipher algorithm string
354  *
355  * @return
356  * - Return -1 if string is not valid
357  * - Return 0 if the string is valid
358  */
359 int
360 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
361 		const char *algo_string);
362 
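/*
 * Usage sketch (illustrative only): convert an algorithm name supplied on
 * the command line into the corresponding enum value. The string "aes-cbc"
 * is assumed to match the cipher algorithm string table used by the library.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		return -EINVAL;
 */
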
363 /**
364  * Provide the authentication algorithm enum, given an algorithm string
365  *
366  * @param	algo_enum	A pointer to the authentication algorithm
367  *				enum to be filled
368  * @param	algo_string	Authentication algo string
369  *
370  * @return
371  * - Return -1 if string is not valid
372  * - Return 0 if the string is valid
373  */
374 int
375 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
376 		const char *algo_string);
377 
378 /**
379  * Provide the AEAD algorithm enum, given an algorithm string
380  *
381  * @param	algo_enum	A pointer to the AEAD algorithm
382  *				enum to be filled
383  * @param	algo_string	AEAD algorithm string
384  *
385  * @return
386  * - Return -1 if string is not valid
387  * - Return 0 if the string is valid
388  */
389 int
390 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
391 		const char *algo_string);
392 
393 /**
394  * Provide the Asymmetric xform enum, given an xform string
395  *
396  * @param	xform_enum	A pointer to the xform type
397  *				enum to be filled
398  * @param	xform_string	xform string
399  *
400  * @return
401  * - Return -1 if string is not valid
402  * - Return 0 if the string is valid
403  */
404 __rte_experimental
405 int
406 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
407 		const char *xform_string);
408 
409 
410 /** Macro used at end of a crypto PMD's capabilities list */
411 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
412 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
413 
414 
415 /**
416  * Crypto device supported feature flags
417  *
418  * Note:
419  * New feature flags should be added to the end of the list
420  *
421  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
422  */
423 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
424 /**< Symmetric crypto operations are supported */
425 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
426 /**< Asymmetric crypto operations are supported */
427 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
428 /**< Chaining of symmetric crypto operations is supported */
429 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
430 /**< Utilises CPU SIMD SSE instructions */
431 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
432 /**< Utilises CPU SIMD AVX instructions */
433 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
434 /**< Utilises CPU SIMD AVX2 instructions */
435 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
436 /**< Utilises CPU AES-NI instructions */
437 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
438 /**< Operations are off-loaded to an
439  * external hardware accelerator
440  */
441 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
442 /**< Utilises CPU SIMD AVX512 instructions */
443 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
444 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
445  * are supported
446  */
447 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
448 /**< Out-of-place Scatter-gather (SGL) buffers are
449  * supported in input and output
450  */
451 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
452 /**< Out-of-place Scatter-gather (SGL) buffers are supported
453  * in input, combined with linear buffers (LB), with a
454  * single segment in output
455  */
456 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
457 /**< Out-of-place Scatter-gather (SGL) buffers are supported
458  * in output, combined with linear buffers (LB) in input
459  */
460 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
461 /**< Out-of-place linear buffers (LB) are supported in input and output */
462 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
463 /**< Utilises CPU NEON instructions */
464 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
465 /**< Utilises ARM CPU Cryptographic Extensions */
466 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
467 /**< Support Security Protocol Processing */
468 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
469 /**< Support RSA Private Key OP with exponent */
470 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
471 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
472 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
473 /**< Support encrypted-digest operations where digest is appended to data */
474 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
475 /**< Support asymmetric session-less operations */
476 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
477 /**< Support symmetric cpu-crypto processing */
478 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
479 /**< Support symmetric session-less operations */
480 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
481 /**< Support operations on data which is not byte aligned */
482 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
483 /**< Support accelerator specific symmetric raw data-path APIs */
484 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
485 /**< Support operations on multiple data-units message */
486 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
487 /**< Support wrapped key in cipher xform  */
488 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
489 /**< Support inner checksum computation/verification */
490 
491 /**
492  * Get the name of a crypto device feature flag
493  *
494  * @param	flag	The mask describing the flag.
495  *
496  * @return
497  *   The name of this flag, or NULL if it's not a valid feature flag.
498  */
499 
500 extern const char *
501 rte_cryptodev_get_feature_name(uint64_t flag);
502 
503 /**  Crypto device information */
504 struct rte_cryptodev_info {
505 	const char *driver_name;	/**< Driver name. */
506 	uint8_t driver_id;		/**< Driver identifier */
507 	struct rte_device *device;	/**< Generic device information. */
508 
509 	uint64_t feature_flags;
510 	/**< Feature flags exposes HW/SW features for the given device */
511 
512 	const struct rte_cryptodev_capabilities *capabilities;
513 	/**< Array of devices supported capabilities */
514 
515 	unsigned max_nb_queue_pairs;
516 	/**< Maximum number of queue pairs supported by device. */
517 
518 	uint16_t min_mbuf_headroom_req;
519 	/**< Minimum mbuf headroom required by device */
520 
521 	uint16_t min_mbuf_tailroom_req;
522 	/**< Minimum mbuf tailroom required by device */
523 
524 	struct {
525 		unsigned max_nb_sessions;
526 		/**< Maximum number of sessions supported by device.
527 		 * If 0, the device does not have any limitation in
528 		 * number of sessions that can be used.
529 		 */
530 	} sym;
531 };
532 
533 #define RTE_CRYPTODEV_DETACHED  (0)
534 #define RTE_CRYPTODEV_ATTACHED  (1)
535 
536 /** Definitions of Crypto device event types */
537 enum rte_cryptodev_event_type {
538 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
539 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
540 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
541 };
542 
543 /** Crypto device queue pair configuration structure. */
544 struct rte_cryptodev_qp_conf {
545 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
546 	struct rte_mempool *mp_session;
547 	/**< The mempool for creating sessions in sessionless mode */
548 };
549 
550 /**
551  * Function type used for processing crypto ops when enqueue/dequeue burst is
552  * called.
553  *
554  * The callback function is invoked immediately on each enqueue/dequeue burst.
555  *
556  * @param	dev_id		The identifier of the device.
557  * @param	qp_id		The index of the queue pair on which ops are
558  *				enqueued/dequeued. The value must be in the
559  *				range [0, nb_queue_pairs - 1] previously
560  *				supplied to *rte_cryptodev_configure*.
561  * @param	ops		The address of an array of *nb_ops* pointers
562  *				to *rte_crypto_op* structures which contain
563  *				the crypto operations to be processed.
564  * @param	nb_ops		The number of operations to process.
565  * @param	user_param	The arbitrary user parameter passed in by the
566  *				application when the callback was originally
567  *				registered.
568  * @return			The number of ops to be enqueued to the
569  *				crypto device.
570  */
571 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
572 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
573 
574 /**
575  * Typedef for application callback function to be registered by application
576  * software for notification of device events
577  *
578  * @param	dev_id	Crypto device identifier
579  * @param	event	Crypto device event to register for notification of.
580  * @param	cb_arg	User specified parameter to be passed to the
581  *			user's callback function.
582  */
583 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
584 		enum rte_cryptodev_event_type event, void *cb_arg);
585 
586 
587 /** Crypto Device statistics */
588 struct rte_cryptodev_stats {
589 	uint64_t enqueued_count;
590 	/**< Count of all operations enqueued */
591 	uint64_t dequeued_count;
592 	/**< Count of all operations dequeued */
593 
594 	uint64_t enqueue_err_count;
595 	/**< Total error count on operations enqueued */
596 	uint64_t dequeue_err_count;
597 	/**< Total error count on operations dequeued */
598 };
599 
600 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
601 /**< Max length of name of crypto PMD */
602 
603 /**
604  * Get the device identifier for the named crypto device.
605  *
606  * @param	name	device name to select the device structure.
607  *
608  * @return
609  *   - Returns crypto device identifier on success.
610  *   - Return -1 on failure to find named crypto device.
611  */
612 extern int
613 rte_cryptodev_get_dev_id(const char *name);
614 
615 /**
616  * Get the crypto device name given a device identifier.
617  *
618  * @param dev_id
619  *   The identifier of the device
620  *
621  * @return
622  *   - Returns crypto device name.
623  *   - Returns NULL if crypto device is not present.
624  */
625 extern const char *
626 rte_cryptodev_name_get(uint8_t dev_id);
627 
628 /**
629  * Get the total number of crypto devices that have been successfully
630  * initialised.
631  *
632  * @return
633  *   - The total number of usable crypto devices.
634  */
635 extern uint8_t
636 rte_cryptodev_count(void);
637 
638 /**
639  * Get the number of crypto devices that use the given driver.
640  *
641  * @param	driver_id	driver identifier.
642  *
643  * @return
644  *   Returns the number of crypto devices.
645  */
646 extern uint8_t
647 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
648 
649 /**
650  * Get number and identifiers of attached crypto devices that
651  * use the same crypto driver.
652  *
653  * @param	driver_name	driver name.
654  * @param	driver_name	driver name.
655  * @param	devices		output device identifiers.
656  * @param	nb_devices	maximum number of devices.
656  *
657  * @return
658  *   Returns the number of attached crypto devices.
659  */
660 uint8_t
661 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
662 		uint8_t nb_devices);
663 /**
664  * Return the NUMA socket to which a device is connected
665  *
666  * @param dev_id
667  *   The identifier of the device
668  * @return
669  *   The NUMA socket id to which the device is connected or
670  *   a default of zero if the socket could not be determined.
671  *   -1 is returned if the dev_id value is out of range.
672  */
673 extern int
674 rte_cryptodev_socket_id(uint8_t dev_id);
675 
676 /** Crypto device configuration structure */
677 struct rte_cryptodev_config {
678 	int socket_id;			/**< Socket to allocate resources on */
679 	uint16_t nb_queue_pairs;
680 	/**< Number of queue pairs to configure on device */
681 	uint64_t ff_disable;
682 	/**< Feature flags to be disabled. Only the following features are
683 	 * allowed to be disabled,
684 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
685 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
686 	 *  - RTE_CRYPTODEV_FF_SECURITY
687 	 */
688 };
689 
690 /**
691  * Configure a device.
692  *
693  * This function must be invoked first before any other function in the
694  * API. This function can also be re-invoked when a device is in the
695  * stopped state.
696  *
697  * @param	dev_id		The identifier of the device to configure.
698  * @param	config		The crypto device configuration structure.
699  *
700  * @return
701  *   - 0: Success, device configured.
702  *   - <0: Error code returned by the driver configuration function.
703  */
704 extern int
705 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
706 
707 /**
708  * Start a device.
709  *
710  * The device start step is the last one and consists of setting the configured
711  * offload features and starting the transmit and receive units of the
712  * device.
713  * On success, all basic functions exported by the API (link status,
714  * receive/transmit, and so on) can be invoked.
715  *
716  * @param dev_id
717  *   The identifier of the device.
718  * @return
719  *   - 0: Success, device started.
720  *   - <0: Error code of the driver device start function.
721  */
722 extern int
723 rte_cryptodev_start(uint8_t dev_id);
724 
725 /**
726  * Stop a device. The device can be restarted with a call to
727  * rte_cryptodev_start()
728  *
729  * @param	dev_id		The identifier of the device.
730  */
731 extern void
732 rte_cryptodev_stop(uint8_t dev_id);
733 
734 /**
735  * Close a device. The device cannot be restarted!
736  *
737  * @param	dev_id		The identifier of the device.
738  *
739  * @return
740  *  - 0 on successfully closing device
741  *  - <0 on failure to close device
742  */
743 extern int
744 rte_cryptodev_close(uint8_t dev_id);
745 
746 /**
747  * Allocate and set up a queue pair for a device.
748  *
749  *
750  * @param	dev_id		The identifier of the device.
751  * @param	queue_pair_id	The index of the queue pair to set up. The
752  *				value must be in the range [0, nb_queue_pair
753  *				- 1] previously supplied to
754  *				rte_cryptodev_configure().
755  * @param	qp_conf		The pointer to the configuration data to be
756  *				used for the queue pair.
757  * @param	socket_id	The *socket_id* argument is the socket
758  *				identifier in case of NUMA. The value can be
759  *				*SOCKET_ID_ANY* if there is no NUMA constraint
760  *				for the DMA memory allocated for the
761  *				queue pair.
762  *
763  * @return
764  *   - 0: Success, queue pair correctly set up.
765  *   - <0: Queue pair configuration failed
766  */
767 extern int
768 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
769 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
770 
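/*
 * Usage sketch (illustrative only): minimal device bring-up with a single
 * queue pair. dev_id is assumed to identify an attached device and
 * session_pool a mempool created with rte_cryptodev_sym_session_pool_create().
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *		.ff_disable = 0,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			conf.socket_id) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */
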
771 /**
772  * Get the status of queue pairs setup on a specific crypto device
773  *
774  * @param	dev_id		Crypto device identifier.
775  * @param	queue_pair_id	The index of the queue pair to query. The
776  *				value must be in the range [0, nb_queue_pair
777  *				- 1] previously supplied to
778  *				rte_cryptodev_configure().
779  * @return
780  *   - 0: qp was not configured
781  *   - 1: qp was configured
782  *   - -EINVAL: device was not configured
783  */
784 __rte_experimental
785 int
786 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
787 
788 /**
789  * Get the number of queue pairs on a specific crypto device
790  *
791  * @param	dev_id		Crypto device identifier.
792  * @return
793  *   - The number of configured queue pairs.
794  */
795 extern uint16_t
796 rte_cryptodev_queue_pair_count(uint8_t dev_id);
797 
798 
799 /**
800  * Retrieve the general I/O statistics of a device.
801  *
802  * @param	dev_id		The identifier of the device.
803  * @param	stats		A pointer to a structure of type
804  *				*rte_cryptodev_stats* to be filled with the
805  *				values of device counters.
806  * @return
807  *   - Zero if successful.
808  *   - Non-zero otherwise.
809  */
810 extern int
811 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
812 
813 /**
814  * Reset the general I/O statistics of a device.
815  *
816  * @param	dev_id		The identifier of the device.
817  */
818 extern void
819 rte_cryptodev_stats_reset(uint8_t dev_id);
820 
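/*
 * Usage sketch (illustrative only): read and then clear the device counters.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0) {
 *		printf("enqueued %" PRIu64 ", dequeued %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count);
 *		rte_cryptodev_stats_reset(dev_id);
 *	}
 */
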
821 /**
822  * Retrieve the contextual information of a device.
823  *
824  * @param	dev_id		The identifier of the device.
825  * @param	dev_info	A pointer to a structure of type
826  *				*rte_cryptodev_info* to be filled with the
827  *				contextual information of the device.
828  *
829  * @note The capabilities field of dev_info is set to point to the first
830  * element of an array of struct rte_cryptodev_capabilities. The element after
831  * the last valid element has its op field set to
832  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
833  */
834 extern void
835 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
836 
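/*
 * Usage sketch (illustrative only): check a feature flag and walk the
 * capability array, which is terminated by an entry whose op field is
 * RTE_CRYPTO_OP_TYPE_UNDEFINED.
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *
 *	if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
 *		return -ENOTSUP;
 *
 *	for (cap = info.capabilities;
 *	     cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++)
 *		printf("capability for op type %d\n", cap->op);
 */
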
837 
838 /**
839  * Register a callback function for specific device id.
840  *
841  * @param	dev_id		Device id.
842  * @param	event		The event of interest.
843  * @param	cb_fn		User supplied callback function to be called.
844  * @param	cb_arg		Pointer to the parameters for the registered
845  *				callback.
846  *
847  * @return
848  *  - On success, zero.
849  *  - On failure, a negative value.
850  */
851 extern int
852 rte_cryptodev_callback_register(uint8_t dev_id,
853 		enum rte_cryptodev_event_type event,
854 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
855 
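/*
 * Usage sketch (illustrative only): register a handler for error events.
 * crypto_event_cb is an application-defined function matching
 * rte_cryptodev_cb_fn.
 *
 *	static void
 *	crypto_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			printf("error event on crypto device %u\n", dev_id);
 *	}
 *
 *	...
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			crypto_event_cb, NULL);
 */
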
856 /**
857  * Unregister a callback function for specific device id.
858  *
859  * @param	dev_id		The device identifier.
860  * @param	event		The event of interest.
861  * @param	cb_fn		User supplied callback function to be called.
862  * @param	cb_arg		Pointer to the parameters for the registered
863  *				callback.
864  *
865  * @return
866  *  - On success, zero.
867  *  - On failure, a negative value.
868  */
869 extern int
870 rte_cryptodev_callback_unregister(uint8_t dev_id,
871 		enum rte_cryptodev_event_type event,
872 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
873 
874 struct rte_cryptodev_callback;
875 
876 /** Structure to keep track of registered callbacks */
877 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
878 
879 /**
880  * Structure used to hold information about the callbacks to be called for a
881  * queue pair on enqueue/dequeue.
882  */
883 struct rte_cryptodev_cb {
884 	struct rte_cryptodev_cb *next;
885 	/**< Pointer to next callback */
886 	rte_cryptodev_callback_fn fn;
887 	/**< Pointer to callback function */
888 	void *arg;
889 	/**< Pointer to argument */
890 };
891 
892 /**
893  * @internal
894  * Structure used to hold information about the RCU for a queue pair.
895  */
896 struct rte_cryptodev_cb_rcu {
897 	struct rte_cryptodev_cb *next;
898 	/**< Pointer to next callback */
899 	struct rte_rcu_qsbr *qsbr;
900 	/**< RCU QSBR variable per queue pair */
901 };
902 
/**
 * Get the security context of a crypto device.
 *
 * @param	dev_id		The device identifier.
 *
 * @return
 *   - Pointer to the device's security context if the device supports
 *     security operations (RTE_CRYPTODEV_FF_SECURITY).
 *   - NULL otherwise.
 */
903 void *
904 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
905 
906 /**
907  * Create a symmetric session mempool.
908  *
909  * @param name
910  *   The unique mempool name.
911  * @param nb_elts
912  *   The number of elements in the mempool.
913  * @param elt_size
914  *   The size of the element. This value will be ignored if it is smaller than
915  *   the minimum session header size required for the system. For users who
916  *   want to use the same mempool for the sym session and session private data, it
917  *   can be the maximum value of all existing devices' private data and session
918  *   header sizes.
919  * @param cache_size
920  *   The number of per-lcore cache elements
921  * @param priv_size
922  *   The private data size of each session.
923  * @param socket_id
924  *   The *socket_id* argument is the socket identifier in the case of
925  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
926  *   constraint for the reserved zone.
927  *
928  * @return
929  *  - On success returns the created session mempool.
930  *  - On failure returns NULL.
931  */
932 __rte_experimental
933 struct rte_mempool *
934 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
935 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
936 	int socket_id);
937 
938 
939 /**
940  * Create an asymmetric session mempool.
941  *
942  * @param name
943  *   The unique mempool name.
944  * @param nb_elts
945  *   The number of elements in the mempool.
946  * @param cache_size
947  *   The number of per-lcore cache elements
948  * @param user_data_size
949  *   The size of user data to be placed after session private data.
950  * @param socket_id
951  *   The *socket_id* argument is the socket identifier in the case of
952  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
953  *   constraint for the reserved zone.
954  *
955  * @return
956  *  - On success return mempool
957  *  - On failure returns NULL
958  */
959 __rte_experimental
960 struct rte_mempool *
961 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
962 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
963 
964 /**
965  * Create symmetric crypto session and fill out private data for the device id,
966  * based on its device type.
967  *
968  * @param   dev_id   ID of device that we want the session to be used on
969  * @param   xforms   Symmetric crypto transform operations to apply on flow
970  *                   processed with this session
971  * @param   mp       Mempool where the private data is allocated.
972  *
973  * @return
974  *  - On success return pointer to sym-session.
975  *  - On failure returns NULL.
976  */
977 void *
978 rte_cryptodev_sym_session_create(uint8_t dev_id,
979 		struct rte_crypto_sym_xform *xforms,
980 		struct rte_mempool *mp);
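
/*
 * Usage sketch (illustrative only): create a session mempool and an AES-CBC
 * encrypt session on it. The key buffer, IV offset and pool sizing are
 * application choices shown here with placeholder values; the element and
 * private data sizes follow the parameter descriptions above.
 *
 *	uint8_t key[16] = { 0 };
 *	struct rte_mempool *pool;
 *	void *sess;
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = sizeof(key) },
 *			.iv = { .offset = sizeof(struct rte_crypto_op) +
 *					sizeof(struct rte_crypto_sym_op),
 *				.length = 16 },
 *		},
 *	};
 *
 *	pool = rte_cryptodev_sym_session_pool_create("sym_sess_pool", 1024, 0,
 *			32, rte_cryptodev_sym_get_private_session_size(dev_id),
 *			rte_cryptodev_socket_id(dev_id));
 *	if (pool == NULL)
 *		return -1;
 *	sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform, pool);
 *	if (sess == NULL)
 *		return -1;
 */
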
981 /**
982  * Create and initialise an asymmetric crypto session structure.
983  * Calls the PMD to configure the private session data.
984  *
985  * @param   dev_id   ID of device that we want the session to be used on
986  * @param   xforms   Asymmetric crypto transform operations to apply on flow
987  *                   processed with this session
988  * @param   mp       mempool to allocate asymmetric session
989  *                   objects from
990  * @param   session  void ** for session to be used
991  *
992  * @return
993  *  - 0 on success.
994  *  - -EINVAL on invalid arguments.
995  *  - -ENOMEM on memory error for session allocation.
996  *  - -ENOTSUP if device doesn't support session configuration.
997  */
998 __rte_experimental
999 int
1000 rte_cryptodev_asym_session_create(uint8_t dev_id,
1001 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1002 		void **session);
1003 
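/*
 * Usage sketch (illustrative only): create an asymmetric session pool and a
 * modular exponentiation session. mod and exp are assumed to be
 * application-provided big-endian buffers holding the modulus and exponent.
 *
 *	struct rte_mempool *pool;
 *	void *sess;
 *	int ret;
 *	struct rte_crypto_asym_xform xform = {
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *		.modex = {
 *			.modulus = { .data = mod, .length = sizeof(mod) },
 *			.exponent = { .data = exp, .length = sizeof(exp) },
 *		},
 *	};
 *
 *	pool = rte_cryptodev_asym_session_pool_create("asym_sess_pool", 128,
 *			32, 0, rte_cryptodev_socket_id(dev_id));
 *	if (pool == NULL)
 *		return -1;
 *	ret = rte_cryptodev_asym_session_create(dev_id, &xform, pool, &sess);
 *	if (ret < 0)
 *		return ret;
 */
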
1004 /**
1005  * Frees the session for the given device id, returning it to its mempool.
1006  * It is the application's responsibility to ensure that no in-flight
1007  * operations are still using the session.
1008  *
1009  * @param   dev_id   ID of device that uses the session.
1010  * @param   sess     Session header to be freed.
1011  *
1012  * @return
1013  *  - 0 if successful.
1014  *  - -EINVAL if the session is NULL or the device ids do not match.
1015  */
1016 int
1017 rte_cryptodev_sym_session_free(uint8_t dev_id,
1018 	void *sess);
1019 
1020 /**
1021  * Clears and frees asymmetric crypto session header and private data,
1022  * returning it to its original mempool.
1023  *
1024  * @param   dev_id   ID of device that uses the asymmetric session.
1025  * @param   sess     Session header to be freed.
1026  *
1027  * @return
1028  *  - 0 if successful.
1029  *  - -EINVAL if device is invalid or session is NULL.
1030  */
1031 __rte_experimental
1032 int
1033 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1034 
1035 /**
1036  * Get the size of the asymmetric session header.
1037  *
1038  * @return
1039  *   Size of the asymmetric session header.
1040  */
1041 __rte_experimental
1042 unsigned int
1043 rte_cryptodev_asym_get_header_session_size(void);
1044 
1045 /**
1046  * Get the size of the private symmetric session data
1047  * for a device.
1048  *
1049  * @param	dev_id		The device identifier.
1050  *
1051  * @return
1052  *   - Size of the private data, if successful
1053  *   - 0 if the device is invalid or does not have private
1054  *   symmetric session data
1055  */
1056 unsigned int
1057 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1058 
1059 /**
1060  * Get the size of the private data for asymmetric session
1061  * on device
1062  *
1063  * @param	dev_id		The device identifier.
1064  *
1065  * @return
1066  *   - Size of the asymmetric private data, if successful
1067  *   - 0 if the device is invalid or does not have a private session
1068  */
1069 __rte_experimental
1070 unsigned int
1071 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1072 
1073 /**
1074  * Check whether the crypto device index refers to a valid, attached crypto device.
1075  *
1076  * @param	dev_id	Crypto device index.
1077  *
1078  * @return
1079  *   - 1 if the device index is valid, 0 otherwise.
1080  */
1081 unsigned int
1082 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1083 
1084 /**
1085  * Provide driver identifier.
1086  *
1087  * @param name
1088  *   The pointer to a driver name.
1089  * @return
1090  *  The driver type identifier or -1 if no driver found
1091  */
1092 int rte_cryptodev_driver_id_get(const char *name);
1093 
1094 /**
1095  * Provide driver name.
1096  *
1097  * @param driver_id
1098  *   The driver identifier.
1099  * @return
1100  *  The driver name or NULL if no driver is found
1101  */
1102 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1103 
1104 /**
1105  * Store user data in a session.
1106  *
1107  * @param	sess		Session pointer allocated by
1108  *				*rte_cryptodev_sym_session_create*.
1109  * @param	data		Pointer to the user data.
1110  * @param	size		Size of the user data.
1111  *
1112  * @return
1113  *  - On success, zero.
1114  *  - On failure, a negative value.
1115  */
1116 __rte_experimental
1117 int
1118 rte_cryptodev_sym_session_set_user_data(void *sess,
1119 					void *data,
1120 					uint16_t size);
1121 
1122 #define CRYPTO_SESS_OPAQUE_DATA_OFF 0
1123 /**
1124  * Get opaque data from session handle
1125  */
1126 static inline uint64_t
1127 rte_cryptodev_sym_session_opaque_data_get(void *sess)
1128 {
1129 	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
1130 }
1131 
1132 /**
1133  * Set opaque data in session handle
1134  */
1135 static inline void
1136 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1137 {
1138 	uint64_t *data;
1139 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1140 	*data = opaque;
1141 }
1142 
1143 /**
1144  * Get user data stored in a session.
1145  *
1146  * @param	sess		Session pointer allocated by
1147  *				*rte_cryptodev_sym_session_create*.
1148  *
1149  * @return
1150  *  - On success return pointer to user data.
1151  *  - On failure returns NULL.
1152  */
1153 __rte_experimental
1154 void *
1155 rte_cryptodev_sym_session_get_user_data(void *sess);
1156 
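/*
 * Usage sketch (illustrative only): attach a small application context to a
 * session and retrieve it later, for instance when post-processing dequeued
 * operations. It is assumed the session mempool was created with enough
 * room for the user data.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	struct app_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) != 0)
 *		return -1;
 *	p = rte_cryptodev_sym_session_get_user_data(sess);
 */
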
1157 /**
1158  * Store user data in an asymmetric session.
1159  *
1160  * @param	sess		Session pointer allocated by
1161  *				*rte_cryptodev_asym_session_create*.
1162  * @param	data		Pointer to the user data.
1163  * @param	size		Size of the user data.
1164  *
1165  * @return
1166  *  - On success, zero.
1167  *  - -EINVAL if the session pointer is invalid.
1168  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1169  */
1170 __rte_experimental
1171 int
1172 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1173 
1174 /**
1175  * Get user data stored in an asymmetric session.
1176  *
1177  * @param	sess		Session pointer allocated by
1178  *				*rte_cryptodev_asym_session_create*.
1179  *
1180  * @return
1181  *  - On success return pointer to user data.
1182  *  - On failure returns NULL.
1183  */
1184 __rte_experimental
1185 void *
1186 rte_cryptodev_asym_session_get_user_data(void *sess);
1187 
1188 /**
1189  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1190  * on user provided data.
1191  *
1192  * @param	dev_id	The device identifier.
1193  * @param	sess	Cryptodev session structure
1194  * @param	ofs	Start and stop offsets for auth and cipher operations
1195  * @param	vec	Vectorized operation descriptor
1196  *
1197  * @return
1198  *  - Returns number of successfully processed packets.
1199  */
1200 __rte_experimental
1201 uint32_t
1202 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1203 	void *sess, union rte_crypto_sym_ofs ofs,
1204 	struct rte_crypto_sym_vec *vec);
1205 
1206 /**
1207  * Get the size of the raw data-path context buffer.
1208  *
1209  * @param	dev_id		The device identifier.
1210  *
1211  * @return
1212  *   - If the device supports raw data-path APIs, return the context size.
1213  *   - If the device does not support the APIs, return -1.
1214  */
1215 __rte_experimental
1216 int
1217 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1218 
1219 /**
1220  * Set session event meta data
1221  *
1222  * @param	dev_id		The device identifier.
1223  * @param	sess            Crypto or security session.
1224  * @param	op_type         Operation type.
1225  * @param	sess_type       Session type.
1226  * @param	ev_mdata	Pointer to the event crypto meta data
1227  *				(aka *union rte_event_crypto_metadata*)
1228  * @param	size            Size of ev_mdata.
1229  *
1230  * @return
1231  *  - On success, zero.
1232  *  - On failure, a negative value.
1233  */
1234 __rte_experimental
1235 int
1236 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1237 	enum rte_crypto_op_type op_type,
1238 	enum rte_crypto_op_sess_type sess_type,
1239 	void *ev_mdata, uint16_t size);
1240 
1241 /**
1242  * Union of different crypto session types, including session-less xform
1243  * pointer.
1244  */
1245 union rte_cryptodev_session_ctx {void *crypto_sess;
1246 	struct rte_crypto_sym_xform *xform;
1247 	struct rte_security_session *sec_sess;
1248 };
1249 
1250 /**
1251  * Enqueue a vectorized operation descriptor into the device queue but the
1252  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1253  * is called.
1254  *
1255  * @param	qp		Driver specific queue pair data.
1256  * @param	drv_ctx		Driver specific context data.
1257  * @param	vec		Vectorized operation descriptor.
1258  * @param	ofs		Start and stop offsets for auth and cipher
1259  *				operations.
1260  * @param	user_data	The array of user data for dequeue later.
1261  * @param	enqueue_status	Driver written value to specify the
1262  *				enqueue status. Possible values:
1263  *				- 1: The number of operations returned are
1264  *				     enqueued successfully.
1265  *				- 0: The number of operations returned are
1266  *				     cached into the queue but are not processed
1267  *				     until rte_cryptodev_raw_enqueue_done() is
1268  *				     called.
1269  *				- negative integer: Error occurred.
1270  * @return
1271  *   - The number of operations in the descriptor successfully enqueued or
1272  *     cached into the queue but not enqueued yet, depends on the
1273  *     "enqueue_status" value.
1274  */
1275 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1276 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1277 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1278 
1279 /**
1280  * Enqueue single raw data vector into the device queue but the driver may or
1281  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1282  *
1283  * @param	qp		Driver specific queue pair data.
1284  * @param	drv_ctx		Driver specific context data.
1285  * @param	data_vec	The buffer data vector.
1286  * @param	n_data_vecs	Number of buffer data vectors.
1287  * @param	ofs		Start and stop offsets for auth and cipher
1288  *				operations.
1289  * @param	iv		IV virtual and IOVA addresses
1290  * @param	digest		digest virtual and IOVA addresses
1291  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1292  *				depends on the algorithm used.
1293  * @param	user_data	The user data.
1294  * @return
1295  *   - 1: The data vector is enqueued successfully.
1296  *   - 0: The data vector is cached into the queue but is not processed
1297  *        until rte_cryptodev_raw_enqueue_done() is called.
1298  *   - negative integer: failure.
1299  */
1300 typedef int (*cryptodev_sym_raw_enqueue_t)(
1301 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1302 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1303 	struct rte_crypto_va_iova_ptr *iv,
1304 	struct rte_crypto_va_iova_ptr *digest,
1305 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1306 	void *user_data);
1307 
1308 /**
1309  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1310  * enqueued/dequeued operations.
1311  *
1312  * @param	qp		Driver specific queue pair data.
1313  * @param	drv_ctx		Driver specific context data.
1314  * @param	n		The total number of processed operations.
1315  * @return
1316  *   - On success return 0.
1317  *   - On failure return negative integer.
1318  */
1319 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1320 	uint32_t n);
1321 
1322 /**
1323  * Function prototype provided by the user for the driver to get the dequeue count.
1324  * The function may return a fixed number or the number parsed from the user
1325  * data stored in the first processed operation.
1326  *
1327  * @param	user_data	Dequeued user data.
1328  * @return
1329  *  - The number of operations to be dequeued.
1330  **/
1331 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1332 
1333 /**
1334  * Function prototype provided by the user to post-process a dequeued operation,
1335  * such as filling in its status.
1336  *
1337  * @param	user_data	Dequeued user data.
1338  * @param	index		Index number of the processed descriptor.
1339  * @param	is_op_success	Operation status provided by the driver.
1340  **/
1341 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1342 	uint32_t index, uint8_t is_op_success);
1343 
1344 /**
1345  * Dequeue a burst of symmetric crypto processing.
1346  *
1347  * @param	qp			Driver specific queue pair data.
1348  * @param	drv_ctx			Driver specific context data.
1349  * @param	get_dequeue_count	User provided callback function to
1350  *					obtain dequeue operation count.
1351  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1352  *					value is used to pass the maximum
1353  *					number of operations to be dequeued.
1354  * @param	post_dequeue		User provided callback function to
1355  *					post-process a dequeued operation.
1356  * @param	out_user_data		User data pointer array to be retrieved
1357  *					from the device queue. If
1358  *					*is_user_data_array* is set, there
1359  *					should be enough room to store all
1360  *					user data.
1361  * @param	is_user_data_array	Set 1 if every dequeued user data will
1362  *					be written into out_user_data array.
1363  *					Set 0 if only the first user data will
1364  *					be written into out_user_data array.
1365  * @param	n_success		Driver written value to specify the
1366  *					total successful operations count.
1367  * @param	dequeue_status		Driver written value to specify the
1368  *					dequeue status. Possible values:
1369  *					- 1: Successfully dequeued the number
1370  *					     of operations returned. The user
1371  *					     data previously set during enqueue
1372  *					     is stored in the "out_user_data".
1373  *					- 0: The number of operations returned
1374  *					     are completed and the user data is
1375  *					     stored in the "out_user_data", but
1376  *					     they are not freed from the queue
1377  *					     until
1378  *					     rte_cryptodev_raw_dequeue_done()
1379  *					     is called.
1380  *					- negative integer: Error occurred.
1381  * @return
1382  *   - The number of operations dequeued or completed but not freed from the
1383  *     queue, depends on "dequeue_status" value.
1384  */
1385 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1386 	uint8_t *drv_ctx,
1387 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1388 	uint32_t max_nb_to_dequeue,
1389 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1390 	void **out_user_data, uint8_t is_user_data_array,
1391 	uint32_t *n_success, int *dequeue_status);
1392 
1393 /**
1394  * Dequeue a symmetric crypto processing.
1395  *
1396  * @param	qp			Driver specific queue pair data.
1397  * @param	drv_ctx			Driver specific context data.
1398  * @param	dequeue_status		Driver written value to specify the
1399  *					dequeue status. Possible values:
1400  *					- 1: Successfully dequeued an operation.
1401  *					     The user data is returned.
1402  *					- 0: The first operation in the queue
1403  *					     is completed and the user data
1404  *					     previously set during enqueue is
1405  *					     returned, but it is not freed from
1406  *					     the queue until
1407  *					     rte_cryptodev_raw_dequeue_done() is
1408  *					     called.
1409  *					- negative integer: Error occurred.
1410  * @param	op_status		Driver written value to specify
1411  *					operation status.
1412  * @return
1413  *   - The user data pointer retrieved from device queue or NULL if no
1414  *     operation is ready for dequeue.
1415  */
1416 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1417 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1418 		enum rte_crypto_op_status *op_status);
1419 
1420 /**
1421  * Context data for raw data-path API crypto process. The buffer of this
1422  * structure is to be allocated by the user application with a size equal
1423  * to or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1424  */
1425 struct rte_crypto_raw_dp_ctx {
1426 	void *qp_data;
1427 
1428 	cryptodev_sym_raw_enqueue_t enqueue;
1429 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1430 	cryptodev_sym_raw_operation_done_t enqueue_done;
1431 	cryptodev_sym_raw_dequeue_t dequeue;
1432 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1433 	cryptodev_sym_raw_operation_done_t dequeue_done;
1434 
1435 	/* Driver specific context data */
1436 	__extension__ uint8_t drv_ctx_data[];
1437 };
1438 
1439 /**
1440  * Configure raw data-path context data.
1441  *
1442  * NOTE:
1443  * After the context data is configured, the user should call
1444  * rte_cryptodev_raw_attach_session() before using it in
1445  * rte_cryptodev_raw_enqueue/dequeue function call.
1446  *
1447  * @param	dev_id		The device identifier.
1448  * @param	qp_id		The index of the queue pair from which to
1449  *				retrieve processed packets. The value must be
1450  *				in the range [0, nb_queue_pair - 1] previously
1451  *				supplied to rte_cryptodev_configure().
1452  * @param	ctx		The raw data-path context data.
1453  * @param	sess_type	session type.
1454  * @param	session_ctx	Session context data.
1455  * @param	is_update	Set 0 if it is to initialize the ctx.
1456  *				Set 1 if ctx is initialized and only to update
1457  *				session context data.
1458  * @return
1459  *   - On success return 0.
1460  *   - On failure return negative integer.
1461  */
1462 __rte_experimental
1463 int
1464 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1465 	struct rte_crypto_raw_dp_ctx *ctx,
1466 	enum rte_crypto_op_sess_type sess_type,
1467 	union rte_cryptodev_session_ctx session_ctx,
1468 	uint8_t is_update);
1469 
1470 /**
1471  * Enqueue a vectorized operation descriptor into the device queue but the
1472  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1473  * is called.
1474  *
1475  * @param	ctx		The initialized raw data-path context data.
1476  * @param	vec		Vectorized operation descriptor.
1477  * @param	ofs		Start and stop offsets for auth and cipher
1478  *				operations.
1479  * @param	user_data	The array of user data for dequeue later.
1480  * @param	enqueue_status	Driver written value to specify the
1481  *				enqueue status. Possible values:
1482  *				- 1: The number of operations returned are
1483  *				     enqueued successfully.
1484  *				- 0: The number of operations returned are
1485  *				     cached into the queue but are not processed
1486  *				     until rte_cryptodev_raw_enqueue_done() is
1487  *				     called.
1488  *				- negative integer: Error occurred.
1489  * @return
1490  *   - The number of operations in the descriptor successfully enqueued or
1491  *     cached into the queue but not enqueued yet, depends on the
1492  *     "enqueue_status" value.
1493  */
1494 __rte_experimental
1495 uint32_t
1496 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1497 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1498 	void **user_data, int *enqueue_status);
1499 
1500 /**
1501  * Enqueue single raw data vector into the device queue but the driver may or
1502  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1503  *
1504  * @param	ctx		The initialized raw data-path context data.
1505  * @param	data_vec	The buffer data vector.
1506  * @param	n_data_vecs	Number of buffer data vectors.
1507  * @param	ofs		Start and stop offsets for auth and cipher
1508  *				operations.
1509  * @param	iv		IV virtual and IOVA addresses
1510  * @param	digest		digest virtual and IOVA addresses
1511  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1512  *				depends on the algorithm used.
1513  * @param	user_data	The user data.
1514  * @return
1515  *   - 1: The data vector is enqueued successfully.
1516  *   - 0: The data vector is cached into the queue but is not processed
1517  *        until rte_cryptodev_raw_enqueue_done() is called.
1518  *   - negative integer: failure.
1519  */
1520 __rte_experimental
1521 static __rte_always_inline int
1522 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1523 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1524 	union rte_crypto_sym_ofs ofs,
1525 	struct rte_crypto_va_iova_ptr *iv,
1526 	struct rte_crypto_va_iova_ptr *digest,
1527 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1528 	void *user_data)
1529 {
1530 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1531 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1532 }
1533 
1534 /**
1535  * Start processing all enqueued operations from last
1536  * rte_cryptodev_configure_raw_dp_ctx() call.
1537  *
1538  * @param	ctx	The initialized raw data-path context data.
1539  * @param	n	The number of operations cached.
1540  * @return
1541  *   - On success return 0.
1542  *   - On failure return negative integer.
1543  */
1544 __rte_experimental
1545 int
1546 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1547 		uint32_t n);
1548 
1549 /**
1550  * Dequeue a burst of symmetric crypto processing.
1551  *
1552  * @param	ctx			The initialized raw data-path context
1553  *					data.
1554  * @param	get_dequeue_count	User provided callback function to
1555  *					obtain dequeue operation count.
1556  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1557  *					value is used to pass the maximum
1558  *					number of operations to be dequeued.
1559  * @param	post_dequeue		User provided callback function to
1560  *					post-process a dequeued operation.
1561  * @param	out_user_data		User data pointer array to be retrieved
1562  *					from the device queue. If
1563  *					*is_user_data_array* is set, there
1564  *					should be enough room to store all
1565  *					user data.
1566  * @param	is_user_data_array	Set 1 if every dequeued user data will
1567  *					be written into out_user_data array.
1568  *					Set 0 if only the first user data will
1569  *					be written into out_user_data array.
1570  * @param	n_success		Driver written value to specify the
1571  *					total successful operations count.
1572  * @param	dequeue_status		Driver written value to specify the
1573  *					dequeue status. Possible values:
1574  *					- 1: Successfully dequeued the number
1575  *					     of operations returned. The user
1576  *					     data previously set during enqueue
1577  *					     is stored in the "out_user_data".
1578  *					- 0: The number of operations returned
1579  *					     are completed and the user data is
1580  *					     stored in the "out_user_data", but
1581  *					     they are not freed from the queue
1582  *					     until
1583  *					     rte_cryptodev_raw_dequeue_done()
1584  *					     is called.
1585  *					- negative integer: Error occurred.
1586  * @return
1587  *   - The number of operations dequeued or completed but not freed from the
1588  *     queue, depends on "dequeue_status" value.
1589  */
1590 __rte_experimental
1591 uint32_t
1592 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1593 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1594 	uint32_t max_nb_to_dequeue,
1595 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1596 	void **out_user_data, uint8_t is_user_data_array,
1597 	uint32_t *n_success, int *dequeue_status);
1598 
1599 /**
1600  * Dequeue a symmetric crypto processing.
1601  *
1602  * @param	ctx			The initialized raw data-path context
1603  *					data.
1604  * @param	dequeue_status		Driver written value to specify the
1605  *					dequeue status. Possible values:
1606  *					- 1: Successfully dequeued an operation.
1607  *					     The user data is returned.
1608  *					- 0: The first operation in the queue
1609  *					     is completed and the user data
1610  *					     previously set during enqueue is
1611  *					     returned, but it is not freed from
1612  *					     the queue until
1613  *					     rte_cryptodev_raw_dequeue_done() is
1614  *					     called.
1615  *					- negative integer: Error occurred.
1616  * @param	op_status		Driver written value to specify
1617  *					operation status.
1618  * @return
1619  *   - The user data pointer retrieved from device queue or NULL if no
1620  *     operation is ready for dequeue.
1621  */
1622 __rte_experimental
1623 static __rte_always_inline void *
1624 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1625 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1626 {
1627 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1628 			op_status);
1629 }
1630 
1631 /**
1632  * Inform the queue pair that dequeue operations are finished.
1633  *
1634  * @param	ctx	The initialized raw data-path context data.
1635  * @param	n	The number of operations.
1636  * @return
1637  *   - On success return 0.
1638  *   - On failure return negative integer.
1639  */
1640 __rte_experimental
1641 int
1642 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1643 		uint32_t n);
1644 
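/*
 * Illustrative usage sketch (not part of the API): poll for a single
 * completed raw data-path operation and acknowledge it when the driver
 * keeps it in the queue until rte_cryptodev_raw_dequeue_done() is
 * called (dequeue_status == 0). The helper name is hypothetical.
 *
 *	static void *
 *	poll_one_raw_op(struct rte_crypto_raw_dp_ctx *ctx,
 *		enum rte_crypto_op_status *op_status)
 *	{
 *		int dequeue_status;
 *		void *user_data;
 *
 *		user_data = rte_cryptodev_raw_dequeue(ctx, &dequeue_status,
 *				op_status);
 *		if (user_data != NULL && dequeue_status == 0)
 *			rte_cryptodev_raw_dequeue_done(ctx, 1);
 *		return user_data;
 *	}
 */
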
1645 /**
1646  * Add a user callback for a given crypto device and queue pair which will be
1647  * called on crypto ops enqueue.
1648  *
1649  * This API configures a function to be called for each burst of crypto ops
1650  * enqueued on a given crypto device queue pair. The return value is a pointer
1651  * that can be used later to remove the callback using
1652  * rte_cryptodev_remove_enq_callback().
1653  *
1654  * Callbacks registered by the application will not survive
1655  * rte_cryptodev_configure() as it reinitializes the callback list.
1656  * It is the user's responsibility to remove all installed callbacks before
1657  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1658  * The application is expected to call the add API after rte_cryptodev_configure().
1659  *
1660  * Multiple functions can be registered per queue pair and they are called
1661  * in the order they were added. The API does not restrict the maximum number
1662  * of callbacks.
1663  *
1664  * @param	dev_id		The identifier of the device.
1665  * @param	qp_id		The index of the queue pair on which ops are
1666  *				to be enqueued for processing. The value
1667  *				must be in the range [0, nb_queue_pairs - 1]
1668  *				previously supplied to
1669  *				*rte_cryptodev_configure*.
1670  * @param	cb_fn		The callback function
1671  * @param	cb_arg		A generic pointer parameter which will be passed
1672  *				to each invocation of the callback function on
1673  *				this crypto device and queue pair.
1674  *
1675  * @return
1676  *  - NULL on error; rte_errno will contain the error code.
1677  *  - On success, a pointer value which can later be used to remove the
1678  *    callback.
1679  */
1681 __rte_experimental
1682 struct rte_cryptodev_cb *
1683 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1684 			       uint16_t qp_id,
1685 			       rte_cryptodev_callback_fn cb_fn,
1686 			       void *cb_arg);
1687 
1688 /**
1689  * Remove a user callback function for a given crypto device and queue pair.
1690  *
1691  * This function is used to remove enqueue callbacks that were added to a
1692  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1693  *
1696  * @param	dev_id		The identifier of the device.
1697  * @param	qp_id		The index of the queue pair on which ops are
1698  *				to be enqueued. The value must be in the
1699  *				range [0, nb_queue_pairs - 1] previously
1700  *				supplied to *rte_cryptodev_configure*.
1701  * @param	cb		Pointer to user supplied callback created via
1702  *				rte_cryptodev_add_enq_callback().
1703  *
1704  * @return
1705  *   -  0: Success. Callback was removed.
1706  *   - <0: The dev_id or the qp_id is out of range, or the callback
1707  *         is NULL or not found for the crypto device queue pair.
1708  */
1710 __rte_experimental
1711 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1712 				      uint16_t qp_id,
1713 				      struct rte_cryptodev_cb *cb);
1714 
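/*
 * Illustrative usage sketch (not part of the API): register an enqueue
 * callback that counts the ops submitted on a queue pair and remove it
 * again before the device is reconfigured. The callback follows the
 * rte_cryptodev_callback_fn signature; the counter and function names
 * are hypothetical.
 *
 *	static uint64_t enq_op_count;
 *
 *	static uint16_t
 *	count_enq_cb(uint16_t dev_id, uint16_t qp_id,
 *		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		RTE_SET_USED(user_param);
 *		enq_op_count += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static int
 *	setup_and_teardown_cb(uint8_t dev_id, uint16_t qp_id)
 *	{
 *		struct rte_cryptodev_cb *cb;
 *
 *		cb = rte_cryptodev_add_enq_callback(dev_id, qp_id,
 *				count_enq_cb, NULL);
 *		if (cb == NULL)
 *			return -rte_errno;
 *		// ... enqueue traffic ...
 *		// Remove before rte_cryptodev_configure() is called again.
 *		return rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 *	}
 */
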
1715 /**
1716  * Add a user callback for a given crypto device and queue pair which will be
1717  * called on crypto ops dequeue.
1718  *
1719  * This API configures a function to be called for each burst of crypto ops
1720  * dequeued from a given crypto device queue pair. The return value is a pointer
1721  * that can be used later to remove the callback using
1722  * rte_cryptodev_remove_deq_callback().
1723  *
1724  * Callbacks registered by the application will not survive
1725  * rte_cryptodev_configure() as it reinitializes the callback list.
1726  * It is the user's responsibility to remove all installed callbacks before
1727  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1728  * The application is expected to call the add API after rte_cryptodev_configure().
1729  *
1730  * Multiple functions can be registered per queue pair and they are called
1731  * in the order they were added. The API does not restrict the maximum number
1732  * of callbacks.
1733  *
1734  * @param	dev_id		The identifier of the device.
1735  * @param	qp_id		The index of the queue pair on which ops are
1736  *				to be dequeued. The value must be in the
1737  *				range [0, nb_queue_pairs - 1] previously
1738  *				supplied to *rte_cryptodev_configure*.
1739  * @param	cb_fn		The callback function
1740  * @param	cb_arg		A generic pointer parameter which will be passed
1741  *				to each invocation of the callback function on
1742  *				this crypto device and queue pair.
1743  *
1744  * @return
1745  *   - NULL on error; rte_errno will contain the error code.
1746  *   - On success, a pointer value which can later be used to remove the
1747  *     callback.
1748  */
1750 __rte_experimental
1751 struct rte_cryptodev_cb *
1752 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1753 			       uint16_t qp_id,
1754 			       rte_cryptodev_callback_fn cb_fn,
1755 			       void *cb_arg);
1756 
1757 /**
1758  * Remove a user callback function for a given crypto device and queue pair.
1759  *
1760  * This function is used to remove dequeue callbacks that were added to a
1761  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1762  *
1765  * @param	dev_id		The identifier of the device.
1766  * @param	qp_id		The index of the queue pair on which ops are
1767  *				to be dequeued. The value must be in the
1768  *				range [0, nb_queue_pairs - 1] previously
1769  *				supplied to *rte_cryptodev_configure*.
1770  * @param	cb		Pointer to user supplied callback created via
1771  *				rte_cryptodev_add_deq_callback().
1772  *
1773  * @return
1774  *   -  0: Success. Callback was removed.
1775  *   - <0: The dev_id or the qp_id is out of range, or the callback
1776  *         is NULL or not found for the crypto device queue pair.
1777  */
1778 __rte_experimental
1779 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1780 				      uint16_t qp_id,
1781 				      struct rte_cryptodev_cb *cb);
1782 
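/*
 * Illustrative usage sketch (not part of the API): register a dequeue
 * callback that counts ops which did not complete successfully. The
 * callback follows the rte_cryptodev_callback_fn signature; the
 * counter and function names are hypothetical.
 *
 *	static uint64_t deq_err_count;
 *
 *	static uint16_t
 *	count_deq_errors_cb(uint16_t dev_id, uint16_t qp_id,
 *		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param)
 *	{
 *		uint16_t i;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(user_param);
 *		for (i = 0; i < nb_ops; i++)
 *			if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *				deq_err_count++;
 *		return nb_ops;
 *	}
 *
 *	// Registration and removal mirror the enqueue-callback example
 *	// above, using rte_cryptodev_add_deq_callback() and
 *	// rte_cryptodev_remove_deq_callback().
 */
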
1783 #include <rte_cryptodev_core.h>
1784 /**
1785  *
1786  * Dequeue a burst of processed crypto operations from a queue on the crypto
1787  * device. The dequeued operations are stored in *rte_crypto_op* structures
1788  * whose pointers are supplied in the *ops* array.
1789  *
1790  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1791  * actually dequeued, which is the number of *rte_crypto_op* data structures
1792  * effectively supplied into the *ops* array.
1793  *
1794  * A return value equal to *nb_ops* indicates that the queue contained
1795  * at least *nb_ops* operations, and this is likely to signify that other
1796  * processed operations remain in the device's output queue. Applications
1797  * implementing a "retrieve as many processed operations as possible" policy
1798  * can check this specific case and keep invoking the
1799  * rte_cryptodev_dequeue_burst() function until a value less than
1800  * *nb_ops* is returned.
1801  *
1802  * The rte_cryptodev_dequeue_burst() function does not provide any error
1803  * notification to avoid the corresponding overhead.
1804  *
1805  * @param	dev_id		The symmetric crypto device identifier
1806  * @param	qp_id		The index of the queue pair from which to
1807  *				retrieve processed packets. The value must be
1808  *				in the range [0, nb_queue_pair - 1] previously
1809  *				supplied to rte_cryptodev_configure().
1810  * @param	ops		The address of an array of pointers to
1811  *				*rte_crypto_op* structures that must be
1812  *				large enough to store *nb_ops* pointers in it.
1813  * @param	nb_ops		The maximum number of operations to dequeue.
1814  *
1815  * @return
1816  *   - The number of operations actually dequeued, which is the number
1817  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1818  *   *ops* array.
1819  */
1820 static inline uint16_t
1821 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1822 		struct rte_crypto_op **ops, uint16_t nb_ops)
1823 {
1824 	const struct rte_crypto_fp_ops *fp_ops;
1825 	void *qp;
1826 
1827 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1828 
1829 	fp_ops = &rte_crypto_fp_ops[dev_id];
1830 	qp = fp_ops->qp.data[qp_id];
1831 
1832 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1833 
1834 #ifdef RTE_CRYPTO_CALLBACKS
1835 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1836 		struct rte_cryptodev_cb_rcu *list;
1837 		struct rte_cryptodev_cb *cb;
1838 
1839 		/* __ATOMIC_RELEASE memory order was used when the
1840 		 * callback was inserted into the list.
1841 		 * Since there is a clear dependency between loading
1842 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1843 		 * not required.
1844 		 */
1845 		list = &fp_ops->qp.deq_cb[qp_id];
1846 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1847 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1848 
1849 		while (cb != NULL) {
1850 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1851 					cb->arg);
1852 			cb = cb->next;
1853 		}
1854 
1855 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1856 	}
1857 #endif
1858 	return nb_ops;
1859 }
1860 
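/*
 * Illustrative usage sketch (not part of the API): drain a queue pair
 * by calling rte_cryptodev_dequeue_burst() until it returns fewer than
 * BURST_SIZE ops, counting and freeing the completed ops. BURST_SIZE,
 * the helper name and the per-op handling are hypothetical.
 *
 *	#define BURST_SIZE 32
 *
 *	static uint32_t
 *	drain_qp(uint8_t dev_id, uint16_t qp_id)
 *	{
 *		struct rte_crypto_op *ops[BURST_SIZE];
 *		uint32_t n_ok = 0;
 *		uint16_t nb, i;
 *
 *		do {
 *			nb = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *					ops, BURST_SIZE);
 *			for (i = 0; i < nb; i++) {
 *				if (ops[i]->status ==
 *						RTE_CRYPTO_OP_STATUS_SUCCESS)
 *					n_ok++;
 *				rte_crypto_op_free(ops[i]);
 *			}
 *		} while (nb == BURST_SIZE);
 *		return n_ok;
 *	}
 */
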
1861 /**
1862  * Enqueue a burst of operations for processing on a crypto device.
1863  *
1864  * The rte_cryptodev_enqueue_burst() function is invoked to place
1865  * crypto operations on the queue *qp_id* of the device designated by
1866  * its *dev_id*.
1867  *
1868  * The *nb_ops* parameter is the number of operations to process which are
1869  * supplied in the *ops* array of *rte_crypto_op* structures.
1870  *
1871  * The rte_cryptodev_enqueue_burst() function returns the number of
1872  * operations it actually enqueued for processing. A return value equal to
1873  * *nb_ops* means that all packets have been enqueued.
1874  *
1875  * @param	dev_id		The identifier of the device.
1876  * @param	qp_id		The index of the queue pair on which packets are
1877  *				to be enqueued for processing. The value
1878  *				must be in the range [0, nb_queue_pairs - 1]
1879  *				previously supplied to
1880  *				 *rte_cryptodev_configure*.
1881  * @param	ops		The address of an array of *nb_ops* pointers
1882  *				to *rte_crypto_op* structures which contain
1883  *				the crypto operations to be processed.
1884  * @param	nb_ops		The number of operations to process.
1885  *
1886  * @return
1887  * The number of operations actually enqueued on the crypto device. The return
1888  * value can be less than the value of the *nb_ops* parameter when the
1889  * crypto device's queue is full or if invalid parameters are specified in
1890  * a *rte_crypto_op*.
1891  */
1892 static inline uint16_t
1893 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1894 		struct rte_crypto_op **ops, uint16_t nb_ops)
1895 {
1896 	const struct rte_crypto_fp_ops *fp_ops;
1897 	void *qp;
1898 
1899 	fp_ops = &rte_crypto_fp_ops[dev_id];
1900 	qp = fp_ops->qp.data[qp_id];
1901 #ifdef RTE_CRYPTO_CALLBACKS
1902 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1903 		struct rte_cryptodev_cb_rcu *list;
1904 		struct rte_cryptodev_cb *cb;
1905 
1906 		/* __ATOMIC_RELEASE memory order was used when the
1907 		 * callback was inserted into the list.
1908 		 * Since there is a clear dependency between loading
1909 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1910 		 * not required.
1911 		 */
1912 		list = &fp_ops->qp.enq_cb[qp_id];
1913 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1914 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1915 
1916 		while (cb != NULL) {
1917 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1918 					cb->arg);
1919 			cb = cb->next;
1920 		}
1921 
1922 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1923 	}
1924 #endif
1925 
1926 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1927 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
1928 }
1929 
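/*
 * Illustrative usage sketch (not part of the API): enqueue a prepared
 * burst of ops, retrying the remainder when the queue pair is full so
 * that no operation is silently dropped. The helper name and the busy
 * retry policy are hypothetical; real applications typically bound the
 * retries or interleave dequeues.
 *
 *	static void
 *	enqueue_all(uint8_t dev_id, uint16_t qp_id,
 *		struct rte_crypto_op **ops, uint16_t nb_ops)
 *	{
 *		uint16_t sent = 0;
 *
 *		while (sent < nb_ops)
 *			sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *					ops + sent, nb_ops - sent);
 *	}
 */
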
1932 #ifdef __cplusplus
1933 }
1934 #endif
1935 
1936 #endif /* _RTE_CRYPTODEV_H_ */
1937