1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include "rte_kvargs.h"
22 #include "rte_crypto.h"
23 #include <rte_common.h>
24 #include <rte_rcu_qsbr.h>
25 
26 #include "rte_cryptodev_trace_fp.h"
27 
28 extern const char **rte_cyptodev_names;
29 
30 /* Logging Macros */
31 
32 #define CDEV_LOG_ERR(...) \
33 	RTE_LOG(ERR, CRYPTODEV, \
34 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
35 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
36 
37 #define CDEV_LOG_INFO(...) \
38 	RTE_LOG(INFO, CRYPTODEV, \
39 		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
40 			RTE_FMT_TAIL(__VA_ARGS__,)))
41 
42 #define CDEV_LOG_DEBUG(...) \
43 	RTE_LOG(DEBUG, CRYPTODEV, \
44 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
45 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
46 
47 #define CDEV_PMD_TRACE(...) \
48 	RTE_LOG(DEBUG, CRYPTODEV, \
49 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
50 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
51 
52 /**
 * A macro that returns a pointer to an offset from the start
 * of the crypto operation structure (rte_crypto_op).
55  *
56  * The returned pointer is cast to type t.
57  *
58  * @param c
59  *   The crypto operation.
60  * @param o
61  *   The offset from the start of the crypto operation.
62  * @param t
63  *   The type to cast the result into.
64  */
65 #define rte_crypto_op_ctod_offset(c, t, o)	\
66 	((t)((char *)(c) + (o)))
67 
68 /**
69  * A macro that returns the physical address that points
70  * to an offset from the start of the crypto operation
71  * (rte_crypto_op)
72  *
73  * @param c
74  *   The crypto operation.
75  * @param o
76  *   The offset from the start of the crypto operation
77  *   to calculate address from.
78  */
79 #define rte_crypto_op_ctophys_offset(c, o)	\
80 	(rte_iova_t)((c)->phys_addr + (o))
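
/*
 * Usage sketch (illustrative only): assuming the IV of an operation was placed
 * at a hypothetical byte offset "iv_offset" from the start of the
 * rte_crypto_op "op", its virtual and physical addresses could be obtained as:
 *
 *	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);
 *	rte_iova_t iv_iova = rte_crypto_op_ctophys_offset(op, iv_offset);
 */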
81 
82 /**
83  * Crypto parameters range description
84  */
85 struct rte_crypto_param_range {
86 	uint16_t min;	/**< minimum size */
87 	uint16_t max;	/**< maximum size */
88 	uint16_t increment;
	/**< If a range of sizes is supported,
	 * this parameter is used to indicate the
	 * increments in byte size that are supported
	 * between the minimum and maximum.
	 */
94 };
95 
96 /**
97  * Data-unit supported lengths of cipher algorithms.
98  * A bit can represent any set of data-unit sizes
99  * (single size, multiple size, range, etc).
100  */
101 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
104 
105 /**
106  * Symmetric Crypto Capability
107  */
108 struct rte_cryptodev_symmetric_capability {
109 	enum rte_crypto_sym_xform_type xform_type;
110 	/**< Transform type : Authentication / Cipher / AEAD */
111 	RTE_STD_C11
112 	union {
113 		struct {
114 			enum rte_crypto_auth_algorithm algo;
115 			/**< authentication algorithm */
116 			uint16_t block_size;
117 			/**< algorithm block size */
118 			struct rte_crypto_param_range key_size;
119 			/**< auth key size range */
120 			struct rte_crypto_param_range digest_size;
121 			/**< digest size range */
122 			struct rte_crypto_param_range aad_size;
123 			/**< Additional authentication data size range */
124 			struct rte_crypto_param_range iv_size;
125 			/**< Initialisation vector data size range */
126 		} auth;
127 		/**< Symmetric Authentication transform capabilities */
128 		struct {
129 			enum rte_crypto_cipher_algorithm algo;
130 			/**< cipher algorithm */
131 			uint16_t block_size;
132 			/**< algorithm block size */
133 			struct rte_crypto_param_range key_size;
134 			/**< cipher key size range */
135 			struct rte_crypto_param_range iv_size;
136 			/**< Initialisation vector data size range */
137 			uint32_t dataunit_set;
138 			/**<
139 			 * Supported data-unit lengths:
140 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
141 			 * or 0 for lengths defined in the algorithm standard.
142 			 */
143 		} cipher;
144 		/**< Symmetric Cipher transform capabilities */
145 		struct {
146 			enum rte_crypto_aead_algorithm algo;
147 			/**< AEAD algorithm */
148 			uint16_t block_size;
149 			/**< algorithm block size */
150 			struct rte_crypto_param_range key_size;
151 			/**< AEAD key size range */
152 			struct rte_crypto_param_range digest_size;
153 			/**< digest size range */
154 			struct rte_crypto_param_range aad_size;
155 			/**< Additional authentication data size range */
156 			struct rte_crypto_param_range iv_size;
157 			/**< Initialisation vector data size range */
158 		} aead;
159 	};
160 };
161 
162 /**
163  * Asymmetric Xform Crypto Capability
164  *
165  */
166 struct rte_cryptodev_asymmetric_xform_capability {
167 	enum rte_crypto_asym_xform_type xform_type;
168 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
169 
170 	uint32_t op_types;
171 	/**<
172 	 * Bitmask for supported rte_crypto_asym_op_type or
173 	 * rte_crypto_asym_ke_type. Which enum is used is determined
174 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
175 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
176 	 * it is rte_crypto_asym_op_type.
177 	 */
178 
179 	__extension__
180 	union {
181 		struct rte_crypto_param_range modlen;
182 		/**< Range of modulus length supported by modulus based xform.
		 * Value 0 means implementation default.
184 		 */
185 	};
186 };
187 
188 /**
189  * Asymmetric Crypto Capability
190  *
191  */
192 struct rte_cryptodev_asymmetric_capability {
193 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
194 };
195 
196 
197 /** Structure used to capture a capability of a crypto device */
198 struct rte_cryptodev_capabilities {
199 	enum rte_crypto_op_type op;
200 	/**< Operation type */
201 
202 	RTE_STD_C11
203 	union {
204 		struct rte_cryptodev_symmetric_capability sym;
205 		/**< Symmetric operation capability parameters */
206 		struct rte_cryptodev_asymmetric_capability asym;
207 		/**< Asymmetric operation capability parameters */
208 	};
209 };
210 
211 /** Structure used to describe crypto algorithms */
212 struct rte_cryptodev_sym_capability_idx {
213 	enum rte_crypto_sym_xform_type type;
214 	union {
215 		enum rte_crypto_cipher_algorithm cipher;
216 		enum rte_crypto_auth_algorithm auth;
217 		enum rte_crypto_aead_algorithm aead;
218 	} algo;
219 };
220 
221 /**
222  * Structure used to describe asymmetric crypto xforms
223  * Each xform maps to one asym algorithm.
224  *
225  */
226 struct rte_cryptodev_asym_capability_idx {
227 	enum rte_crypto_asym_xform_type type;
228 	/**< Asymmetric xform (algo) type */
229 };
230 
231 /**
232  * Provide capabilities available for defined device and algorithm
233  *
234  * @param	dev_id		The identifier of the device.
235  * @param	idx		Description of crypto algorithms.
236  *
237  * @return
 *   - Return description of the symmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
240  */
241 const struct rte_cryptodev_symmetric_capability *
242 rte_cryptodev_sym_capability_get(uint8_t dev_id,
243 		const struct rte_cryptodev_sym_capability_idx *idx);
244 
245 /**
246  *  Provide capabilities available for defined device and xform
247  *
248  * @param	dev_id		The identifier of the device.
249  * @param	idx		Description of asym crypto xform.
250  *
251  * @return
 *   - Return description of the asymmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
254  */
255 __rte_experimental
256 const struct rte_cryptodev_asymmetric_xform_capability *
257 rte_cryptodev_asym_capability_get(uint8_t dev_id,
258 		const struct rte_cryptodev_asym_capability_idx *idx);
259 
260 /**
261  * Check if key size and initial vector are supported
262  * in crypto cipher capability
263  *
264  * @param	capability	Description of the symmetric crypto capability.
265  * @param	key_size	Cipher key size.
266  * @param	iv_size		Cipher initial vector size.
267  *
268  * @return
269  *   - Return 0 if the parameters are in range of the capability.
270  *   - Return -1 if the parameters are out of range of the capability.
271  */
272 int
273 rte_cryptodev_sym_capability_check_cipher(
274 		const struct rte_cryptodev_symmetric_capability *capability,
275 		uint16_t key_size, uint16_t iv_size);
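
/*
 * Usage sketch (illustrative only): checking whether a device "dev_id"
 * supports AES-CBC with a 16-byte key and a 16-byte IV could look as follows:
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) < 0)
 *		printf("AES-CBC-128 not supported by device %u\n", dev_id);
 */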
276 
277 /**
278  * Check if key size and initial vector are supported
279  * in crypto auth capability
280  *
281  * @param	capability	Description of the symmetric crypto capability.
282  * @param	key_size	Auth key size.
283  * @param	digest_size	Auth digest size.
284  * @param	iv_size		Auth initial vector size.
285  *
286  * @return
287  *   - Return 0 if the parameters are in range of the capability.
288  *   - Return -1 if the parameters are out of range of the capability.
289  */
290 int
291 rte_cryptodev_sym_capability_check_auth(
292 		const struct rte_cryptodev_symmetric_capability *capability,
293 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
294 
295 /**
296  * Check if key, digest, AAD and initial vector sizes are supported
297  * in crypto AEAD capability
298  *
299  * @param	capability	Description of the symmetric crypto capability.
300  * @param	key_size	AEAD key size.
301  * @param	digest_size	AEAD digest size.
302  * @param	aad_size	AEAD AAD size.
303  * @param	iv_size		AEAD IV size.
304  *
305  * @return
306  *   - Return 0 if the parameters are in range of the capability.
307  *   - Return -1 if the parameters are out of range of the capability.
308  */
309 int
310 rte_cryptodev_sym_capability_check_aead(
311 		const struct rte_cryptodev_symmetric_capability *capability,
312 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
313 		uint16_t iv_size);
314 
315 /**
316  * Check if op type is supported
317  *
318  * @param	capability	Description of the asymmetric crypto capability.
319  * @param	op_type		op type
320  *
321  * @return
322  *   - Return 1 if the op type is supported
323  *   - Return 0 if unsupported
324  */
325 __rte_experimental
326 int
327 rte_cryptodev_asym_xform_capability_check_optype(
328 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
329 		enum rte_crypto_asym_op_type op_type);
330 
331 /**
332  * Check if modulus length is in supported range
333  *
334  * @param	capability	Description of the asymmetric crypto capability.
335  * @param	modlen		modulus length.
336  *
337  * @return
338  *   - Return 0 if the parameters are in range of the capability.
339  *   - Return -1 if the parameters are out of range of the capability.
340  */
341 __rte_experimental
342 int
343 rte_cryptodev_asym_xform_capability_check_modlen(
344 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
345 		uint16_t modlen);
346 
347 /**
348  * Provide the cipher algorithm enum, given an algorithm string
349  *
350  * @param	algo_enum	A pointer to the cipher algorithm
351  *				enum to be filled
 * @param	algo_string	Cipher algo string
353  *
354  * @return
 * - Return -1 if the string is not valid
 * - Return 0 if the string is valid
357  */
358 int
359 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
360 		const char *algo_string);
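
/*
 * Usage sketch (illustrative only), assuming "aes-cbc" matches one of the
 * cipher algorithm strings recognised by the library:
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm string\n");
 */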
361 
362 /**
363  * Provide the authentication algorithm enum, given an algorithm string
364  *
365  * @param	algo_enum	A pointer to the authentication algorithm
366  *				enum to be filled
367  * @param	algo_string	Authentication algo string
368  *
369  * @return
 * - Return -1 if the string is not valid
 * - Return 0 if the string is valid
372  */
373 int
374 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
375 		const char *algo_string);
376 
377 /**
378  * Provide the AEAD algorithm enum, given an algorithm string
379  *
380  * @param	algo_enum	A pointer to the AEAD algorithm
381  *				enum to be filled
382  * @param	algo_string	AEAD algorithm string
383  *
384  * @return
 * - Return -1 if the string is not valid
 * - Return 0 if the string is valid
387  */
388 int
389 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
390 		const char *algo_string);
391 
392 /**
393  * Provide the Asymmetric xform enum, given an xform string
394  *
395  * @param	xform_enum	A pointer to the xform type
396  *				enum to be filled
397  * @param	xform_string	xform string
398  *
399  * @return
400  * - Return -1 if string is not valid
401  * - Return 0 if the string is valid
402  */
403 __rte_experimental
404 int
405 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
406 		const char *xform_string);
407 
408 
409 /** Macro used at end of crypto PMD list */
410 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
411 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
412 
413 
414 /**
415  * Crypto device supported feature flags
416  *
417  * Note:
418  * New features flags should be added to the end of the list
419  *
420  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
421  */
422 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
423 /**< Symmetric crypto operations are supported */
424 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
425 /**< Asymmetric crypto operations are supported */
426 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
427 /**< Chaining symmetric crypto operations are supported */
428 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
429 /**< Utilises CPU SIMD SSE instructions */
430 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
431 /**< Utilises CPU SIMD AVX instructions */
432 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
433 /**< Utilises CPU SIMD AVX2 instructions */
434 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
435 /**< Utilises CPU AES-NI instructions */
436 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
437 /**< Operations are off-loaded to an
438  * external hardware accelerator
439  */
440 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
441 /**< Utilises CPU SIMD AVX512 instructions */
442 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
443 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
444  * are supported
445  */
446 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
447 /**< Out-of-place Scatter-gather (SGL) buffers are
448  * supported in input and output
449  */
450 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
451 /**< Out-of-place Scatter-gather (SGL) buffers are supported
452  * in input, combined with linear buffers (LB), with a
453  * single segment in output
454  */
455 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
456 /**< Out-of-place Scatter-gather (SGL) buffers are supported
457  * in output, combined with linear buffers (LB) in input
458  */
459 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
460 /**< Out-of-place linear buffers (LB) are supported in input and output */
461 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
462 /**< Utilises CPU NEON instructions */
463 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
464 /**< Utilises ARM CPU Cryptographic Extensions */
465 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
466 /**< Support Security Protocol Processing */
467 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
468 /**< Support RSA Private Key OP with exponent */
469 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
470 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
471 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
472 /**< Support encrypted-digest operations where digest is appended to data */
473 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
474 /**< Support asymmetric session-less operations */
475 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
476 /**< Support symmetric cpu-crypto processing */
477 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
478 /**< Support symmetric session-less operations */
479 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
480 /**< Support operations on data which is not byte aligned */
481 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
482 /**< Support accelerator specific symmetric raw data-path APIs */
483 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
484 /**< Support operations on multiple data-units message */
485 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
486 /**< Support wrapped key in cipher xform  */
487 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
488 /**< Support inner checksum computation/verification */
489 
490 /**
491  * Get the name of a crypto device feature flag
492  *
493  * @param	flag	The mask describing the flag.
494  *
495  * @return
496  *   The name of this flag, or NULL if it's not a valid feature flag.
497  */
499 extern const char *
500 rte_cryptodev_get_feature_name(uint64_t flag);
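
/*
 * Usage sketch (illustrative only): printing the name of every feature flag
 * advertised by a device, assuming "feature_flags" was read from a
 * struct rte_cryptodev_info:
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((feature_flags & flag) != 0 && name != NULL)
 *			printf("  %s\n", name);
 *	}
 */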
501 
502 /**  Crypto device information */
503 struct rte_cryptodev_info {
504 	const char *driver_name;	/**< Driver name. */
505 	uint8_t driver_id;		/**< Driver identifier */
506 	struct rte_device *device;	/**< Generic device information. */
507 
508 	uint64_t feature_flags;
509 	/**< Feature flags exposes HW/SW features for the given device */
510 
511 	const struct rte_cryptodev_capabilities *capabilities;
512 	/**< Array of devices supported capabilities */
513 
514 	unsigned max_nb_queue_pairs;
	/**< Maximum number of queue pairs supported by device. */
516 
517 	uint16_t min_mbuf_headroom_req;
518 	/**< Minimum mbuf headroom required by device */
519 
520 	uint16_t min_mbuf_tailroom_req;
521 	/**< Minimum mbuf tailroom required by device */
522 
523 	struct {
524 		unsigned max_nb_sessions;
525 		/**< Maximum number of sessions supported by device.
526 		 * If 0, the device does not have any limitation in
527 		 * number of sessions that can be used.
528 		 */
529 	} sym;
530 };
531 
532 #define RTE_CRYPTODEV_DETACHED  (0)
533 #define RTE_CRYPTODEV_ATTACHED  (1)
534 
535 /** Definitions of Crypto device event types */
536 enum rte_cryptodev_event_type {
537 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
538 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
539 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
540 };
541 
542 /** Crypto device queue pair configuration structure. */
543 struct rte_cryptodev_qp_conf {
544 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
545 	struct rte_mempool *mp_session;
546 	/**< The mempool for creating session in sessionless mode */
547 };
548 
549 /**
550  * Function type used for processing crypto ops when enqueue/dequeue burst is
551  * called.
552  *
553  * The callback function is called on enqueue/dequeue burst immediately.
554  *
555  * @param	dev_id		The identifier of the device.
556  * @param	qp_id		The index of the queue pair on which ops are
557  *				enqueued/dequeued. The value must be in the
558  *				range [0, nb_queue_pairs - 1] previously
559  *				supplied to *rte_cryptodev_configure*.
560  * @param	ops		The address of an array of *nb_ops* pointers
561  *				to *rte_crypto_op* structures which contain
562  *				the crypto operations to be processed.
563  * @param	nb_ops		The number of operations to process.
564  * @param	user_param	The arbitrary user parameter passed in by the
565  *				application when the callback was originally
566  *				registered.
567  * @return			The number of ops to be enqueued to the
568  *				crypto device.
569  */
570 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
571 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
572 
573 /**
574  * Typedef for application callback function to be registered by application
575  * software for notification of device events
576  *
577  * @param	dev_id	Crypto device identifier
578  * @param	event	Crypto device event to register for notification of.
 * @param	cb_arg	User specified parameter to be passed to the
 *			user's callback function.
581  */
582 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
583 		enum rte_cryptodev_event_type event, void *cb_arg);
584 
585 
586 /** Crypto Device statistics */
587 struct rte_cryptodev_stats {
588 	uint64_t enqueued_count;
589 	/**< Count of all operations enqueued */
590 	uint64_t dequeued_count;
591 	/**< Count of all operations dequeued */
592 
593 	uint64_t enqueue_err_count;
594 	/**< Total error count on operations enqueued */
595 	uint64_t dequeue_err_count;
596 	/**< Total error count on operations dequeued */
597 };
598 
599 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
600 /**< Max length of name of crypto PMD */
601 
602 /**
603  * Get the device identifier for the named crypto device.
604  *
605  * @param	name	device name to select the device structure.
606  *
607  * @return
608  *   - Returns crypto device identifier on success.
609  *   - Return -1 on failure to find named crypto device.
610  */
611 extern int
612 rte_cryptodev_get_dev_id(const char *name);
613 
614 /**
615  * Get the crypto device name given a device identifier.
616  *
617  * @param dev_id
618  *   The identifier of the device
619  *
620  * @return
621  *   - Returns crypto device name.
622  *   - Returns NULL if crypto device is not present.
623  */
624 extern const char *
625 rte_cryptodev_name_get(uint8_t dev_id);
626 
627 /**
628  * Get the total number of crypto devices that have been successfully
629  * initialised.
630  *
631  * @return
632  *   - The total number of usable crypto devices.
633  */
634 extern uint8_t
635 rte_cryptodev_count(void);
636 
637 /**
 * Get the number of crypto devices of a given driver type.
639  *
640  * @param	driver_id	driver identifier.
641  *
642  * @return
 *   Returns the number of crypto devices.
644  */
645 extern uint8_t
646 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
647 
648 /**
649  * Get number and identifiers of attached crypto devices that
650  * use the same crypto driver.
651  *
652  * @param	driver_name	driver name.
653  * @param	devices		output devices identifiers.
654  * @param	nb_devices	maximal number of devices.
655  *
656  * @return
 *   Returns the number of attached crypto devices.
658  */
659 uint8_t
660 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
661 		uint8_t nb_devices);
/**
663  * Return the NUMA socket to which a device is connected
664  *
665  * @param dev_id
666  *   The identifier of the device
667  * @return
668  *   The NUMA socket id to which the device is connected or
669  *   a default of zero if the socket could not be determined.
 *   -1 is returned if the dev_id value is out of range.
671  */
672 extern int
673 rte_cryptodev_socket_id(uint8_t dev_id);
674 
675 /** Crypto device configuration structure */
676 struct rte_cryptodev_config {
677 	int socket_id;			/**< Socket to allocate resources on */
678 	uint16_t nb_queue_pairs;
679 	/**< Number of queue pairs to configure on device */
680 	uint64_t ff_disable;
681 	/**< Feature flags to be disabled. Only the following features are
682 	 * allowed to be disabled,
683 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
684 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_SECURITY
686 	 */
687 };
688 
689 /**
690  * Configure a device.
691  *
692  * This function must be invoked first before any other function in the
693  * API. This function can also be re-invoked when a device is in the
694  * stopped state.
695  *
696  * @param	dev_id		The identifier of the device to configure.
697  * @param	config		The crypto device configuration structure.
698  *
699  * @return
700  *   - 0: Success, device configured.
701  *   - <0: Error code returned by the driver configuration function.
702  */
703 extern int
704 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
705 
706 /**
 * Start a device.
 *
 * The device start step is the last one and consists of setting the configured
 * offload features and starting the processing units (queue pairs) of the
 * device.
 * On success, all basic functions exported by the API (enqueue/dequeue and
 * so on) can be invoked.
714  *
715  * @param dev_id
716  *   The identifier of the device.
717  * @return
718  *   - 0: Success, device started.
719  *   - <0: Error code of the driver device start function.
720  */
721 extern int
722 rte_cryptodev_start(uint8_t dev_id);
723 
724 /**
 * Stop a device. The device can be restarted with a call to
726  * rte_cryptodev_start()
727  *
728  * @param	dev_id		The identifier of the device.
729  */
730 extern void
731 rte_cryptodev_stop(uint8_t dev_id);
732 
733 /**
 * Close a device. The device cannot be restarted!
735  *
736  * @param	dev_id		The identifier of the device.
737  *
738  * @return
739  *  - 0 on successfully closing device
740  *  - <0 on failure to close device
741  */
742 extern int
743 rte_cryptodev_close(uint8_t dev_id);
744 
745 /**
 * Allocate and set up a queue pair for a device.
 *
749  * @param	dev_id		The identifier of the device.
 * @param	queue_pair_id	The index of the queue pair to set up. The
751  *				value must be in the range [0, nb_queue_pair
752  *				- 1] previously supplied to
753  *				rte_cryptodev_configure().
754  * @param	qp_conf		The pointer to the configuration data to be
755  *				used for the queue pair.
756  * @param	socket_id	The *socket_id* argument is the socket
757  *				identifier in case of NUMA. The value can be
758  *				*SOCKET_ID_ANY* if there is no NUMA constraint
759  *				for the DMA memory allocated for the receive
760  *				queue pair.
761  *
762  * @return
763  *   - 0: Success, queue pair correctly set up.
764  *   - <0: Queue pair configuration failed
765  */
766 extern int
767 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
768 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
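
/*
 * Typical bring-up sketch (illustrative only), assuming "dev_id" is a valid
 * device and "sess_mp" is an already created session mempool:
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *		.ff_disable = 0,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *					   conf.socket_id) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
 */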
769 
770 /**
771  * Get the status of queue pairs setup on a specific crypto device
772  *
773  * @param	dev_id		Crypto device identifier.
 * @param	queue_pair_id	The index of the queue pair to query. The
 *				value must be in the range [0, nb_queue_pair
 *				- 1] previously supplied to
 *				rte_cryptodev_configure().
778  * @return
779  *   - 0: qp was not configured
 *   - 1: qp was configured
 *   - -EINVAL: device was not configured
782  */
783 __rte_experimental
784 int
785 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
786 
787 /**
788  * Get the number of queue pairs on a specific crypto device
789  *
790  * @param	dev_id		Crypto device identifier.
791  * @return
792  *   - The number of configured queue pairs.
793  */
794 extern uint16_t
795 rte_cryptodev_queue_pair_count(uint8_t dev_id);
796 
797 
798 /**
799  * Retrieve the general I/O statistics of a device.
800  *
801  * @param	dev_id		The identifier of the device.
802  * @param	stats		A pointer to a structure of type
803  *				*rte_cryptodev_stats* to be filled with the
804  *				values of device counters.
805  * @return
806  *   - Zero if successful.
807  *   - Non-zero otherwise.
808  */
809 extern int
810 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
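
/*
 * Usage sketch (illustrative only), assuming <inttypes.h> is included:
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64
 *		       " deq_err %" PRIu64 "\n",
 *		       stats.enqueued_count, stats.dequeued_count,
 *		       stats.enqueue_err_count, stats.dequeue_err_count);
 */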
811 
812 /**
813  * Reset the general I/O statistics of a device.
814  *
815  * @param	dev_id		The identifier of the device.
816  */
817 extern void
818 rte_cryptodev_stats_reset(uint8_t dev_id);
819 
820 /**
821  * Retrieve the contextual information of a device.
822  *
823  * @param	dev_id		The identifier of the device.
824  * @param	dev_info	A pointer to a structure of type
825  *				*rte_cryptodev_info* to be filled with the
826  *				contextual information of the device.
827  *
828  * @note The capabilities field of dev_info is set to point to the first
829  * element of an array of struct rte_cryptodev_capabilities. The element after
 * the last valid element has its op field set to
831  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
832  */
833 extern void
834 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
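
/*
 * Usage sketch (illustrative only): walking the returned capability array up
 * to its RTE_CRYPTO_OP_TYPE_UNDEFINED terminator:
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (cap = info.capabilities;
 *	     cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
 *		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
 *		    cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 *			printf("cipher algo %d supported\n",
 *			       (int)cap->sym.cipher.algo);
 *	}
 */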
835 
836 
837 /**
838  * Register a callback function for specific device id.
839  *
840  * @param	dev_id		Device id.
 * @param	event		The event of interest.
842  * @param	cb_fn		User supplied callback function to be called.
843  * @param	cb_arg		Pointer to the parameters for the registered
844  *				callback.
845  *
846  * @return
847  *  - On success, zero.
848  *  - On failure, a negative value.
849  */
850 extern int
851 rte_cryptodev_callback_register(uint8_t dev_id,
852 		enum rte_cryptodev_event_type event,
853 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
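
/*
 * Usage sketch (illustrative only): registering a handler for error events,
 * where "error_cb" is an application-defined function:
 *
 *	static void
 *	error_cb(uint8_t dev_id, enum rte_cryptodev_event_type event, void *arg)
 *	{
 *		RTE_SET_USED(arg);
 *		printf("device %u reported event %d\n", dev_id, (int)event);
 *	}
 *
 *	...
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *					error_cb, NULL);
 */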
854 
855 /**
856  * Unregister a callback function for specific device id.
857  *
858  * @param	dev_id		The device identifier.
 * @param	event		The event of interest.
860  * @param	cb_fn		User supplied callback function to be called.
861  * @param	cb_arg		Pointer to the parameters for the registered
862  *				callback.
863  *
864  * @return
865  *  - On success, zero.
866  *  - On failure, a negative value.
867  */
868 extern int
869 rte_cryptodev_callback_unregister(uint8_t dev_id,
870 		enum rte_cryptodev_event_type event,
871 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
872 
873 struct rte_cryptodev_callback;
874 
875 /** Structure to keep track of registered callbacks */
876 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
877 
878 /**
879  * Structure used to hold information about the callbacks to be called for a
880  * queue pair on enqueue/dequeue.
881  */
882 struct rte_cryptodev_cb {
883 	struct rte_cryptodev_cb *next;
884 	/**< Pointer to next callback */
885 	rte_cryptodev_callback_fn fn;
886 	/**< Pointer to callback function */
887 	void *arg;
888 	/**< Pointer to argument */
889 };
890 
891 /**
892  * @internal
893  * Structure used to hold information about the RCU for a queue pair.
894  */
895 struct rte_cryptodev_cb_rcu {
896 	struct rte_cryptodev_cb *next;
897 	/**< Pointer to next callback */
898 	struct rte_rcu_qsbr *qsbr;
899 	/**< RCU QSBR variable per queue pair */
900 };
901 
902 void *
903 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
904 
905 /**
906  * Create a symmetric session mempool.
907  *
908  * @param name
909  *   The unique mempool name.
910  * @param nb_elts
911  *   The number of elements in the mempool.
912  * @param elt_size
913  *   The size of the element. This value will be ignored if it is smaller than
 *   the minimum session header size required for the system. For users who
 *   want to use the same mempool for the sym session and session private data,
 *   it can be the maximum of all existing devices' private data sizes plus the
 *   session header size.
918  * @param cache_size
919  *   The number of per-lcore cache elements
920  * @param priv_size
921  *   The private data size of each session.
922  * @param socket_id
923  *   The *socket_id* argument is the socket identifier in the case of
924  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
925  *   constraint for the reserved zone.
926  *
927  * @return
 *  - On success returns the created session mempool pointer
 *  - On failure returns NULL
930  */
931 __rte_experimental
932 struct rte_mempool *
933 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
934 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
935 	int socket_id);
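
/*
 * Usage sketch (illustrative only); the element count, cache size and element
 * size below are hypothetical and must be adapted to the devices in use:
 *
 *	uint32_t elt_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *sess_mp;
 *
 *	sess_mp = rte_cryptodev_sym_session_pool_create("sym_sess_mp", 2048,
 *			elt_sz, 128, 0, rte_socket_id());
 *	if (sess_mp == NULL)
 *		printf("session pool creation failed\n");
 */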
936 
937 
938 /**
939  * Create an asymmetric session mempool.
940  *
941  * @param name
942  *   The unique mempool name.
943  * @param nb_elts
944  *   The number of elements in the mempool.
945  * @param cache_size
946  *   The number of per-lcore cache elements
947  * @param user_data_size
948  *   The size of user data to be placed after session private data.
949  * @param socket_id
950  *   The *socket_id* argument is the socket identifier in the case of
951  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
952  *   constraint for the reserved zone.
953  *
954  * @return
955  *  - On success return mempool
956  *  - On failure returns NULL
957  */
958 __rte_experimental
959 struct rte_mempool *
960 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
961 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
962 
963 /**
964  * Create symmetric crypto session and fill out private data for the device id,
965  * based on its device type.
966  *
967  * @param   dev_id   ID of device that we want the session to be used on
968  * @param   xforms   Symmetric crypto transform operations to apply on flow
969  *                   processed with this session
970  * @param   mp       Mempool where the private data is allocated.
971  *
972  * @return
973  *  - On success return pointer to sym-session.
974  *  - On failure returns NULL.
975  */
976 void *
977 rte_cryptodev_sym_session_create(uint8_t dev_id,
978 		struct rte_crypto_sym_xform *xforms,
979 		struct rte_mempool *mp);
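
/*
 * Usage sketch (illustrative only): creating an AES-CBC-128 encryption
 * session, assuming "sess_mp" was created with
 * rte_cryptodev_sym_session_pool_create(), "key" holds 16 key bytes and
 * IV_OFFSET is an application-chosen offset within the crypto operation:
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform, sess_mp);
 *
 *	if (sess == NULL)
 *		printf("session creation failed\n");
 */
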
980 /**
981  * Create and initialise an asymmetric crypto session structure.
982  * Calls the PMD to configure the private session data.
983  *
984  * @param   dev_id   ID of device that we want the session to be used on
985  * @param   xforms   Asymmetric crypto transform operations to apply on flow
986  *                   processed with this session
987  * @param   mp       mempool to allocate asymmetric session
988  *                   objects from
989  * @param   session  void ** for session to be used
990  *
991  * @return
992  *  - 0 on success.
993  *  - -EINVAL on invalid arguments.
994  *  - -ENOMEM on memory error for session allocation.
995  *  - -ENOTSUP if device doesn't support session configuration.
996  */
997 __rte_experimental
998 int
999 rte_cryptodev_asym_session_create(uint8_t dev_id,
1000 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1001 		void **session);
1002 
1003 /**
 * Free a session for a device and return it to its mempool.
 * It is the application's responsibility to ensure that there are
 * no in-flight operations still using the session.
1007  *
1008  * @param   dev_id   ID of device that uses the session.
1009  * @param   sess     Session header to be freed.
1010  *
1011  * @return
1012  *  - 0 if successful.
 *  - -EINVAL if the session is NULL or the device ids do not match.
1014  */
1015 int
1016 rte_cryptodev_sym_session_free(uint8_t dev_id,
1017 	void *sess);
1018 
1019 /**
1020  * Clears and frees asymmetric crypto session header and private data,
1021  * returning it to its original mempool.
1022  *
1023  * @param   dev_id   ID of device that uses the asymmetric session.
1024  * @param   sess     Session header to be freed.
1025  *
1026  * @return
1027  *  - 0 if successful.
1028  *  - -EINVAL if device is invalid or session is NULL.
1029  */
1030 __rte_experimental
1031 int
1032 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1033 
1034 /**
1035  * Get the size of the asymmetric session header.
1036  *
1037  * @return
 *   Size of the asymmetric session header.
1039  */
1040 __rte_experimental
1041 unsigned int
1042 rte_cryptodev_asym_get_header_session_size(void);
1043 
1044 /**
1045  * Get the size of the private symmetric session data
1046  * for a device.
1047  *
1048  * @param	dev_id		The device identifier.
1049  *
1050  * @return
1051  *   - Size of the private data, if successful
1052  *   - 0 if device is invalid or does not have private
1053  *   symmetric session
1054  */
1055 unsigned int
1056 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1057 
1058 /**
1059  * Get the size of the private data for asymmetric session
1060  * on device
1061  *
1062  * @param	dev_id		The device identifier.
1063  *
1064  * @return
1065  *   - Size of the asymmetric private data, if successful
1066  *   - 0 if device is invalid or does not have private session
1067  */
1068 __rte_experimental
1069 unsigned int
1070 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1071 
1072 /**
 * Check if the device index corresponds to a valid, attached crypto device.
1074  *
1075  * @param	dev_id	Crypto device index.
1076  *
1077  * @return
1078  *   - If the device index is valid (1) or not (0).
1079  */
1080 unsigned int
1081 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1082 
1083 /**
1084  * Provide driver identifier.
1085  *
1086  * @param name
1087  *   The pointer to a driver name.
1088  * @return
1089  *  The driver type identifier or -1 if no driver found
1090  */
1091 int rte_cryptodev_driver_id_get(const char *name);
1092 
1093 /**
1094  * Provide driver name.
1095  *
1096  * @param driver_id
1097  *   The driver identifier.
1098  * @return
 *  The driver name or NULL if no driver was found
1100  */
1101 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1102 
1103 /**
1104  * Store user data in a session.
1105  *
1106  * @param	sess		Session pointer allocated by
1107  *				*rte_cryptodev_sym_session_create*.
1108  * @param	data		Pointer to the user data.
1109  * @param	size		Size of the user data.
1110  *
1111  * @return
1112  *  - On success, zero.
1113  *  - On failure, a negative value.
1114  */
1115 __rte_experimental
1116 int
1117 rte_cryptodev_sym_session_set_user_data(void *sess,
1118 					void *data,
1119 					uint16_t size);
1120 
1121 #define CRYPTO_SESS_OPAQUE_DATA_OFF 0
1122 /**
1123  * Get opaque data from session handle
1124  */
1125 static inline uint64_t
1126 rte_cryptodev_sym_session_opaque_data_get(void *sess)
1127 {
1128 	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
1129 }
1130 
1131 /**
1132  * Set opaque data in session handle
1133  */
1134 static inline void
1135 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1136 {
1137 	uint64_t *data;
1138 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1139 	*data = opaque;
1140 }
1141 
1142 /**
1143  * Get user data stored in a session.
1144  *
1145  * @param	sess		Session pointer allocated by
1146  *				*rte_cryptodev_sym_session_create*.
1147  *
1148  * @return
1149  *  - On success return pointer to user data.
1150  *  - On failure returns NULL.
1151  */
1152 __rte_experimental
1153 void *
1154 rte_cryptodev_sym_session_get_user_data(void *sess);
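
/*
 * Usage sketch (illustrative only), assuming the session mempool was created
 * with a user data size of at least sizeof(struct app_ctx), where
 * "struct app_ctx" is a hypothetical application structure:
 *
 *	struct app_ctx ctx = { 0 };
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *						    sizeof(ctx)) == 0) {
 *		struct app_ctx *p =
 *			rte_cryptodev_sym_session_get_user_data(sess);
 *	}
 */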
1155 
1156 /**
1157  * Store user data in an asymmetric session.
1158  *
1159  * @param	sess		Session pointer allocated by
1160  *				*rte_cryptodev_asym_session_create*.
1161  * @param	data		Pointer to the user data.
1162  * @param	size		Size of the user data.
1163  *
1164  * @return
1165  *  - On success, zero.
1166  *  - -EINVAL if the session pointer is invalid.
1167  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1168  */
1169 __rte_experimental
1170 int
1171 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1172 
1173 /**
1174  * Get user data stored in an asymmetric session.
1175  *
1176  * @param	sess		Session pointer allocated by
1177  *				*rte_cryptodev_asym_session_create*.
1178  *
1179  * @return
1180  *  - On success return pointer to user data.
1181  *  - On failure returns NULL.
1182  */
1183 __rte_experimental
1184 void *
1185 rte_cryptodev_asym_session_get_user_data(void *sess);
1186 
1187 /**
1188  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1189  * on user provided data.
1190  *
1191  * @param	dev_id	The device identifier.
1192  * @param	sess	Cryptodev session structure
1193  * @param	ofs	Start and stop offsets for auth and cipher operations
1194  * @param	vec	Vectorized operation descriptor
1195  *
1196  * @return
1197  *  - Returns number of successfully processed packets.
1198  */
1199 __rte_experimental
1200 uint32_t
1201 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1202 	void *sess, union rte_crypto_sym_ofs ofs,
1203 	struct rte_crypto_sym_vec *vec);
1204 
1205 /**
1206  * Get the size of the raw data-path context buffer.
1207  *
1208  * @param	dev_id		The device identifier.
1209  *
1210  * @return
1211  *   - If the device supports raw data-path APIs, return the context size.
1212  *   - If the device does not support the APIs, return -1.
1213  */
1214 __rte_experimental
1215 int
1216 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1217 
1218 /**
1219  * Set session event meta data
1220  *
1221  * @param	dev_id		The device identifier.
1222  * @param	sess            Crypto or security session.
1223  * @param	op_type         Operation type.
1224  * @param	sess_type       Session type.
1225  * @param	ev_mdata	Pointer to the event crypto meta data
1226  *				(aka *union rte_event_crypto_metadata*)
1227  * @param	size            Size of ev_mdata.
1228  *
1229  * @return
1230  *  - On success, zero.
1231  *  - On failure, a negative value.
1232  */
1233 __rte_experimental
1234 int
1235 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1236 	enum rte_crypto_op_type op_type,
1237 	enum rte_crypto_op_sess_type sess_type,
1238 	void *ev_mdata, uint16_t size);
1239 
1240 /**
1241  * Union of different crypto session types, including session-less xform
1242  * pointer.
1243  */
union rte_cryptodev_session_ctx {
	void *crypto_sess;
1245 	struct rte_crypto_sym_xform *xform;
1246 	struct rte_security_session *sec_sess;
1247 };
1248 
1249 /**
1250  * Enqueue a vectorized operation descriptor into the device queue but the
1251  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1252  * is called.
1253  *
1254  * @param	qp		Driver specific queue pair data.
1255  * @param	drv_ctx		Driver specific context data.
1256  * @param	vec		Vectorized operation descriptor.
1257  * @param	ofs		Start and stop offsets for auth and cipher
1258  *				operations.
1259  * @param	user_data	The array of user data for dequeue later.
1260  * @param	enqueue_status	Driver written value to specify the
1261  *				enqueue status. Possible values:
1262  *				- 1: The number of operations returned are
1263  *				     enqueued successfully.
1264  *				- 0: The number of operations returned are
1265  *				     cached into the queue but are not processed
1266  *				     until rte_cryptodev_raw_enqueue_done() is
1267  *				     called.
1268  *				- negative integer: Error occurred.
1269  * @return
1270  *   - The number of operations in the descriptor successfully enqueued or
1271  *     cached into the queue but not enqueued yet, depends on the
1272  *     "enqueue_status" value.
1273  */
1274 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1275 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1276 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1277 
1278 /**
1279  * Enqueue single raw data vector into the device queue but the driver may or
1280  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1281  *
1282  * @param	qp		Driver specific queue pair data.
1283  * @param	drv_ctx		Driver specific context data.
1284  * @param	data_vec	The buffer data vector.
1285  * @param	n_data_vecs	Number of buffer data vectors.
1286  * @param	ofs		Start and stop offsets for auth and cipher
1287  *				operations.
1288  * @param	iv		IV virtual and IOVA addresses
1289  * @param	digest		digest virtual and IOVA addresses
1290  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1291  *				depends on the algorithm used.
1292  * @param	user_data	The user data.
1293  * @return
1294  *   - 1: The data vector is enqueued successfully.
1295  *   - 0: The data vector is cached into the queue but is not processed
1296  *        until rte_cryptodev_raw_enqueue_done() is called.
1297  *   - negative integer: failure.
1298  */
1299 typedef int (*cryptodev_sym_raw_enqueue_t)(
1300 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1301 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1302 	struct rte_crypto_va_iova_ptr *iv,
1303 	struct rte_crypto_va_iova_ptr *digest,
1304 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1305 	void *user_data);
1306 
1307 /**
1308  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1309  * enqueued/dequeued operations.
1310  *
1311  * @param	qp		Driver specific queue pair data.
1312  * @param	drv_ctx		Driver specific context data.
1313  * @param	n		The total number of processed operations.
1314  * @return
1315  *   - On success return 0.
1316  *   - On failure return negative integer.
1317  */
1318 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1319 	uint32_t n);
1320 
1321 /**
 * Typedef of a user-provided function for the driver to get the dequeue count.
1323  * The function may return a fixed number or the number parsed from the user
1324  * data stored in the first processed operation.
1325  *
1326  * @param	user_data	Dequeued user data.
1327  * @return
1328  *  - The number of operations to be dequeued.
1329  **/
1330 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1331 
1332 /**
 * Typedef of a user-provided function to post-process a dequeued operation,
 * such as filling in its status.
1335  *
1336  * @param	user_data	Dequeued user data.
1337  * @param	index		Index number of the processed descriptor.
1338  * @param	is_op_success	Operation status provided by the driver.
1339  **/
1340 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1341 	uint32_t index, uint8_t is_op_success);
1342 
1343 /**
1344  * Dequeue a burst of symmetric crypto processing.
1345  *
1346  * @param	qp			Driver specific queue pair data.
1347  * @param	drv_ctx			Driver specific context data.
1348  * @param	get_dequeue_count	User provided callback function to
1349  *					obtain dequeue operation count.
1350  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1351  *					value is used to pass the maximum
1352  *					number of operations to be dequeued.
1353  * @param	post_dequeue		User provided callback function to
1354  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from the device queue. If
 *					*is_user_data_array* is set, there
 *					should be enough room to store all
 *					user data.
1360  * @param	is_user_data_array	Set 1 if every dequeued user data will
1361  *					be written into out_user_data array.
1362  *					Set 0 if only the first user data will
1363  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
 *					total successful operations count.
1366  * @param	dequeue_status		Driver written value to specify the
1367  *					dequeue status. Possible values:
1368  *					- 1: Successfully dequeued the number
1369  *					     of operations returned. The user
1370  *					     data previously set during enqueue
1371  *					     is stored in the "out_user_data".
1372  *					- 0: The number of operations returned
1373  *					     are completed and the user data is
1374  *					     stored in the "out_user_data", but
1375  *					     they are not freed from the queue
1376  *					     until
1377  *					     rte_cryptodev_raw_dequeue_done()
1378  *					     is called.
1379  *					- negative integer: Error occurred.
1380  * @return
1381  *   - The number of operations dequeued or completed but not freed from the
1382  *     queue, depends on "dequeue_status" value.
1383  */
1384 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1385 	uint8_t *drv_ctx,
1386 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1387 	uint32_t max_nb_to_dequeue,
1388 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1389 	void **out_user_data, uint8_t is_user_data_array,
1390 	uint32_t *n_success, int *dequeue_status);
1391 
1392 /**
1393  * Dequeue a symmetric crypto processing.
1394  *
1395  * @param	qp			Driver specific queue pair data.
1396  * @param	drv_ctx			Driver specific context data.
1397  * @param	dequeue_status		Driver written value to specify the
1398  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1400  *					     The user data is returned.
1401  *					- 0: The first operation in the queue
1402  *					     is completed and the user data
1403  *					     previously set during enqueue is
1404  *					     returned, but it is not freed from
1405  *					     the queue until
1406  *					     rte_cryptodev_raw_dequeue_done() is
1407  *					     called.
1408  *					- negative integer: Error occurred.
1409  * @param	op_status		Driver written value to specify
1410  *					operation status.
1411  * @return
1412  *   - The user data pointer retrieved from device queue or NULL if no
1413  *     operation is ready for dequeue.
1414  */
1415 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1416 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1417 		enum rte_crypto_op_status *op_status);
1418 
1419 /**
1420  * Context data for raw data-path API crypto process. The buffer of this
 * structure is to be allocated by the user application with a size equal
 * to or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1423  */
1424 struct rte_crypto_raw_dp_ctx {
1425 	void *qp_data;
1426 
1427 	cryptodev_sym_raw_enqueue_t enqueue;
1428 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1429 	cryptodev_sym_raw_operation_done_t enqueue_done;
1430 	cryptodev_sym_raw_dequeue_t dequeue;
1431 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1432 	cryptodev_sym_raw_operation_done_t dequeue_done;
1433 
1434 	/* Driver specific context data */
1435 	__extension__ uint8_t drv_ctx_data[];
1436 };
1437 
1438 /**
1439  * Configure raw data-path context data.
1440  *
1441  * NOTE:
1442  * After the context data is configured, the user should call
1443  * rte_cryptodev_raw_attach_session() before using it in
1444  * rte_cryptodev_raw_enqueue/dequeue function call.
1445  *
1446  * @param	dev_id		The device identifier.
1447  * @param	qp_id		The index of the queue pair from which to
1448  *				retrieve processed packets. The value must be
1449  *				in the range [0, nb_queue_pair - 1] previously
1450  *				supplied to rte_cryptodev_configure().
1451  * @param	ctx		The raw data-path context data.
1452  * @param	sess_type	session type.
1453  * @param	session_ctx	Session context data.
1454  * @param	is_update	Set 0 if it is to initialize the ctx.
1455  *				Set 1 if ctx is initialized and only to update
1456  *				session context data.
1457  * @return
1458  *   - On success return 0.
1459  *   - On failure return negative integer.
1460  */
1461 __rte_experimental
1462 int
1463 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1464 	struct rte_crypto_raw_dp_ctx *ctx,
1465 	enum rte_crypto_op_sess_type sess_type,
1466 	union rte_cryptodev_session_ctx session_ctx,
1467 	uint8_t is_update);
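
/*
 * Setup sketch (illustrative only): allocating and configuring a raw
 * data-path context for queue pair 0 with an existing symmetric session
 * "sess" on device "dev_id":
 *
 *	int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = NULL;
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	if (size > 0)
 *		ctx = rte_zmalloc(NULL, size, 0);
 *	if (ctx == NULL ||
 *	    rte_cryptodev_configure_raw_dp_ctx(dev_id, 0, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
 *		printf("raw data-path setup failed\n");
 */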
1468 
1469 /**
1470  * Enqueue a vectorized operation descriptor into the device queue but the
1471  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1472  * is called.
1473  *
1474  * @param	ctx		The initialized raw data-path context data.
1475  * @param	vec		Vectorized operation descriptor.
1476  * @param	ofs		Start and stop offsets for auth and cipher
1477  *				operations.
1478  * @param	user_data	The array of user data for dequeue later.
1479  * @param	enqueue_status	Driver written value to specify the
1480  *				enqueue status. Possible values:
1481  *				- 1: The number of operations returned are
1482  *				     enqueued successfully.
1483  *				- 0: The number of operations returned are
1484  *				     cached into the queue but are not processed
1485  *				     until rte_cryptodev_raw_enqueue_done() is
1486  *				     called.
1487  *				- negative integer: Error occurred.
1488  * @return
1489  *   - The number of operations in the descriptor successfully enqueued or
1490  *     cached into the queue but not enqueued yet, depends on the
1491  *     "enqueue_status" value.
1492  */
1493 __rte_experimental
1494 uint32_t
1495 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1496 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1497 	void **user_data, int *enqueue_status);
1498 
1499 /**
1500  * Enqueue single raw data vector into the device queue but the driver may or
1501  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1502  *
1503  * @param	ctx		The initialized raw data-path context data.
1504  * @param	data_vec	The buffer data vector.
1505  * @param	n_data_vecs	Number of buffer data vectors.
1506  * @param	ofs		Start and stop offsets for auth and cipher
1507  *				operations.
1508  * @param	iv		IV virtual and IOVA addresses
1509  * @param	digest		digest virtual and IOVA addresses
1510  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1511  *				depends on the algorithm used.
1512  * @param	user_data	The user data.
1513  * @return
1514  *   - 1: The data vector is enqueued successfully.
1515  *   - 0: The data vector is cached into the queue but is not processed
1516  *        until rte_cryptodev_raw_enqueue_done() is called.
1517  *   - negative integer: failure.
1518  */
1519 __rte_experimental
1520 static __rte_always_inline int
1521 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1522 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1523 	union rte_crypto_sym_ofs ofs,
1524 	struct rte_crypto_va_iova_ptr *iv,
1525 	struct rte_crypto_va_iova_ptr *digest,
1526 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1527 	void *user_data)
1528 {
1529 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1530 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1531 }
1532 
1533 /**
1534  * Start processing all enqueued operations from last
1535  * rte_cryptodev_configure_raw_dp_ctx() call.
1536  *
1537  * @param	ctx	The initialized raw data-path context data.
1538  * @param	n	The number of operations cached.
1539  * @return
1540  *   - On success return 0.
1541  *   - On failure return negative integer.
1542  */
1543 __rte_experimental
1544 int
1545 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1546 		uint32_t n);
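
/*
 * Enqueue-flow sketch (illustrative only): pushing one prepared data vector
 * through an already configured context "ctx" and then kicking the queue;
 * "data_vec", "n_vecs", "ofs", "iv", "digest", "auth_iv" and "op_data" are
 * assumed to have been filled in by the application:
 *
 *	int ret = rte_cryptodev_raw_enqueue(ctx, data_vec, n_vecs, ofs,
 *			&iv, &digest, &auth_iv, op_data);
 *
 *	if (ret == 0)
 *		ret = rte_cryptodev_raw_enqueue_done(ctx, 1);
 *	if (ret < 0)
 *		printf("raw enqueue failed\n");
 */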
1547 
1548 /**
1549  * Dequeue a burst of symmetric crypto processing.
1550  *
1551  * @param	ctx			The initialized raw data-path context
1552  *					data.
1553  * @param	get_dequeue_count	User provided callback function to
1554  *					obtain dequeue operation count.
1555  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1556  *					value is used to pass the maximum
1557  *					number of operations to be dequeued.
1558  * @param	post_dequeue		User provided callback function to
1559  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from the device queue. If
 *					*is_user_data_array* is set, there
 *					should be enough room to store all
 *					user data.
1565  * @param	is_user_data_array	Set 1 if every dequeued user data will
1566  *					be written into out_user_data array.
1567  *					Set 0 if only the first user data will
1568  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
 *					total successful operations count.
1571  * @param	dequeue_status		Driver written value to specify the
1572  *					dequeue status. Possible values:
1573  *					- 1: Successfully dequeued the number
1574  *					     of operations returned. The user
1575  *					     data previously set during enqueue
1576  *					     is stored in the "out_user_data".
1577  *					- 0: The number of operations returned
1578  *					     are completed and the user data is
1579  *					     stored in the "out_user_data", but
1580  *					     they are not freed from the queue
1581  *					     until
1582  *					     rte_cryptodev_raw_dequeue_done()
1583  *					     is called.
1584  *					- negative integer: Error occurred.
1585  * @return
1586  *   - The number of operations dequeued or completed but not freed from the
1587  *     queue, depends on "dequeue_status" value.
1588  */
1589 __rte_experimental
1590 uint32_t
1591 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1592 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1593 	uint32_t max_nb_to_dequeue,
1594 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1595 	void **out_user_data, uint8_t is_user_data_array,
1596 	uint32_t *n_success, int *dequeue_status);
1597 
1598 /**
1599  * Dequeue a symmetric crypto processing.
1600  *
1601  * @param	ctx			The initialized raw data-path context
1602  *					data.
1603  * @param	dequeue_status		Driver written value to specify the
1604  *					dequeue status. Possible values:
1605  *					- 1: Successfully dequeued an operation.
1606  *					     The user data is returned.
1607  *					- 0: The first operation in the queue
1608  *					     is completed and the user data
1609  *					     previously set during enqueue is
1610  *					     returned, but it is not freed from
1611  *					     the queue until
1612  *					     rte_cryptodev_raw_dequeue_done() is
1613  *					     called.
1614  *					- negative integer: Error occurred.
1615  * @param	op_status		Driver written value to specify
1616  *					operation status.
1617  * @return
1618  *   - The user data pointer retrieved from the device queue, or NULL if no
1619  *     operation is ready for dequeue.
1620  */
1621 __rte_experimental
1622 static __rte_always_inline void *
1623 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1624 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1625 {
1626 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1627 			op_status);
1628 }
1629 
1630 /**
1631  * Inform the queue pair that dequeue operations are finished.
1632  *
1633  * @param	ctx	The initialized raw data-path context data.
1634  * @param	n	The number of operations dequeued but not yet freed from the queue.
1635  * @return
1636  *   - On success return 0.
1637  *   - On failure return negative integer.
1638  */
1639 __rte_experimental
1640 int
1641 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1642 		uint32_t n);
1643 
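/*
 * A minimal sketch of the single-operation dequeue path (illustrative only;
 * "ctx" is an initialized raw data-path context):
 *
 *	enum rte_crypto_op_status op_status;
 *	int dequeue_status;
 *	void *user_data;
 *	uint32_t n_held = 0;
 *
 *	for (;;) {
 *		user_data = rte_cryptodev_raw_dequeue(ctx, &dequeue_status,
 *				&op_status);
 *		if (user_data == NULL || dequeue_status < 0)
 *			break;
 *		if (dequeue_status == 0)
 *			n_held++;
 *	}
 *	if (n_held != 0)
 *		rte_cryptodev_raw_dequeue_done(ctx, n_held);
 *
 * Operations reported with dequeue_status 0 stay in the queue until
 * rte_cryptodev_raw_dequeue_done() acknowledges them; operations reported
 * with dequeue_status 1 have already been released by the driver.
 */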
1644 /**
1645  * Add a user callback for a given crypto device and queue pair which will be
1646  * called on crypto ops enqueue.
1647  *
1648  * This API configures a function to be called for each burst of crypto ops
1649  * enqueued on a given crypto device queue pair. The return value is a pointer
1650  * that can be used later to remove the callback using
1651  * rte_cryptodev_remove_enq_callback().
1652  *
1653  * Callbacks registered by the application do not survive
1654  * rte_cryptodev_configure(), as it reinitializes the callback list.
1655  * It is the user's responsibility to remove all installed callbacks before
1656  * calling rte_cryptodev_configure() to avoid possible memory leaks.
1657  * The application is expected to call the add API after rte_cryptodev_configure().
1658  *
1659  * Multiple functions can be registered per queue pair and they are called
1660  * in the order they were added. The API does not restrict the maximum number
1661  * of callbacks.
1662  *
1663  * @param	dev_id		The identifier of the device.
1664  * @param	qp_id		The index of the queue pair on which ops are
1665  *				to be enqueued for processing. The value
1666  *				must be in the range [0, nb_queue_pairs - 1]
1667  *				previously supplied to
1668  *				*rte_cryptodev_configure*.
1669  * @param	cb_fn		The callback function
1670  * @param	cb_arg		A generic pointer parameter which will be passed
1671  *				to each invocation of the callback function on
1672  *				this crypto device and queue pair.
1673  *
1674  * @return
1675  *  - NULL on error; rte_errno will contain the error code.
1676  *  - On success, a pointer value which can later be used to remove the
1677  *    callback.
1678  */
1680 __rte_experimental
1681 struct rte_cryptodev_cb *
1682 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1683 			       uint16_t qp_id,
1684 			       rte_cryptodev_callback_fn cb_fn,
1685 			       void *cb_arg);
1686 
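/*
 * A minimal sketch of registering an enqueue callback (illustrative only):
 * the callback counts the operations submitted to queue pair 0 of device 0.
 * The counter and the device/queue pair identifiers are hypothetical
 * application choices, the callback follows the rte_cryptodev_callback_fn
 * prototype, and <stdio.h>/<rte_errno.h> are assumed to be included.
 *
 *	static uint64_t enq_count;
 *
 *	static uint16_t
 *	count_enq_cb(uint16_t dev_id __rte_unused, uint16_t qp_id __rte_unused,
 *		struct rte_crypto_op **ops __rte_unused, uint16_t nb_ops,
 *		void *user_param __rte_unused)
 *	{
 *		enq_count += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_enq_callback(0, 0, count_enq_cb, NULL);
 *	if (cb == NULL)
 *		printf("add_enq_callback failed: %d\n", rte_errno);
 *
 * The value returned by the callback becomes the number of operations that
 * are actually passed to the device; returning less than nb_ops causes only
 * the leading operations of the burst to be enqueued.
 */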
1687 /**
1688  * Remove a user callback function for a given crypto device and queue pair.
1689  *
1690  * This function is used to remove enqueue callbacks that were added to a
1691  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1692  *
1695  * @param	dev_id		The identifier of the device.
1696  * @param	qp_id		The index of the queue pair on which ops are
1697  *				to be enqueued. The value must be in the
1698  *				range [0, nb_queue_pairs - 1] previously
1699  *				supplied to *rte_cryptodev_configure*.
1700  * @param	cb		Pointer to user supplied callback created via
1701  *				rte_cryptodev_add_enq_callback().
1702  *
1703  * @return
1704  *   -  0: Success. Callback was removed.
1705  *   - <0: The dev_id or the qp_id is out of range, or the callback
1706  *         is NULL or not found for the crypto device queue pair.
1707  */
1709 __rte_experimental
1710 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1711 				      uint16_t qp_id,
1712 				      struct rte_cryptodev_cb *cb);
1713 
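/*
 * Continuing the sketch above: the callback must be removed before the
 * device is reconfigured, since rte_cryptodev_configure() reinitializes the
 * callback list ("cb" is the pointer previously returned by
 * rte_cryptodev_add_enq_callback()).
 *
 *	if (rte_cryptodev_remove_enq_callback(0, 0, cb) < 0)
 *		printf("remove_enq_callback failed\n");
 *
 * Only once the removal succeeds is it safe to call rte_cryptodev_configure()
 * again without leaking the callback memory.
 */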
1714 /**
1715  * Add a user callback for a given crypto device and queue pair which will be
1716  * called on crypto ops dequeue.
1717  *
1718  * This API configures a function to be called for each burst of crypto ops
1719  * dequeued from a given crypto device queue pair. The return value is a pointer
1720  * that can be used later to remove the callback using
1721  * rte_cryptodev_remove_deq_callback().
1722  *
1723  * Callbacks registered by the application do not survive
1724  * rte_cryptodev_configure(), as it reinitializes the callback list.
1725  * It is the user's responsibility to remove all installed callbacks before
1726  * calling rte_cryptodev_configure() to avoid possible memory leaks.
1727  * The application is expected to call the add API after rte_cryptodev_configure().
1728  *
1729  * Multiple functions can be registered per queue pair and they are called
1730  * in the order they were added. The API does not restrict the maximum number
1731  * of callbacks.
1732  *
1733  * @param	dev_id		The identifier of the device.
1734  * @param	qp_id		The index of the queue pair on which ops are
1735  *				to be dequeued. The value must be in the
1736  *				range [0, nb_queue_pairs - 1] previously
1737  *				supplied to *rte_cryptodev_configure*.
1738  * @param	cb_fn		The callback function
1739  * @param	cb_arg		A generic pointer parameter which will be passed
1740  *				to each invocation of the callback function on
1741  *				this crypto device and queue pair.
1742  *
1743  * @return
1744  *   - NULL on error; rte_errno will contain the error code.
1745  *   - On success, a pointer value which can later be used to remove the
1746  *     callback.
1747  */
1749 __rte_experimental
1750 struct rte_cryptodev_cb *
1751 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1752 			       uint16_t qp_id,
1753 			       rte_cryptodev_callback_fn cb_fn,
1754 			       void *cb_arg);
1755 
1756 /**
1757  * Remove a user callback function for a given crypto device and queue pair.
1758  *
1759  * This function is used to remove dequeue callbacks that were added to a
1760  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1761  *
1764  * @param	dev_id		The identifier of the device.
1765  * @param	qp_id		The index of the queue pair on which ops are
1766  *				to be dequeued. The value must be in the
1767  *				range [0, nb_queue_pairs - 1] previously
1768  *				supplied to *rte_cryptodev_configure*.
1769  * @param	cb		Pointer to user supplied callback created via
1770  *				rte_cryptodev_add_deq_callback().
1771  *
1772  * @return
1773  *   -  0: Success. Callback was removed.
1774  *   - <0: The dev_id or the qp_id is out of range, or the callback
1775  *         is NULL or not found for the crypto device queue pair.
1776  */
1777 __rte_experimental
1778 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1779 				      uint16_t qp_id,
1780 				      struct rte_cryptodev_cb *cb);
1781 
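/*
 * A minimal sketch of a dequeue-side callback, mirroring the enqueue example
 * above (illustrative only; device 0, queue pair 0 and the error counter are
 * hypothetical application choices):
 *
 *	static uint64_t deq_err_count;
 *
 *	static uint16_t
 *	count_deq_err_cb(uint16_t dev_id __rte_unused, uint16_t qp_id __rte_unused,
 *		struct rte_crypto_op **ops, uint16_t nb_ops,
 *		void *user_param __rte_unused)
 *	{
 *		uint16_t i;
 *
 *		for (i = 0; i < nb_ops; i++)
 *			if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *				deq_err_count++;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_deq_callback(0, 0, count_deq_err_cb, NULL);
 *	...
 *	rte_cryptodev_remove_deq_callback(0, 0, cb);
 *
 * The callback is invoked on every rte_cryptodev_dequeue_burst() call for
 * that queue pair, after the driver has filled in the operation status.
 */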
1782 #include <rte_cryptodev_core.h>
1783 /**
1784  *
1785  * Dequeue a burst of processed crypto operations from a queue on the crypto
1786  * device. The dequeued operations are stored in *rte_crypto_op* structures
1787  * whose pointers are supplied in the *ops* array.
1788  *
1789  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1790  * actually dequeued, which is the number of *rte_crypto_op* data structures
1791  * effectively supplied into the *ops* array.
1792  *
1793  * A return value equal to *nb_ops* indicates that the queue contained
1794  * at least *nb_ops* operations, and this is likely to signify that other
1795  * processed operations remain in the device's output queue. Applications
1796  * implementing a "retrieve as many processed operations as possible" policy
1797  * can check this specific case and keep invoking the
1798  * rte_cryptodev_dequeue_burst() function until a value less than
1799  * *nb_ops* is returned.
1800  *
1801  * The rte_cryptodev_dequeue_burst() function does not provide any error
1802  * notification to avoid the corresponding overhead.
1803  *
1804  * @param	dev_id		The symmetric crypto device identifier
1805  * @param	qp_id		The index of the queue pair from which to
1806  *				retrieve processed packets. The value must be
1807  *				in the range [0, nb_queue_pairs - 1] previously
1808  *				supplied to rte_cryptodev_configure().
1809  * @param	ops		The address of an array of pointers to
1810  *				*rte_crypto_op* structures that must be
1811  *				large enough to store *nb_ops* pointers in it.
1812  * @param	nb_ops		The maximum number of operations to dequeue.
1813  *
1814  * @return
1815  *   - The number of operations actually dequeued, which is the number
1816  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1817  *   *ops* array.
1818  */
1819 static inline uint16_t
1820 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1821 		struct rte_crypto_op **ops, uint16_t nb_ops)
1822 {
1823 	const struct rte_crypto_fp_ops *fp_ops;
1824 	void *qp;
1825 
1826 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1827 
1828 	fp_ops = &rte_crypto_fp_ops[dev_id];
1829 	qp = fp_ops->qp.data[qp_id];
1830 
1831 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1832 
1833 #ifdef RTE_CRYPTO_CALLBACKS
1834 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1835 		struct rte_cryptodev_cb_rcu *list;
1836 		struct rte_cryptodev_cb *cb;
1837 
1838 		/* __ATOMIC_RELEASE memory order was used when the
1839 		 * callback was inserted into the list.
1840 		 * Since there is a clear dependency between loading
1841 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1842 		 * not required.
1843 		 */
1844 		list = &fp_ops->qp.deq_cb[qp_id];
1845 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1846 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1847 
1848 		while (cb != NULL) {
1849 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1850 					cb->arg);
1851 			cb = cb->next;
1852 		}
1853 
1854 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1855 	}
1856 #endif
1857 	return nb_ops;
1858 }
1859 
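/*
 * A minimal sketch of the "retrieve as many processed operations as
 * possible" policy described above (illustrative only; device 0, queue
 * pair 0 and the burst size are hypothetical application choices):
 *
 *	struct rte_crypto_op *deq_ops[32];
 *	uint64_t errors = 0;
 *	uint16_t nb_deq;
 *
 *	do {
 *		uint16_t i;
 *
 *		nb_deq = rte_cryptodev_dequeue_burst(0, 0, deq_ops, 32);
 *		for (i = 0; i < nb_deq; i++) {
 *			if (deq_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *				errors++;
 *			rte_crypto_op_free(deq_ops[i]);
 *		}
 *	} while (nb_deq == 32);
 */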
1860 /**
1861  * Enqueue a burst of operations for processing on a crypto device.
1862  *
1863  * The rte_cryptodev_enqueue_burst() function is invoked to place
1864  * crypto operations on the queue *qp_id* of the device designated by
1865  * its *dev_id*.
1866  *
1867  * The *nb_ops* parameter is the number of operations to process which are
1868  * supplied in the *ops* array of *rte_crypto_op* structures.
1869  *
1870  * The rte_cryptodev_enqueue_burst() function returns the number of
1871  * operations it actually enqueued for processing. A return value equal to
1872  * *nb_ops* means that all packets have been enqueued.
1873  *
1874  * @param	dev_id		The identifier of the device.
1875  * @param	qp_id		The index of the queue pair on which packets
1876  *				are to be enqueued for processing. The value
1877  *				must be in the range [0, nb_queue_pairs - 1]
1878  *				previously supplied to
1879  *				 *rte_cryptodev_configure*.
1880  * @param	ops		The address of an array of *nb_ops* pointers
1881  *				to *rte_crypto_op* structures which contain
1882  *				the crypto operations to be processed.
1883  * @param	nb_ops		The number of operations to process.
1884  *
1885  * @return
1886  * The number of operations actually enqueued on the crypto device. The return
1887  * value can be less than the value of the *nb_ops* parameter when the
1888  * crypto device's queue is full or if invalid parameters are specified in
1889  * a *rte_crypto_op*.
1890  */
1891 static inline uint16_t
1892 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1893 		struct rte_crypto_op **ops, uint16_t nb_ops)
1894 {
1895 	const struct rte_crypto_fp_ops *fp_ops;
1896 	void *qp;
1897 
1898 	fp_ops = &rte_crypto_fp_ops[dev_id];
1899 	qp = fp_ops->qp.data[qp_id];
1900 #ifdef RTE_CRYPTO_CALLBACKS
1901 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1902 		struct rte_cryptodev_cb_rcu *list;
1903 		struct rte_cryptodev_cb *cb;
1904 
1905 		/* __ATOMIC_RELEASE memory order was used when the
1906 		 * callback was inserted into the list.
1907 		 * Since there is a clear dependency between loading
1908 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1909 		 * not required.
1910 		 */
1911 		list = &fp_ops->qp.enq_cb[qp_id];
1912 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1913 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1914 
1915 		while (cb != NULL) {
1916 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1917 					cb->arg);
1918 			cb = cb->next;
1919 		}
1920 
1921 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1922 	}
1923 #endif
1924 
1925 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1926 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
1927 }
1928 
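/*
 * A minimal sketch of enqueuing a prepared burst and retrying the part the
 * device could not accept (illustrative only; device 0, queue pair 0 and the
 * fully populated ops[]/nb_ops are assumed to be set up by the application):
 *
 *	uint16_t nb_enq = 0;
 *
 *	while (nb_enq < nb_ops)
 *		nb_enq += rte_cryptodev_enqueue_burst(0, 0, &ops[nb_enq],
 *				nb_ops - nb_enq);
 *
 * A real datapath would normally bound the retries or interleave dequeues,
 * since a full device queue only drains as processed operations are
 * retrieved with rte_cryptodev_dequeue_burst().
 */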
1929 
1930 
1931 #ifdef __cplusplus
1932 }
1933 #endif
1934 
1935 #endif /* _RTE_CRYPTODEV_H_ */
1936