xref: /dpdk/lib/cryptodev/rte_cryptodev.h (revision 3c4898ef762eeb2578b9ae3d7f6e3a0e5cbca8c8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include <rte_compat.h>
22 #include "rte_kvargs.h"
23 #include "rte_crypto.h"
24 #include <rte_common.h>
25 #include <rte_rcu_qsbr.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
29 extern const char **rte_cyptodev_names;
30 
31 /* Logging Macros */
32 
33 #define CDEV_LOG_ERR(...) \
34 	RTE_LOG(ERR, CRYPTODEV, \
35 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
36 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
37 
38 #define CDEV_LOG_INFO(...) \
39 	RTE_LOG(INFO, CRYPTODEV, \
40 		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
41 			RTE_FMT_TAIL(__VA_ARGS__,)))
42 
43 #define CDEV_LOG_DEBUG(...) \
44 	RTE_LOG(DEBUG, CRYPTODEV, \
45 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
46 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
47 
48 #define CDEV_PMD_TRACE(...) \
49 	RTE_LOG(DEBUG, CRYPTODEV, \
50 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
51 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
52 
53 /**
54  * A macro that points to an offset from the start
55  * of the crypto operation structure (rte_crypto_op)
56  *
57  * The returned pointer is cast to type t.
58  *
59  * @param c
60  *   The crypto operation.
61  * @param o
62  *   The offset from the start of the crypto operation.
63  * @param t
64  *   The type to cast the result into.
65  */
66 #define rte_crypto_op_ctod_offset(c, t, o)	\
67 	((t)((char *)(c) + (o)))
68 
69 /**
70  * A macro that returns the physical address that points
71  * to an offset from the start of the crypto operation
72  * (rte_crypto_op)
73  *
74  * @param c
75  *   The crypto operation.
76  * @param o
77  *   The offset from the start of the crypto operation
78  *   to calculate address from.
79  */
80 #define rte_crypto_op_ctophys_offset(c, o)	\
81 	(rte_iova_t)((c)->phys_addr + (o))
82 
83 /**
84  * Crypto parameters range description
85  */
86 struct rte_crypto_param_range {
87 	uint16_t min;	/**< minimum size */
88 	uint16_t max;	/**< maximum size */
89 	uint16_t increment;
90 	/**< if a range of sizes are supported,
91 	 * this parameter is used to indicate
92 	 * increments in byte size that are supported
93 	 * between the minimum and maximum
94 	 */
95 };
96 
97 /**
98  * Data-unit supported lengths of cipher algorithms.
99  * A bit can represent any set of data-unit sizes
100  * (single size, multiple size, range, etc).
101  */
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
105 
106 /**
107  * Symmetric Crypto Capability
108  */
109 struct rte_cryptodev_symmetric_capability {
110 	enum rte_crypto_sym_xform_type xform_type;
111 	/**< Transform type : Authentication / Cipher / AEAD */
112 	union {
113 		struct {
114 			enum rte_crypto_auth_algorithm algo;
115 			/**< authentication algorithm */
116 			uint16_t block_size;
117 			/**< algorithm block size */
118 			struct rte_crypto_param_range key_size;
119 			/**< auth key size range */
120 			struct rte_crypto_param_range digest_size;
121 			/**< digest size range */
122 			struct rte_crypto_param_range aad_size;
123 			/**< Additional authentication data size range */
124 			struct rte_crypto_param_range iv_size;
125 			/**< Initialisation vector data size range */
126 		} auth;
127 		/**< Symmetric Authentication transform capabilities */
128 		struct {
129 			enum rte_crypto_cipher_algorithm algo;
130 			/**< cipher algorithm */
131 			uint16_t block_size;
132 			/**< algorithm block size */
133 			struct rte_crypto_param_range key_size;
134 			/**< cipher key size range */
135 			struct rte_crypto_param_range iv_size;
136 			/**< Initialisation vector data size range */
137 			uint32_t dataunit_set;
138 			/**<
139 			 * Supported data-unit lengths:
140 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
141 			 * or 0 for lengths defined in the algorithm standard.
142 			 */
143 		} cipher;
144 		/**< Symmetric Cipher transform capabilities */
145 		struct {
146 			enum rte_crypto_aead_algorithm algo;
147 			/**< AEAD algorithm */
148 			uint16_t block_size;
149 			/**< algorithm block size */
150 			struct rte_crypto_param_range key_size;
151 			/**< AEAD key size range */
152 			struct rte_crypto_param_range digest_size;
153 			/**< digest size range */
154 			struct rte_crypto_param_range aad_size;
155 			/**< Additional authentication data size range */
156 			struct rte_crypto_param_range iv_size;
157 			/**< Initialisation vector data size range */
158 		} aead;
159 	};
160 };
161 
162 /**
163  * Asymmetric Xform Crypto Capability
164  */
165 struct rte_cryptodev_asymmetric_xform_capability {
166 	enum rte_crypto_asym_xform_type xform_type;
167 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
168 
169 	uint32_t op_types;
170 	/**<
171 	 * Bitmask for supported rte_crypto_asym_op_type or
172 	 * rte_crypto_asym_ke_type. Which enum is used is determined
173 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
174 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
175 	 * it is rte_crypto_asym_op_type.
176 	 */
177 
178 	__extension__
179 	union {
180 		struct rte_crypto_param_range modlen;
181 		/**< Range of modulus length supported by modulus based xform.
182 		 * Value 0 mean implementation default
183 		 */
184 
185 		uint8_t internal_rng;
186 		/**< Availability of random number generator for Elliptic curve based xform.
187 		 * Value 0 means unavailable, and application should pass the required
188 		 * random value. Otherwise, PMD would internally compute the random number.
189 		 */
190 	};
191 
192 	uint64_t hash_algos;
193 	/**< Bitmask of hash algorithms supported for op_type. */
194 };
195 
196 /**
197  * Asymmetric Crypto Capability
198  */
199 struct rte_cryptodev_asymmetric_capability {
200 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
201 };
202 
203 
204 /** Structure used to capture a capability of a crypto device */
205 struct rte_cryptodev_capabilities {
206 	enum rte_crypto_op_type op;
207 	/**< Operation type */
208 
209 	union {
210 		struct rte_cryptodev_symmetric_capability sym;
211 		/**< Symmetric operation capability parameters */
212 		struct rte_cryptodev_asymmetric_capability asym;
213 		/**< Asymmetric operation capability parameters */
214 	};
215 };
216 
217 /** Structure used to describe crypto algorithms */
218 struct rte_cryptodev_sym_capability_idx {
219 	enum rte_crypto_sym_xform_type type;
220 	union {
221 		enum rte_crypto_cipher_algorithm cipher;
222 		enum rte_crypto_auth_algorithm auth;
223 		enum rte_crypto_aead_algorithm aead;
224 	} algo;
225 };
226 
227 /**
228  * Structure used to describe asymmetric crypto xforms
229  * Each xform maps to one asym algorithm.
230  */
231 struct rte_cryptodev_asym_capability_idx {
232 	enum rte_crypto_asym_xform_type type;
233 	/**< Asymmetric xform (algo) type */
234 };
235 
236 /**
237  * Provide capabilities available for defined device and algorithm
238  *
239  * @param	dev_id		The identifier of the device.
240  * @param	idx		Description of crypto algorithms.
241  *
242  * @return
243  *   - Return description of the symmetric crypto capability if exist.
244  *   - Return NULL if the capability not exist.
245  */
246 const struct rte_cryptodev_symmetric_capability *
247 rte_cryptodev_sym_capability_get(uint8_t dev_id,
248 		const struct rte_cryptodev_sym_capability_idx *idx);
249 
250 /**
251  *  Provide capabilities available for defined device and xform
252  *
253  * @param	dev_id		The identifier of the device.
254  * @param	idx		Description of asym crypto xform.
255  *
256  * @return
257  *   - Return description of the asymmetric crypto capability if exist.
258  *   - Return NULL if the capability not exist.
259  */
260 const struct rte_cryptodev_asymmetric_xform_capability *
261 rte_cryptodev_asym_capability_get(uint8_t dev_id,
262 		const struct rte_cryptodev_asym_capability_idx *idx);
263 
264 /**
265  * Check if key size and initial vector are supported
266  * in crypto cipher capability
267  *
268  * @param	capability	Description of the symmetric crypto capability.
269  * @param	key_size	Cipher key size.
270  * @param	iv_size		Cipher initial vector size.
271  *
272  * @return
273  *   - Return 0 if the parameters are in range of the capability.
274  *   - Return -1 if the parameters are out of range of the capability.
275  */
276 int
277 rte_cryptodev_sym_capability_check_cipher(
278 		const struct rte_cryptodev_symmetric_capability *capability,
279 		uint16_t key_size, uint16_t iv_size);
280 
281 /**
282  * Check if key size and initial vector are supported
283  * in crypto auth capability
284  *
285  * @param	capability	Description of the symmetric crypto capability.
286  * @param	key_size	Auth key size.
287  * @param	digest_size	Auth digest size.
288  * @param	iv_size		Auth initial vector size.
289  *
290  * @return
291  *   - Return 0 if the parameters are in range of the capability.
292  *   - Return -1 if the parameters are out of range of the capability.
293  */
294 int
295 rte_cryptodev_sym_capability_check_auth(
296 		const struct rte_cryptodev_symmetric_capability *capability,
297 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
298 
299 /**
300  * Check if key, digest, AAD and initial vector sizes are supported
301  * in crypto AEAD capability
302  *
303  * @param	capability	Description of the symmetric crypto capability.
304  * @param	key_size	AEAD key size.
305  * @param	digest_size	AEAD digest size.
306  * @param	aad_size	AEAD AAD size.
307  * @param	iv_size		AEAD IV size.
308  *
309  * @return
310  *   - Return 0 if the parameters are in range of the capability.
311  *   - Return -1 if the parameters are out of range of the capability.
312  */
313 int
314 rte_cryptodev_sym_capability_check_aead(
315 		const struct rte_cryptodev_symmetric_capability *capability,
316 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
317 		uint16_t iv_size);
318 
319 /**
320  * Check if op type is supported
321  *
322  * @param	capability	Description of the asymmetric crypto capability.
323  * @param	op_type		op type
324  *
325  * @return
326  *   - Return 1 if the op type is supported
327  *   - Return 0 if unsupported
328  */
329 int
330 rte_cryptodev_asym_xform_capability_check_optype(
331 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
332 		enum rte_crypto_asym_op_type op_type);
333 
334 /**
335  * Check if modulus length is in supported range
336  *
337  * @param	capability	Description of the asymmetric crypto capability.
338  * @param	modlen		modulus length.
339  *
340  * @return
341  *   - Return 0 if the parameters are in range of the capability.
342  *   - Return -1 if the parameters are out of range of the capability.
343  */
344 int
345 rte_cryptodev_asym_xform_capability_check_modlen(
346 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
347 		uint16_t modlen);
348 
349 /**
350  * Check if hash algorithm is supported.
351  *
352  * @param	capability	Asymmetric crypto capability.
353  * @param	hash		Hash algorithm.
354  *
355  * @return
356  *   - Return true if the hash algorithm is supported.
357  *   - Return false if the hash algorithm is not supported.
358  */
359 bool
360 rte_cryptodev_asym_xform_capability_check_hash(
361 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
362 	enum rte_crypto_auth_algorithm hash);
363 
364 /**
365  * Provide the cipher algorithm enum, given an algorithm string
366  *
367  * @param	algo_enum	A pointer to the cipher algorithm
368  *				enum to be filled
369  * @param	algo_string	Authentication algo string
370  *
371  * @return
372  * - Return -1 if string is not valid
373  * - Return 0 is the string is valid
374  */
375 int
376 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
377 		const char *algo_string);
378 
379 /**
380  * Provide the authentication algorithm enum, given an algorithm string
381  *
382  * @param	algo_enum	A pointer to the authentication algorithm
383  *				enum to be filled
384  * @param	algo_string	Authentication algo string
385  *
386  * @return
387  * - Return -1 if string is not valid
388  * - Return 0 is the string is valid
389  */
390 int
391 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
392 		const char *algo_string);
393 
394 /**
395  * Provide the AEAD algorithm enum, given an algorithm string
396  *
397  * @param	algo_enum	A pointer to the AEAD algorithm
398  *				enum to be filled
399  * @param	algo_string	AEAD algorithm string
400  *
401  * @return
402  * - Return -1 if string is not valid
403  * - Return 0 is the string is valid
404  */
405 int
406 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
407 		const char *algo_string);
408 
409 /**
410  * Provide the Asymmetric xform enum, given an xform string
411  *
412  * @param	xform_enum	A pointer to the xform type
413  *				enum to be filled
414  * @param	xform_string	xform string
415  *
416  * @return
417  * - Return -1 if string is not valid
418  * - Return 0 if the string is valid
419  */
420 int
421 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
422 		const char *xform_string);
423 
424 /**
425  * Provide the cipher algorithm string, given an algorithm enum.
426  *
427  * @param	algo_enum	cipher algorithm enum
428  *
429  * @return
430  * - Return NULL if enum is not valid
431  * - Return algo_string corresponding to enum
432  */
433 __rte_experimental
434 const char *
435 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
436 
437 /**
438  * Provide the authentication algorithm string, given an algorithm enum.
439  *
440  * @param	algo_enum	auth algorithm enum
441  *
442  * @return
443  * - Return NULL if enum is not valid
444  * - Return algo_string corresponding to enum
445  */
446 __rte_experimental
447 const char *
448 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
449 
450 /**
451  * Provide the AEAD algorithm string, given an algorithm enum.
452  *
453  * @param	algo_enum	AEAD algorithm enum
454  *
455  * @return
456  * - Return NULL if enum is not valid
457  * - Return algo_string corresponding to enum
458  */
459 __rte_experimental
460 const char *
461 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
462 
463 /**
464  * Provide the Asymmetric xform string, given an xform enum.
465  *
466  * @param	xform_enum	xform type enum
467  *
468  * @return
469  * - Return NULL, if enum is not valid.
470  * - Return xform string, for valid enum.
471  */
472 __rte_experimental
473 const char *
474 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
475 
476 
477 /** Macro used at end of crypto PMD list */
478 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
479 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
480 
481 
482 /**
483  * Crypto device supported feature flags
484  *
485  * Note:
486  * New features flags should be added to the end of the list
487  *
488  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
489  */
490 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
491 /**< Symmetric crypto operations are supported */
492 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
493 /**< Asymmetric crypto operations are supported */
494 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
495 /**< Chaining symmetric crypto operations are supported */
496 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
497 /**< Utilises CPU SIMD SSE instructions */
498 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
499 /**< Utilises CPU SIMD AVX instructions */
500 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
501 /**< Utilises CPU SIMD AVX2 instructions */
502 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
503 /**< Utilises CPU AES-NI instructions */
504 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
505 /**< Operations are off-loaded to an
506  * external hardware accelerator
507  */
508 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
509 /**< Utilises CPU SIMD AVX512 instructions */
510 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
511 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
512  * are supported
513  */
514 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
515 /**< Out-of-place Scatter-gather (SGL) buffers are
516  * supported in input and output
517  */
518 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
519 /**< Out-of-place Scatter-gather (SGL) buffers are supported
520  * in input, combined with linear buffers (LB), with a
521  * single segment in output
522  */
523 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
524 /**< Out-of-place Scatter-gather (SGL) buffers are supported
525  * in output, combined with linear buffers (LB) in input
526  */
527 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
528 /**< Out-of-place linear buffers (LB) are supported in input and output */
529 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
530 /**< Utilises CPU NEON instructions */
531 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
532 /**< Utilises ARM CPU Cryptographic Extensions */
533 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
534 /**< Support Security Protocol Processing */
535 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
536 /**< Support RSA Private Key OP with exponent */
537 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
538 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
539 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
540 /**< Support encrypted-digest operations where digest is appended to data */
541 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
542 /**< Support asymmetric session-less operations */
543 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
544 /**< Support symmetric cpu-crypto processing */
545 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
546 /**< Support symmetric session-less operations */
547 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
548 /**< Support operations on data which is not byte aligned */
549 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
550 /**< Support accelerator specific symmetric raw data-path APIs */
551 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
552 /**< Support operations on multiple data-units message */
553 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
554 /**< Support wrapped key in cipher xform  */
555 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
556 /**< Support inner checksum computation/verification */
557 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
558 /**< Support Rx injection after security processing */
559 
560 /**
561  * Get the name of a crypto device feature flag
562  *
563  * @param	flag	The mask describing the flag.
564  *
565  * @return
566  *   The name of this flag, or NULL if it's not a valid feature flag.
567  */
568 const char *
569 rte_cryptodev_get_feature_name(uint64_t flag);
570 
571 /**  Crypto device information */
572 /* Structure rte_cryptodev_info 8< */
573 struct rte_cryptodev_info {
574 	const char *driver_name;	/**< Driver name. */
575 	uint8_t driver_id;		/**< Driver identifier */
576 	struct rte_device *device;	/**< Generic device information. */
577 
578 	uint64_t feature_flags;
579 	/**< Feature flags exposes HW/SW features for the given device */
580 
581 	const struct rte_cryptodev_capabilities *capabilities;
582 	/**< Array of devices supported capabilities */
583 
584 	unsigned max_nb_queue_pairs;
585 	/**< Maximum number of queues pairs supported by device. */
586 
587 	uint16_t min_mbuf_headroom_req;
588 	/**< Minimum mbuf headroom required by device */
589 
590 	uint16_t min_mbuf_tailroom_req;
591 	/**< Minimum mbuf tailroom required by device */
592 
593 	struct {
594 		unsigned max_nb_sessions;
595 		/**< Maximum number of sessions supported by device.
596 		 * If 0, the device does not have any limitation in
597 		 * number of sessions that can be used.
598 		 */
599 	} sym;
600 };
601 /* >8 End of structure rte_cryptodev_info. */
602 
603 #define RTE_CRYPTODEV_DETACHED  (0)
604 #define RTE_CRYPTODEV_ATTACHED  (1)
605 
606 /** Definitions of Crypto device event types */
607 enum rte_cryptodev_event_type {
608 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
609 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
610 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
611 };
612 
613 /** Crypto device queue pair configuration structure. */
614 /* Structure rte_cryptodev_qp_conf 8<*/
615 struct rte_cryptodev_qp_conf {
616 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
617 	struct rte_mempool *mp_session;
618 	/**< The mempool for creating session in sessionless mode */
619 };
620 /* >8 End of structure rte_cryptodev_qp_conf. */
621 
622 /**
623  * Function type used for processing crypto ops when enqueue/dequeue burst is
624  * called.
625  *
626  * The callback function is called on enqueue/dequeue burst immediately.
627  *
628  * @param	dev_id		The identifier of the device.
629  * @param	qp_id		The index of the queue pair on which ops are
630  *				enqueued/dequeued. The value must be in the
631  *				range [0, nb_queue_pairs - 1] previously
632  *				supplied to *rte_cryptodev_configure*.
633  * @param	ops		The address of an array of *nb_ops* pointers
634  *				to *rte_crypto_op* structures which contain
635  *				the crypto operations to be processed.
636  * @param	nb_ops		The number of operations to process.
637  * @param	user_param	The arbitrary user parameter passed in by the
638  *				application when the callback was originally
639  *				registered.
640  * @return			The number of ops to be enqueued to the
641  *				crypto device.
642  */
643 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
644 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
645 
646 /**
647  * Typedef for application callback function to be registered by application
648  * software for notification of device events
649  *
650  * @param	dev_id	Crypto device identifier
651  * @param	event	Crypto device event to register for notification of.
652  * @param	cb_arg	User specified parameter to be passed as to passed to
653  *			users callback function.
654  */
655 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
656 		enum rte_cryptodev_event_type event, void *cb_arg);
657 
658 
659 /** Crypto Device statistics */
660 struct rte_cryptodev_stats {
661 	uint64_t enqueued_count;
662 	/**< Count of all operations enqueued */
663 	uint64_t dequeued_count;
664 	/**< Count of all operations dequeued */
665 
666 	uint64_t enqueue_err_count;
667 	/**< Total error count on operations enqueued */
668 	uint64_t dequeue_err_count;
669 	/**< Total error count on operations dequeued */
670 };
671 
672 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
673 /**< Max length of name of crypto PMD */
674 
675 /**
676  * Get the device identifier for the named crypto device.
677  *
678  * @param	name	device name to select the device structure.
679  *
680  * @return
681  *   - Returns crypto device identifier on success.
682  *   - Return -1 on failure to find named crypto device.
683  */
684 int
685 rte_cryptodev_get_dev_id(const char *name);
686 
687 /**
688  * Get the crypto device name given a device identifier.
689  *
690  * @param dev_id
691  *   The identifier of the device
692  *
693  * @return
694  *   - Returns crypto device name.
695  *   - Returns NULL if crypto device is not present.
696  */
697 const char *
698 rte_cryptodev_name_get(uint8_t dev_id);
699 
700 /**
701  * Get the total number of crypto devices that have been successfully
702  * initialised.
703  *
704  * @return
705  *   - The total number of usable crypto devices.
706  */
707 uint8_t
708 rte_cryptodev_count(void);
709 
710 /**
711  * Get number of crypto device defined type.
712  *
713  * @param	driver_id	driver identifier.
714  *
715  * @return
716  *   Returns number of crypto device.
717  */
718 uint8_t
719 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
720 
721 /**
722  * Get number and identifiers of attached crypto devices that
723  * use the same crypto driver.
724  *
725  * @param	driver_name	driver name.
726  * @param	devices		output devices identifiers.
727  * @param	nb_devices	maximal number of devices.
728  *
729  * @return
730  *   Returns number of attached crypto device.
731  */
732 uint8_t
733 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
734 		uint8_t nb_devices);
735 /*
736  * Return the NUMA socket to which a device is connected
737  *
738  * @param dev_id
739  *   The identifier of the device
740  * @return
741  *   The NUMA socket id to which the device is connected or
742  *   a default of zero if the socket could not be determined.
743  *   -1 if returned is the dev_id value is out of range.
744  */
745 int
746 rte_cryptodev_socket_id(uint8_t dev_id);
747 
748 /** Crypto device configuration structure */
749 /* Structure rte_cryptodev_config 8< */
750 struct rte_cryptodev_config {
751 	int socket_id;			/**< Socket to allocate resources on */
752 	uint16_t nb_queue_pairs;
753 	/**< Number of queue pairs to configure on device */
754 	uint64_t ff_disable;
755 	/**< Feature flags to be disabled. Only the following features are
756 	 * allowed to be disabled,
757 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
758 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
759 	 *  - RTE_CRYTPODEV_FF_SECURITY
760 	 */
761 };
762 /* >8 End of structure rte_cryptodev_config. */
763 
764 /**
765  * Configure a device.
766  *
767  * This function must be invoked first before any other function in the
768  * API. This function can also be re-invoked when a device is in the
769  * stopped state.
770  *
771  * @param	dev_id		The identifier of the device to configure.
772  * @param	config		The crypto device configuration structure.
773  *
774  * @return
775  *   - 0: Success, device configured.
776  *   - <0: Error code returned by the driver configuration function.
777  */
778 int
779 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
780 
781 /**
782  * Start an device.
783  *
784  * The device start step is the last one and consists of setting the configured
785  * offload features and in starting the transmit and the receive units of the
786  * device.
787  * On success, all basic functions exported by the API (link status,
788  * receive/transmit, and so on) can be invoked.
789  *
790  * @param dev_id
791  *   The identifier of the device.
792  * @return
793  *   - 0: Success, device started.
794  *   - <0: Error code of the driver device start function.
795  */
796 int
797 rte_cryptodev_start(uint8_t dev_id);
798 
799 /**
800  * Stop an device. The device can be restarted with a call to
801  * rte_cryptodev_start()
802  *
803  * @param	dev_id		The identifier of the device.
804  */
805 void
806 rte_cryptodev_stop(uint8_t dev_id);
807 
808 /**
809  * Close an device. The device cannot be restarted!
810  *
811  * @param	dev_id		The identifier of the device.
812  *
813  * @return
814  *  - 0 on successfully closing device
815  *  - <0 on failure to close device
816  */
817 int
818 rte_cryptodev_close(uint8_t dev_id);
819 
820 /**
821  * Allocate and set up a receive queue pair for a device.
822  *
823  *
824  * @param	dev_id		The identifier of the device.
825  * @param	queue_pair_id	The index of the queue pairs to set up. The
826  *				value must be in the range [0, nb_queue_pair
827  *				- 1] previously supplied to
828  *				rte_cryptodev_configure().
829  * @param	qp_conf		The pointer to the configuration data to be
830  *				used for the queue pair.
831  * @param	socket_id	The *socket_id* argument is the socket
832  *				identifier in case of NUMA. The value can be
833  *				*SOCKET_ID_ANY* if there is no NUMA constraint
834  *				for the DMA memory allocated for the receive
835  *				queue pair.
836  *
837  * @return
838  *   - 0: Success, queue pair correctly set up.
839  *   - <0: Queue pair configuration failed
840  */
841 int
842 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
843 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
844 
845 /**
846  * Get the status of queue pairs setup on a specific crypto device
847  *
848  * @param	dev_id		Crypto device identifier.
849  * @param	queue_pair_id	The index of the queue pairs to set up. The
850  *				value must be in the range [0, nb_queue_pair
851  *				- 1] previously supplied to
852  *				rte_cryptodev_configure().
853  * @return
854  *   - 0: qp was not configured
855  *	 - 1: qp was configured
856  *	 - -EINVAL: device was not configured
857  */
858 int
859 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
860 
861 /**
862  * Get the number of queue pairs on a specific crypto device
863  *
864  * @param	dev_id		Crypto device identifier.
865  * @return
866  *   - The number of configured queue pairs.
867  */
868 uint16_t
869 rte_cryptodev_queue_pair_count(uint8_t dev_id);
870 
871 
872 /**
873  * Retrieve the general I/O statistics of a device.
874  *
875  * @param	dev_id		The identifier of the device.
876  * @param	stats		A pointer to a structure of type
877  *				*rte_cryptodev_stats* to be filled with the
878  *				values of device counters.
879  * @return
880  *   - Zero if successful.
881  *   - Non-zero otherwise.
882  */
883 int
884 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
885 
886 /**
887  * Reset the general I/O statistics of a device.
888  *
889  * @param	dev_id		The identifier of the device.
890  */
891 void
892 rte_cryptodev_stats_reset(uint8_t dev_id);
893 
894 /**
895  * Retrieve the contextual information of a device.
896  *
897  * @param	dev_id		The identifier of the device.
898  * @param	dev_info	A pointer to a structure of type
899  *				*rte_cryptodev_info* to be filled with the
900  *				contextual information of the device.
901  *
902  * @note The capabilities field of dev_info is set to point to the first
903  * element of an array of struct rte_cryptodev_capabilities. The element after
904  * the last valid element has it's op field set to
905  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
906  */
907 void
908 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
909 
910 
911 /**
912  * Register a callback function for specific device id.
913  *
914  * @param	dev_id		Device id.
915  * @param	event		Event interested.
916  * @param	cb_fn		User supplied callback function to be called.
917  * @param	cb_arg		Pointer to the parameters for the registered
918  *				callback.
919  *
920  * @return
921  *  - On success, zero.
922  *  - On failure, a negative value.
923  */
924 int
925 rte_cryptodev_callback_register(uint8_t dev_id,
926 		enum rte_cryptodev_event_type event,
927 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
928 
929 /**
930  * Unregister a callback function for specific device id.
931  *
932  * @param	dev_id		The device identifier.
933  * @param	event		Event interested.
934  * @param	cb_fn		User supplied callback function to be called.
935  * @param	cb_arg		Pointer to the parameters for the registered
936  *				callback.
937  *
938  * @return
939  *  - On success, zero.
940  *  - On failure, a negative value.
941  */
942 int
943 rte_cryptodev_callback_unregister(uint8_t dev_id,
944 		enum rte_cryptodev_event_type event,
945 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
946 
947 /**
948  * @warning
949  * @b EXPERIMENTAL: this API may change without prior notice.
950  *
951  * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR
952  * events.
953  *
954  * @param          dev_id	The device identifier.
955  * @param          qp_id	Queue pair index to be queried.
956  *
957  * @return
958  *   - 1 if requested queue has a pending event.
959  *   - 0 if no pending event is found.
960  *   - a negative value on failure
961  */
962 __rte_experimental
963 int
964 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
965 
966 struct rte_cryptodev_callback;
967 
968 /** Structure to keep track of registered callbacks */
969 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
970 
971 /**
972  * Structure used to hold information about the callbacks to be called for a
973  * queue pair on enqueue/dequeue.
974  */
struct rte_cryptodev_cb {
	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
	/**< Pointer to next callback in the queue pair's list; atomic so
	 * data-path readers can traverse while callbacks are added/removed.
	 */
	rte_cryptodev_callback_fn fn;
	/**< Pointer to callback function invoked on enqueue/dequeue */
	void *arg;
	/**< Pointer to user-supplied argument passed to each invocation of *fn* */
};
983 
984 /**
985  * @internal
986  * Structure used to hold information about the RCU for a queue pair.
987  */
struct rte_cryptodev_cb_rcu {
	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
	/**< Pointer to next callback (start of the queue pair's callback chain) */
	struct rte_rcu_qsbr *qsbr;
	/**< RCU QSBR variable per queue pair; readers report quiescent
	 * states through it so removed callbacks can be reclaimed safely.
	 */
};
994 
995 /**
996  * Get the security context for the cryptodev.
997  *
998  * @param dev_id
999  *   The device identifier.
1000  * @return
1001  *   - NULL on error.
1002  *   - Pointer to security context on success.
1003  */
1004 void *
1005 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1006 
1007 /**
1008  * Create a symmetric session mempool.
1009  *
1010  * @param name
1011  *   The unique mempool name.
1012  * @param nb_elts
1013  *   The number of elements in the mempool.
1014  * @param elt_size
1015  *   The size of the element. This should be the size of the cryptodev PMD
1016  *   session private data obtained through
1017  *   rte_cryptodev_sym_get_private_session_size() function call.
1018  *   For the user who wants to use the same mempool for heterogeneous PMDs
1019  *   this value should be the maximum value of their private session sizes.
1020  *   Please note the created mempool will have bigger elt size than this
1021  *   value as necessary session header and the possible padding are filled
1022  *   into each elt.
1023  * @param cache_size
1024  *   The number of per-lcore cache elements
1025  * @param priv_size
1026  *   The private data size of each session.
1027  * @param socket_id
1028  *   The *socket_id* argument is the socket identifier in the case of
1029  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1030  *   constraint for the reserved zone.
1031  *
1032  * @return
1033  *  - On success returns the created session mempool pointer
1034  *  - On failure returns NULL
1035  */
1036 struct rte_mempool *
1037 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1038 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1039 	int socket_id);
1040 
1041 
1042 /**
1043  * Create an asymmetric session mempool.
1044  *
1045  * @param name
1046  *   The unique mempool name.
1047  * @param nb_elts
1048  *   The number of elements in the mempool.
1049  * @param cache_size
1050  *   The number of per-lcore cache elements
1051  * @param user_data_size
1052  *   The size of user data to be placed after session private data.
1053  * @param socket_id
1054  *   The *socket_id* argument is the socket identifier in the case of
1055  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1056  *   constraint for the reserved zone.
1057  *
1058  * @return
1059  *  - On success return mempool
1060  *  - On failure returns NULL
1061  */
1062 struct rte_mempool *
1063 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1064 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1065 
1066 /**
1067  * Create symmetric crypto session and fill out private data for the device id,
1068  * based on its device type.
1069  *
1070  * @param   dev_id   ID of device that we want the session to be used on
1071  * @param   xforms   Symmetric crypto transform operations to apply on flow
1072  *                   processed with this session
1073  * @param   mp       Mempool to allocate symmetric session objects from
1074  *
1075  * @return
1076  *  - On success return pointer to sym-session.
1077  *  - On failure returns NULL and rte_errno is set to the error code:
1078  *    - EINVAL on invalid arguments.
1079  *    - ENOMEM on memory error for session allocation.
1080  *    - ENOTSUP if device doesn't support session configuration.
1081  */
1082 void *
1083 rte_cryptodev_sym_session_create(uint8_t dev_id,
1084 		struct rte_crypto_sym_xform *xforms,
1085 		struct rte_mempool *mp);
1086 /**
1087  * Create and initialise an asymmetric crypto session structure.
1088  * Calls the PMD to configure the private session data.
1089  *
1090  * @param   dev_id   ID of device that we want the session to be used on
1091  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1092  *                   processed with this session
1093  * @param   mp       mempool to allocate asymmetric session
1094  *                   objects from
1095  * @param   session  void ** for session to be used
1096  *
1097  * @return
1098  *  - 0 on success.
1099  *  - -EINVAL on invalid arguments.
1100  *  - -ENOMEM on memory error for session allocation.
1101  *  - -ENOTSUP if device doesn't support session configuration.
1102  */
1103 int
1104 rte_cryptodev_asym_session_create(uint8_t dev_id,
1105 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1106 		void **session);
1107 
1108 /**
 * Frees the session for the device id, returning it to its mempool.
 * It is the application's responsibility to ensure that there are
 * no in-flight operations still using the session.
1112  *
1113  * @param   dev_id   ID of device that uses the session.
1114  * @param   sess     Session header to be freed.
1115  *
1116  * @return
1117  *  - 0 if successful.
 *  - -EINVAL if session is NULL or the device ids do not match.
1119  */
1120 int
1121 rte_cryptodev_sym_session_free(uint8_t dev_id,
1122 	void *sess);
1123 
1124 /**
1125  * Clears and frees asymmetric crypto session header and private data,
1126  * returning it to its original mempool.
1127  *
1128  * @param   dev_id   ID of device that uses the asymmetric session.
1129  * @param   sess     Session header to be freed.
1130  *
1131  * @return
1132  *  - 0 if successful.
1133  *  - -EINVAL if device is invalid or session is NULL.
1134  */
1135 int
1136 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1137 
1138 /**
1139  * Get the size of the asymmetric session header.
1140  *
1141  * @return
1142  *   Size of the asymmetric header session.
1143  */
1144 unsigned int
1145 rte_cryptodev_asym_get_header_session_size(void);
1146 
1147 /**
1148  * Get the size of the private symmetric session data
1149  * for a device.
1150  *
1151  * @param	dev_id		The device identifier.
1152  *
1153  * @return
1154  *   - Size of the private data, if successful
1155  *   - 0 if device is invalid or does not have private
1156  *   symmetric session
1157  */
1158 unsigned int
1159 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1160 
1161 /**
1162  * Get the size of the private data for asymmetric session
1163  * on device
1164  *
1165  * @param	dev_id		The device identifier.
1166  *
1167  * @return
1168  *   - Size of the asymmetric private data, if successful
1169  *   - 0 if device is invalid or does not have private session
1170  */
1171 unsigned int
1172 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1173 
1174 /**
1175  * Validate if the crypto device index is valid attached crypto device.
1176  *
1177  * @param	dev_id	Crypto device index.
1178  *
1179  * @return
1180  *   - If the device index is valid (1) or not (0).
1181  */
1182 unsigned int
1183 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1184 
1185 /**
1186  * Provide driver identifier.
1187  *
1188  * @param name
1189  *   The pointer to a driver name.
1190  * @return
1191  *  The driver type identifier or -1 if no driver found
1192  */
1193 int rte_cryptodev_driver_id_get(const char *name);
1194 
1195 /**
1196  * Provide driver name.
1197  *
1198  * @param driver_id
1199  *   The driver identifier.
1200  * @return
1201  *  The driver name or null if no driver found
1202  */
1203 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1204 
1205 /**
1206  * Store user data in a session.
1207  *
1208  * @param	sess		Session pointer allocated by
1209  *				*rte_cryptodev_sym_session_create*.
1210  * @param	data		Pointer to the user data.
1211  * @param	size		Size of the user data.
1212  *
1213  * @return
1214  *  - On success, zero.
1215  *  - On failure, a negative value.
1216  */
1217 int
1218 rte_cryptodev_sym_session_set_user_data(void *sess,
1219 					void *data,
1220 					uint16_t size);
1221 
#define CRYPTO_SESS_OPAQUE_DATA_OFF 0
/**
 * Read the opaque data stored at the head of a session handle.
 *
 * @param sess
 *   Session handle; must point to valid session memory.
 * @return
 *   The 64-bit opaque value held at offset CRYPTO_SESS_OPAQUE_DATA_OFF.
 */
static inline uint64_t
rte_cryptodev_sym_session_opaque_data_get(void *sess)
{
	const uint64_t *slot = (const uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF;

	return *slot;
}

/**
 * Store a 64-bit opaque value at the head of a session handle.
 *
 * @param sess
 *   Session handle; must point to valid session memory.
 * @param opaque
 *   Value to store.
 */
static inline void
rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
{
	((uint64_t *)sess)[CRYPTO_SESS_OPAQUE_DATA_OFF] = opaque;
}
1242 
1243 /**
1244  * Get user data stored in a session.
1245  *
1246  * @param	sess		Session pointer allocated by
1247  *				*rte_cryptodev_sym_session_create*.
1248  *
1249  * @return
1250  *  - On success return pointer to user data.
1251  *  - On failure returns NULL.
1252  */
1253 void *
1254 rte_cryptodev_sym_session_get_user_data(void *sess);
1255 
1256 /**
1257  * Store user data in an asymmetric session.
1258  *
1259  * @param	sess		Session pointer allocated by
1260  *				*rte_cryptodev_asym_session_create*.
1261  * @param	data		Pointer to the user data.
1262  * @param	size		Size of the user data.
1263  *
1264  * @return
1265  *  - On success, zero.
1266  *  - -EINVAL if the session pointer is invalid.
1267  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1268  */
1269 int
1270 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1271 
1272 /**
1273  * Get user data stored in an asymmetric session.
1274  *
1275  * @param	sess		Session pointer allocated by
1276  *				*rte_cryptodev_asym_session_create*.
1277  *
1278  * @return
1279  *  - On success return pointer to user data.
1280  *  - On failure returns NULL.
1281  */
1282 void *
1283 rte_cryptodev_asym_session_get_user_data(void *sess);
1284 
1285 /**
1286  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1287  * on user provided data.
1288  *
1289  * @param	dev_id	The device identifier.
1290  * @param	sess	Cryptodev session structure
1291  * @param	ofs	Start and stop offsets for auth and cipher operations
1292  * @param	vec	Vectorized operation descriptor
1293  *
1294  * @return
1295  *  - Returns number of successfully processed packets.
1296  */
1297 uint32_t
1298 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1299 	void *sess, union rte_crypto_sym_ofs ofs,
1300 	struct rte_crypto_sym_vec *vec);
1301 
1302 /**
1303  * Get the size of the raw data-path context buffer.
1304  *
1305  * @param	dev_id		The device identifier.
1306  *
1307  * @return
1308  *   - If the device supports raw data-path APIs, return the context size.
1309  *   - If the device does not support the APIs, return -1.
1310  */
1311 int
1312 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1313 
1314 /**
1315  * Set session event meta data
1316  *
1317  * @param	dev_id		The device identifier.
1318  * @param	sess            Crypto or security session.
1319  * @param	op_type         Operation type.
1320  * @param	sess_type       Session type.
1321  * @param	ev_mdata	Pointer to the event crypto meta data
1322  *				(aka *union rte_event_crypto_metadata*)
1323  * @param	size            Size of ev_mdata.
1324  *
1325  * @return
1326  *  - On success, zero.
1327  *  - On failure, a negative value.
1328  */
1329 int
1330 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1331 	enum rte_crypto_op_type op_type,
1332 	enum rte_crypto_op_sess_type sess_type,
1333 	void *ev_mdata, uint16_t size);
1334 
/**
 * Union of the different crypto session types, including a
 * session-less xform pointer.
 */
union rte_cryptodev_session_ctx {
	void *crypto_sess;                     /**< Crypto session handle */
	struct rte_crypto_sym_xform *xform;    /**< Session-less xform pointer */
	struct rte_security_session *sec_sess; /**< Security session handle */
};
1343 
1344 /**
1345  * Enqueue a vectorized operation descriptor into the device queue but the
1346  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1347  * is called.
1348  *
1349  * @param	qp		Driver specific queue pair data.
1350  * @param	drv_ctx		Driver specific context data.
1351  * @param	vec		Vectorized operation descriptor.
1352  * @param	ofs		Start and stop offsets for auth and cipher
1353  *				operations.
1354  * @param	user_data	The array of user data for dequeue later.
1355  * @param	enqueue_status	Driver written value to specify the
1356  *				enqueue status. Possible values:
1357  *				- 1: The number of operations returned are
1358  *				     enqueued successfully.
1359  *				- 0: The number of operations returned are
1360  *				     cached into the queue but are not processed
1361  *				     until rte_cryptodev_raw_enqueue_done() is
1362  *				     called.
1363  *				- negative integer: Error occurred.
1364  * @return
1365  *   - The number of operations in the descriptor successfully enqueued or
1366  *     cached into the queue but not enqueued yet, depends on the
1367  *     "enqueue_status" value.
1368  */
1369 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1370 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1371 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1372 
1373 /**
1374  * Enqueue single raw data vector into the device queue but the driver may or
1375  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1376  *
1377  * @param	qp		Driver specific queue pair data.
1378  * @param	drv_ctx		Driver specific context data.
1379  * @param	data_vec	The buffer data vector.
1380  * @param	n_data_vecs	Number of buffer data vectors.
1381  * @param	ofs		Start and stop offsets for auth and cipher
1382  *				operations.
1383  * @param	iv		IV virtual and IOVA addresses
1384  * @param	digest		digest virtual and IOVA addresses
1385  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1386  *				depends on the algorithm used.
1387  * @param	user_data	The user data.
1388  * @return
1389  *   - 1: The data vector is enqueued successfully.
1390  *   - 0: The data vector is cached into the queue but is not processed
1391  *        until rte_cryptodev_raw_enqueue_done() is called.
1392  *   - negative integer: failure.
1393  */
1394 typedef int (*cryptodev_sym_raw_enqueue_t)(
1395 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1396 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1397 	struct rte_crypto_va_iova_ptr *iv,
1398 	struct rte_crypto_va_iova_ptr *digest,
1399 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1400 	void *user_data);
1401 
1402 /**
1403  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1404  * enqueued/dequeued operations.
1405  *
1406  * @param	qp		Driver specific queue pair data.
1407  * @param	drv_ctx		Driver specific context data.
1408  * @param	n		The total number of processed operations.
1409  * @return
1410  *   - On success return 0.
1411  *   - On failure return negative integer.
1412  */
1413 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1414 	uint32_t n);
1415 
1416 /**
1417  * Typedef that the user provided for the driver to get the dequeue count.
1418  * The function may return a fixed number or the number parsed from the user
1419  * data stored in the first processed operation.
1420  *
1421  * @param	user_data	Dequeued user data.
1422  * @return
1423  *  - The number of operations to be dequeued.
1424  */
1425 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1426 
1427 /**
1428  * Typedef that the user provided to deal with post dequeue operation, such
1429  * as filling status.
1430  *
1431  * @param	user_data	Dequeued user data.
1432  * @param	index		Index number of the processed descriptor.
1433  * @param	is_op_success	Operation status provided by the driver.
1434  */
1435 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1436 	uint32_t index, uint8_t is_op_success);
1437 
1438 /**
1439  * Dequeue a burst of symmetric crypto processing.
1440  *
1441  * @param	qp			Driver specific queue pair data.
1442  * @param	drv_ctx			Driver specific context data.
1443  * @param	get_dequeue_count	User provided callback function to
1444  *					obtain dequeue operation count.
1445  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1446  *					value is used to pass the maximum
1447  *					number of operations to be dequeued.
1448  * @param	post_dequeue		User provided callback function to
1449  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
1451  *					from device queue. In case of
1452  *					*is_user_data_array* is set there
1453  *					should be enough room to store all
1454  *					user data.
1455  * @param	is_user_data_array	Set 1 if every dequeued user data will
1456  *					be written into out_user_data array.
1457  *					Set 0 if only the first user data will
1458  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1460  *					total successful operations count.
1461  * @param	dequeue_status		Driver written value to specify the
1462  *					dequeue status. Possible values:
1463  *					- 1: Successfully dequeued the number
1464  *					     of operations returned. The user
1465  *					     data previously set during enqueue
1466  *					     is stored in the "out_user_data".
1467  *					- 0: The number of operations returned
1468  *					     are completed and the user data is
1469  *					     stored in the "out_user_data", but
1470  *					     they are not freed from the queue
1471  *					     until
1472  *					     rte_cryptodev_raw_dequeue_done()
1473  *					     is called.
1474  *					- negative integer: Error occurred.
1475  * @return
1476  *   - The number of operations dequeued or completed but not freed from the
1477  *     queue, depends on "dequeue_status" value.
1478  */
1479 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1480 	uint8_t *drv_ctx,
1481 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1482 	uint32_t max_nb_to_dequeue,
1483 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1484 	void **out_user_data, uint8_t is_user_data_array,
1485 	uint32_t *n_success, int *dequeue_status);
1486 
1487 /**
1488  * Dequeue a symmetric crypto processing.
1489  *
1490  * @param	qp			Driver specific queue pair data.
1491  * @param	drv_ctx			Driver specific context data.
1492  * @param	dequeue_status		Driver written value to specify the
1493  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1495  *					     The user data is returned.
1496  *					- 0: The first operation in the queue
1497  *					     is completed and the user data
1498  *					     previously set during enqueue is
1499  *					     returned, but it is not freed from
1500  *					     the queue until
1501  *					     rte_cryptodev_raw_dequeue_done() is
1502  *					     called.
1503  *					- negative integer: Error occurred.
1504  * @param	op_status		Driver written value to specify
1505  *					operation status.
1506  * @return
1507  *   - The user data pointer retrieved from device queue or NULL if no
1508  *     operation is ready for dequeue.
1509  */
1510 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1511 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1512 		enum rte_crypto_op_status *op_status);
1513 
1514 /**
1515  * Context data for raw data-path API crypto process. The buffer of this
1516  * structure is to be allocated by the user application with the size equal
1517  * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value.
1518  */
struct rte_crypto_raw_dp_ctx {
	void *qp_data;
	/**< Driver specific queue pair data */

	cryptodev_sym_raw_enqueue_t enqueue;
	/**< Enqueue one raw data vector */
	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
	/**< Enqueue a burst of raw data vectors */
	cryptodev_sym_raw_operation_done_t enqueue_done;
	/**< Inform the queue pair to start processing cached enqueues */
	cryptodev_sym_raw_dequeue_t dequeue;
	/**< Dequeue one processed operation's user data */
	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
	/**< Dequeue a burst of processed operations */
	cryptodev_sym_raw_operation_done_t dequeue_done;
	/**< Inform the queue pair that dequeued operations can be freed */

	/* Driver specific context data */
	__extension__ uint8_t drv_ctx_data[];
};
1532 
1533 /**
1534  * Configure raw data-path context data.
1535  *
1536  * @param	dev_id		The device identifier.
1537  * @param	qp_id		The index of the queue pair from which to
1538  *				retrieve processed packets. The value must be
1539  *				in the range [0, nb_queue_pair - 1] previously
1540  *				supplied to rte_cryptodev_configure().
1541  * @param	ctx		The raw data-path context data.
1542  * @param	sess_type	Session type.
1543  * @param	session_ctx	Session context data.
1544  * @param	is_update	Set 0 if it is to initialize the ctx.
1545  *				Set 1 if ctx is initialized and only to update
1546  *				session context data.
1547  * @return
1548  *   - On success return 0.
1549  *   - On failure return negative integer.
1550  *     - -EINVAL if input parameters are invalid.
1551  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1552  *        provided session.
1553  */
1554 int
1555 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1556 	struct rte_crypto_raw_dp_ctx *ctx,
1557 	enum rte_crypto_op_sess_type sess_type,
1558 	union rte_cryptodev_session_ctx session_ctx,
1559 	uint8_t is_update);
1560 
1561 /**
1562  * Enqueue a vectorized operation descriptor into the device queue but the
1563  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1564  * is called.
1565  *
1566  * @param	ctx		The initialized raw data-path context data.
1567  * @param	vec		Vectorized operation descriptor.
1568  * @param	ofs		Start and stop offsets for auth and cipher
1569  *				operations.
1570  * @param	user_data	The array of user data for dequeue later.
1571  * @param	enqueue_status	Driver written value to specify the
1572  *				enqueue status. Possible values:
1573  *				- 1: The number of operations returned are
1574  *				     enqueued successfully.
1575  *				- 0: The number of operations returned are
1576  *				     cached into the queue but are not processed
1577  *				     until rte_cryptodev_raw_enqueue_done() is
1578  *				     called.
1579  *				- negative integer: Error occurred.
1580  * @return
1581  *   - The number of operations in the descriptor successfully enqueued or
1582  *     cached into the queue but not enqueued yet, depends on the
1583  *     "enqueue_status" value.
1584  */
1585 uint32_t
1586 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1587 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1588 	void **user_data, int *enqueue_status);
1589 
1590 /**
1591  * Enqueue single raw data vector into the device queue but the driver may or
1592  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1593  *
1594  * @param	ctx		The initialized raw data-path context data.
1595  * @param	data_vec	The buffer data vector.
1596  * @param	n_data_vecs	Number of buffer data vectors.
1597  * @param	ofs		Start and stop offsets for auth and cipher
1598  *				operations.
1599  * @param	iv		IV virtual and IOVA addresses
1600  * @param	digest		digest virtual and IOVA addresses
1601  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1602  *				depends on the algorithm used.
1603  * @param	user_data	The user data.
1604  * @return
1605  *   - 1: The data vector is enqueued successfully.
1606  *   - 0: The data vector is cached into the queue but is not processed
1607  *        until rte_cryptodev_raw_enqueue_done() is called.
1608  *   - negative integer: failure.
1609  */
1610 __rte_experimental
1611 static __rte_always_inline int
1612 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1613 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1614 	union rte_crypto_sym_ofs ofs,
1615 	struct rte_crypto_va_iova_ptr *iv,
1616 	struct rte_crypto_va_iova_ptr *digest,
1617 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1618 	void *user_data)
1619 {
1620 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1621 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1622 }
1623 
1624 /**
1625  * Start processing all enqueued operations from last
1626  * rte_cryptodev_configure_raw_dp_ctx() call.
1627  *
1628  * @param	ctx	The initialized raw data-path context data.
1629  * @param	n	The number of operations cached.
1630  * @return
1631  *   - On success return 0.
1632  *   - On failure return negative integer.
1633  */
1634 int
1635 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1636 		uint32_t n);
1637 
1638 /**
1639  * Dequeue a burst of symmetric crypto processing.
1640  *
1641  * @param	ctx			The initialized raw data-path context
1642  *					data.
1643  * @param	get_dequeue_count	User provided callback function to
1644  *					obtain dequeue operation count.
1645  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1646  *					value is used to pass the maximum
1647  *					number of operations to be dequeued.
1648  * @param	post_dequeue		User provided callback function to
1649  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
1651  *					from device queue. In case of
1652  *					*is_user_data_array* is set there
1653  *					should be enough room to store all
1654  *					user data.
1655  * @param	is_user_data_array	Set 1 if every dequeued user data will
1656  *					be written into out_user_data array.
1657  *					Set 0 if only the first user data will
1658  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1660  *					total successful operations count.
1661  * @param	dequeue_status		Driver written value to specify the
1662  *					dequeue status. Possible values:
1663  *					- 1: Successfully dequeued the number
1664  *					     of operations returned. The user
1665  *					     data previously set during enqueue
1666  *					     is stored in the "out_user_data".
1667  *					- 0: The number of operations returned
1668  *					     are completed and the user data is
1669  *					     stored in the "out_user_data", but
1670  *					     they are not freed from the queue
1671  *					     until
1672  *					     rte_cryptodev_raw_dequeue_done()
1673  *					     is called.
1674  *					- negative integer: Error occurred.
1675  * @return
1676  *   - The number of operations dequeued or completed but not freed from the
1677  *     queue, depends on "dequeue_status" value.
1678  */
1679 uint32_t
1680 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1681 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1682 	uint32_t max_nb_to_dequeue,
1683 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1684 	void **out_user_data, uint8_t is_user_data_array,
1685 	uint32_t *n_success, int *dequeue_status);
1686 
1687 /**
1688  * Dequeue a symmetric crypto processing.
1689  *
1690  * @param	ctx			The initialized raw data-path context
1691  *					data.
1692  * @param	dequeue_status		Driver written value to specify the
1693  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1695  *					     The user data is returned.
1696  *					- 0: The first operation in the queue
1697  *					     is completed and the user data
1698  *					     previously set during enqueue is
1699  *					     returned, but it is not freed from
1700  *					     the queue until
1701  *					     rte_cryptodev_raw_dequeue_done() is
1702  *					     called.
1703  *					- negative integer: Error occurred.
1704  * @param	op_status		Driver written value to specify
1705  *					operation status.
1706  * @return
1707  *   - The user data pointer retrieved from device queue or NULL if no
1708  *     operation is ready for dequeue.
1709  */
1710 __rte_experimental
1711 static __rte_always_inline void *
1712 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1713 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1714 {
1715 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1716 			op_status);
1717 }
1718 
1719 /**
 * Inform the queue pair that dequeue operations are finished.
1721  *
1722  * @param	ctx	The initialized raw data-path context data.
1723  * @param	n	The number of operations.
1724  * @return
1725  *   - On success return 0.
1726  *   - On failure return negative integer.
1727  */
1728 int
1729 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1730 		uint32_t n);
1731 
1732 /**
1733  * Add a user callback for a given crypto device and queue pair which will be
1734  * called on crypto ops enqueue.
1735  *
1736  * This API configures a function to be called for each burst of crypto ops
1737  * received on a given crypto device queue pair. The return value is a pointer
1738  * that can be used later to remove the callback using
1739  * rte_cryptodev_remove_enq_callback().
1740  *
1741  * Callbacks registered by application would not survive
1742  * rte_cryptodev_configure() as it reinitializes the callback list.
1743  * It is user responsibility to remove all installed callbacks before
1744  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1745  * Application is expected to call add API after rte_cryptodev_configure().
1746  *
1747  * Multiple functions can be registered per queue pair & they are called
1748  * in the order they were added. The API does not restrict on maximum number
1749  * of callbacks.
1750  *
1751  * @param	dev_id		The identifier of the device.
1752  * @param	qp_id		The index of the queue pair on which ops are
1753  *				to be enqueued for processing. The value
1754  *				must be in the range [0, nb_queue_pairs - 1]
1755  *				previously supplied to
1756  *				*rte_cryptodev_configure*.
1757  * @param	cb_fn		The callback function
1758  * @param	cb_arg		A generic pointer parameter which will be passed
1759  *				to each invocation of the callback function on
1760  *				this crypto device and queue pair.
1761  *
1762  * @return
1763  *  - NULL on error & rte_errno will contain the error code.
1764  *  - On success, a pointer value which can later be used to remove the
1765  *    callback.
1766  */
1767 struct rte_cryptodev_cb *
1768 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1769 			       uint16_t qp_id,
1770 			       rte_cryptodev_callback_fn cb_fn,
1771 			       void *cb_arg);
1772 
/**
 * Remove a user callback function for given crypto device and queue pair.
 *
 * This function is used to remove enqueue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_enq_callback().
 *
 * Note: since rte_cryptodev_configure() reinitializes the callback list,
 * all installed callbacks should be removed via this API before the device
 * is reconfigured, to avoid possible memory leakage.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be enqueued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb		Pointer to user supplied callback created via
 *				rte_cryptodev_add_enq_callback().
 *
 * @return
 *   -  0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				      uint16_t qp_id,
				      struct rte_cryptodev_cb *cb);
1797 
/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops dequeue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * received on a given crypto device queue pair. The callback is invoked by
 * rte_cryptodev_dequeue_burst() after ops have been fetched from the device
 * and before they are returned to the application. The return value is a
 * pointer that can be used later to remove the callback using
 * rte_cryptodev_remove_deq_callback().
 *
 * Callbacks registered by the application would not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leakage.
 * The application is expected to call this add API after
 * rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair and they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be dequeued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb_fn		The callback function
 * @param	cb_arg		A generic pointer parameter which will be passed
 *				to each invocation of the callback function on
 *				this crypto device and queue pair.
 *
 * @return
 *   - NULL on error & rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg);
1837 
/**
 * Remove a user callback function for given crypto device and queue pair.
 *
 * This function is used to remove dequeue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_deq_callback().
 *
 * Note: since rte_cryptodev_configure() reinitializes the callback list,
 * all installed callbacks should be removed via this API before the device
 * is reconfigured, to avoid possible memory leakage.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be dequeued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb		Pointer to user supplied callback created via
 *				rte_cryptodev_add_deq_callback().
 *
 * @return
 *   -  0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				      uint16_t qp_id,
				      struct rte_cryptodev_cb *cb);
1862 
1863 #include <rte_cryptodev_core.h>
1864 /**
1865  *
1866  * Dequeue a burst of processed crypto operations from a queue on the crypto
1867  * device. The dequeued operation are stored in *rte_crypto_op* structures
1868  * whose pointers are supplied in the *ops* array.
1869  *
1870  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1871  * actually dequeued, which is the number of *rte_crypto_op* data structures
1872  * effectively supplied into the *ops* array.
1873  *
1874  * A return value equal to *nb_ops* indicates that the queue contained
1875  * at least *nb_ops* operations, and this is likely to signify that other
1876  * processed operations remain in the devices output queue. Applications
1877  * implementing a "retrieve as many processed operations as possible" policy
1878  * can check this specific case and keep invoking the
1879  * rte_cryptodev_dequeue_burst() function until a value less than
1880  * *nb_ops* is returned.
1881  *
1882  * The rte_cryptodev_dequeue_burst() function does not provide any error
1883  * notification to avoid the corresponding overhead.
1884  *
1885  * @param	dev_id		The symmetric crypto device identifier
1886  * @param	qp_id		The index of the queue pair from which to
1887  *				retrieve processed packets. The value must be
1888  *				in the range [0, nb_queue_pair - 1] previously
1889  *				supplied to rte_cryptodev_configure().
1890  * @param	ops		The address of an array of pointers to
1891  *				*rte_crypto_op* structures that must be
1892  *				large enough to store *nb_ops* pointers in it.
1893  * @param	nb_ops		The maximum number of operations to dequeue.
1894  *
1895  * @return
1896  *   - The number of operations actually dequeued, which is the number
1897  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1898  *   *ops* array.
1899  */
1900 static inline uint16_t
1901 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1902 		struct rte_crypto_op **ops, uint16_t nb_ops)
1903 {
1904 	const struct rte_crypto_fp_ops *fp_ops;
1905 	void *qp;
1906 
1907 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1908 
1909 	fp_ops = &rte_crypto_fp_ops[dev_id];
1910 	qp = fp_ops->qp.data[qp_id];
1911 
1912 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1913 
1914 #ifdef RTE_CRYPTO_CALLBACKS
1915 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1916 		struct rte_cryptodev_cb_rcu *list;
1917 		struct rte_cryptodev_cb *cb;
1918 
1919 		/* rte_memory_order_release memory order was used when the
1920 		 * call back was inserted into the list.
1921 		 * Since there is a clear dependency between loading
1922 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1923 		 * not required.
1924 		 */
1925 		list = &fp_ops->qp.deq_cb[qp_id];
1926 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1927 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1928 
1929 		while (cb != NULL) {
1930 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1931 					cb->arg);
1932 			cb = cb->next;
1933 		};
1934 
1935 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1936 	}
1937 #endif
1938 	return nb_ops;
1939 }
1940 
1941 /**
1942  * Enqueue a burst of operations for processing on a crypto device.
1943  *
1944  * The rte_cryptodev_enqueue_burst() function is invoked to place
1945  * crypto operations on the queue *qp_id* of the device designated by
1946  * its *dev_id*.
1947  *
1948  * The *nb_ops* parameter is the number of operations to process which are
1949  * supplied in the *ops* array of *rte_crypto_op* structures.
1950  *
1951  * The rte_cryptodev_enqueue_burst() function returns the number of
1952  * operations it actually enqueued for processing. A return value equal to
1953  * *nb_ops* means that all packets have been enqueued.
1954  *
1955  * @param	dev_id		The identifier of the device.
1956  * @param	qp_id		The index of the queue pair which packets are
1957  *				to be enqueued for processing. The value
1958  *				must be in the range [0, nb_queue_pairs - 1]
1959  *				previously supplied to
1960  *				 *rte_cryptodev_configure*.
1961  * @param	ops		The address of an array of *nb_ops* pointers
1962  *				to *rte_crypto_op* structures which contain
1963  *				the crypto operations to be processed.
1964  * @param	nb_ops		The number of operations to process.
1965  *
1966  * @return
1967  * The number of operations actually enqueued on the crypto device. The return
1968  * value can be less than the value of the *nb_ops* parameter when the
1969  * crypto devices queue is full or if invalid parameters are specified in
1970  * a *rte_crypto_op*.
1971  */
1972 static inline uint16_t
1973 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1974 		struct rte_crypto_op **ops, uint16_t nb_ops)
1975 {
1976 	const struct rte_crypto_fp_ops *fp_ops;
1977 	void *qp;
1978 
1979 	fp_ops = &rte_crypto_fp_ops[dev_id];
1980 	qp = fp_ops->qp.data[qp_id];
1981 #ifdef RTE_CRYPTO_CALLBACKS
1982 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1983 		struct rte_cryptodev_cb_rcu *list;
1984 		struct rte_cryptodev_cb *cb;
1985 
1986 		/* rte_memory_order_release memory order was used when the
1987 		 * call back was inserted into the list.
1988 		 * Since there is a clear dependency between loading
1989 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1990 		 * not required.
1991 		 */
1992 		list = &fp_ops->qp.enq_cb[qp_id];
1993 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1994 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1995 
1996 		while (cb != NULL) {
1997 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1998 					cb->arg);
1999 			cb = cb->next;
2000 		};
2001 
2002 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2003 	}
2004 #endif
2005 
2006 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2007 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2008 }
2009 
2010 
2011 
2012 #ifdef __cplusplus
2013 }
2014 #endif
2015 
2016 #endif /* _RTE_CRYPTODEV_H_ */
2017