xref: /dpdk/lib/cryptodev/rte_cryptodev.h (revision 3178e37c65a676366f33f0bc56f49d9b26a06448)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include <rte_compat.h>
22 #include "rte_kvargs.h"
23 #include "rte_crypto.h"
24 #include <rte_common.h>
25 #include <rte_rcu_qsbr.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
/* NOTE(review): identifier is misspelled ("cyptodev", missing 'r') but is
 * part of the exported API/ABI; renaming it would break existing users, so
 * it is flagged here rather than fixed.
 */
extern const char **rte_cyptodev_names;
30 
31 /**
32  * @internal Logtype used for cryptodev related messages.
33  */
34 extern int rte_cryptodev_logtype;
35 #define RTE_LOGTYPE_CRYPTODEV rte_cryptodev_logtype
36 
/* Logging Macros */

/** Log an error message, prefixed with the calling function name and line. */
#define CDEV_LOG_ERR(...) \
	RTE_LOG_LINE(ERR, CRYPTODEV, \
		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__ ,), \
			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__ ,)))

/** Log an informational message (no function/line prefix). */
#define CDEV_LOG_INFO(...) \
	RTE_LOG_LINE(INFO, CRYPTODEV, "" __VA_ARGS__)

/** Log a debug message, prefixed with the calling function name and line. */
#define CDEV_LOG_DEBUG(...) \
	RTE_LOG_LINE(DEBUG, CRYPTODEV, \
		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__ ,), \
			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__ ,)))

/** PMD trace helper: prefixes the message with "[dev] func:".
 * Note: expands an unqualified identifier "dev", so a variable named
 * "dev" must exist in the calling scope.
 */
#define CDEV_PMD_TRACE(...) \
	RTE_LOG_LINE(DEBUG, CRYPTODEV, \
		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__ ,), \
			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__ ,)))
55 
/**
 * A macro that points to an offset from the start
 * of the crypto operation structure (rte_crypto_op)
 *
 * The returned pointer is cast to type t.
 *
 * @param c
 *   The crypto operation.
 * @param t
 *   The type to cast the result into.
 * @param o
 *   The offset from the start of the crypto operation.
 */
#define rte_crypto_op_ctod_offset(c, t, o)	\
	((t)((char *)(c) + (o)))
71 
/**
 * A macro that returns the physical address that points
 * to an offset from the start of the crypto operation
 * (rte_crypto_op)
 *
 * @param c
 *   The crypto operation.
 * @param o
 *   The offset from the start of the crypto operation
 *   to calculate address from.
 *
 * @return
 *   The IOVA address (rte_iova_t) of the crypto operation plus the offset.
 */
#define rte_crypto_op_ctophys_offset(c, o)	\
	(rte_iova_t)((c)->phys_addr + (o))
85 
/**
 * Crypto parameters range description
 */
struct rte_crypto_param_range {
	uint16_t min;	/**< minimum size in bytes */
	uint16_t max;	/**< maximum size in bytes */
	uint16_t increment;
	/**< if a range of sizes are supported,
	 * this parameter is used to indicate
	 * increments in byte size that are supported
	 * between the minimum and maximum
	 */
};
99 
/**
 * Data-unit supported lengths of cipher algorithms.
 * A bit can represent any set of data-unit sizes
 * (single size, multiple size, range, etc).
 * These bits are used in the dataunit_set field of
 * struct rte_cryptodev_symmetric_capability (cipher).
 */
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
108 
/**
 * Symmetric Crypto Capability
 */
struct rte_cryptodev_symmetric_capability {
	enum rte_crypto_sym_xform_type xform_type;
	/**< Transform type : Authentication / Cipher / AEAD */
	union {
		/* Union member is selected by xform_type. */
		struct {
			enum rte_crypto_auth_algorithm algo;
			/**< authentication algorithm */
			uint16_t block_size;
			/**< algorithm block size */
			struct rte_crypto_param_range key_size;
			/**< auth key size range */
			struct rte_crypto_param_range digest_size;
			/**< digest size range */
			struct rte_crypto_param_range aad_size;
			/**< Additional authentication data size range */
			struct rte_crypto_param_range iv_size;
			/**< Initialisation vector data size range */
		} auth;
		/**< Symmetric Authentication transform capabilities */
		struct {
			enum rte_crypto_cipher_algorithm algo;
			/**< cipher algorithm */
			uint16_t block_size;
			/**< algorithm block size */
			struct rte_crypto_param_range key_size;
			/**< cipher key size range */
			struct rte_crypto_param_range iv_size;
			/**< Initialisation vector data size range */
			uint32_t dataunit_set;
			/**<
			 * Supported data-unit lengths:
			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
			 * or 0 for lengths defined in the algorithm standard.
			 */
		} cipher;
		/**< Symmetric Cipher transform capabilities */
		struct {
			enum rte_crypto_aead_algorithm algo;
			/**< AEAD algorithm */
			uint16_t block_size;
			/**< algorithm block size */
			struct rte_crypto_param_range key_size;
			/**< AEAD key size range */
			struct rte_crypto_param_range digest_size;
			/**< digest size range */
			struct rte_crypto_param_range aad_size;
			/**< Additional authentication data size range */
			struct rte_crypto_param_range iv_size;
			/**< Initialisation vector data size range */
		} aead;
		/**< Symmetric AEAD transform capabilities */
	};
};
164 
/**
 * Asymmetric Xform Crypto Capability
 */
struct rte_cryptodev_asymmetric_xform_capability {
	enum rte_crypto_asym_xform_type xform_type;
	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */

	uint32_t op_types;
	/**<
	 * Bitmask for supported rte_crypto_asym_op_type or
	 * rte_crypto_asym_ke_type. Which enum is used is determined
	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
	 * it is rte_crypto_asym_op_type.
	 */

	__extension__
	union {
		/* Union member is selected by xform_type: modlen for modulus
		 * based xforms, internal_rng for Elliptic curve based xforms.
		 */
		struct rte_crypto_param_range modlen;
		/**< Range of modulus length supported by modulus based xform.
		 * Value 0 means implementation default.
		 */

		uint8_t internal_rng;
		/**< Availability of random number generator for Elliptic curve based xform.
		 * Value 0 means unavailable, and application should pass the required
		 * random value. Otherwise, PMD would internally compute the random number.
		 */
	};

	uint64_t hash_algos;
	/**< Bitmask of hash algorithms supported for op_type. */
};
198 
/**
 * Asymmetric Crypto Capability
 */
struct rte_cryptodev_asymmetric_capability {
	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
	/**< Capability of one asymmetric xform (algorithm) */
};
205 
206 
/** Structure used to capture a capability of a crypto device */
struct rte_cryptodev_capabilities {
	enum rte_crypto_op_type op;
	/**< Operation type */

	union {
		/* Union member is selected by op. */
		struct rte_cryptodev_symmetric_capability sym;
		/**< Symmetric operation capability parameters */
		struct rte_cryptodev_asymmetric_capability asym;
		/**< Asymmetric operation capability parameters */
	};
};
219 
/** Structure used to describe crypto algorithms */
struct rte_cryptodev_sym_capability_idx {
	enum rte_crypto_sym_xform_type type;
	/**< Symmetric transform type: Authentication / Cipher / AEAD */
	union {
		/* Union member is selected by type. */
		enum rte_crypto_cipher_algorithm cipher;
		enum rte_crypto_auth_algorithm auth;
		enum rte_crypto_aead_algorithm aead;
	} algo;
};
229 
/**
 * Structure used to describe asymmetric crypto xforms
 * Each xform maps to one asym algorithm.
 */
struct rte_cryptodev_asym_capability_idx {
	enum rte_crypto_asym_xform_type type;
	/**< Asymmetric xform (algo) type, e.g. RSA/MODEXP/DH/DSA/MODINV */
};
238 
239 /**
240  * Provide capabilities available for defined device and algorithm
241  *
242  * @param	dev_id		The identifier of the device.
243  * @param	idx		Description of crypto algorithms.
244  *
245  * @return
 *   - Return description of the symmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
248  */
249 const struct rte_cryptodev_symmetric_capability *
250 rte_cryptodev_sym_capability_get(uint8_t dev_id,
251 		const struct rte_cryptodev_sym_capability_idx *idx);
252 
253 /**
254  *  Provide capabilities available for defined device and xform
255  *
256  * @param	dev_id		The identifier of the device.
257  * @param	idx		Description of asym crypto xform.
258  *
259  * @return
 *   - Return description of the asymmetric crypto capability if it exists.
 *   - Return NULL if the capability does not exist.
262  */
263 const struct rte_cryptodev_asymmetric_xform_capability *
264 rte_cryptodev_asym_capability_get(uint8_t dev_id,
265 		const struct rte_cryptodev_asym_capability_idx *idx);
266 
267 /**
268  * Check if key size and initial vector are supported
269  * in crypto cipher capability
270  *
271  * @param	capability	Description of the symmetric crypto capability.
272  * @param	key_size	Cipher key size.
273  * @param	iv_size		Cipher initial vector size.
274  *
275  * @return
276  *   - Return 0 if the parameters are in range of the capability.
277  *   - Return -1 if the parameters are out of range of the capability.
278  */
279 int
280 rte_cryptodev_sym_capability_check_cipher(
281 		const struct rte_cryptodev_symmetric_capability *capability,
282 		uint16_t key_size, uint16_t iv_size);
283 
284 /**
285  * Check if key size and initial vector are supported
286  * in crypto auth capability
287  *
288  * @param	capability	Description of the symmetric crypto capability.
289  * @param	key_size	Auth key size.
290  * @param	digest_size	Auth digest size.
291  * @param	iv_size		Auth initial vector size.
292  *
293  * @return
294  *   - Return 0 if the parameters are in range of the capability.
295  *   - Return -1 if the parameters are out of range of the capability.
296  */
297 int
298 rte_cryptodev_sym_capability_check_auth(
299 		const struct rte_cryptodev_symmetric_capability *capability,
300 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
301 
302 /**
303  * Check if key, digest, AAD and initial vector sizes are supported
304  * in crypto AEAD capability
305  *
306  * @param	capability	Description of the symmetric crypto capability.
307  * @param	key_size	AEAD key size.
308  * @param	digest_size	AEAD digest size.
309  * @param	aad_size	AEAD AAD size.
310  * @param	iv_size		AEAD IV size.
311  *
312  * @return
313  *   - Return 0 if the parameters are in range of the capability.
314  *   - Return -1 if the parameters are out of range of the capability.
315  */
316 int
317 rte_cryptodev_sym_capability_check_aead(
318 		const struct rte_cryptodev_symmetric_capability *capability,
319 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
320 		uint16_t iv_size);
321 
322 /**
323  * Check if op type is supported
324  *
325  * @param	capability	Description of the asymmetric crypto capability.
326  * @param	op_type		op type
327  *
328  * @return
329  *   - Return 1 if the op type is supported
330  *   - Return 0 if unsupported
331  */
332 int
333 rte_cryptodev_asym_xform_capability_check_optype(
334 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
335 		enum rte_crypto_asym_op_type op_type);
336 
337 /**
338  * Check if modulus length is in supported range
339  *
340  * @param	capability	Description of the asymmetric crypto capability.
341  * @param	modlen		modulus length.
342  *
343  * @return
344  *   - Return 0 if the parameters are in range of the capability.
345  *   - Return -1 if the parameters are out of range of the capability.
346  */
347 int
348 rte_cryptodev_asym_xform_capability_check_modlen(
349 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
350 		uint16_t modlen);
351 
352 /**
353  * Check if hash algorithm is supported.
354  *
355  * @param	capability	Asymmetric crypto capability.
356  * @param	hash		Hash algorithm.
357  *
358  * @return
359  *   - Return true if the hash algorithm is supported.
360  *   - Return false if the hash algorithm is not supported.
361  */
362 bool
363 rte_cryptodev_asym_xform_capability_check_hash(
364 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
365 	enum rte_crypto_auth_algorithm hash);
366 
367 /**
368  * Provide the cipher algorithm enum, given an algorithm string
369  *
370  * @param	algo_enum	A pointer to the cipher algorithm
371  *				enum to be filled
372  * @param	algo_string	Authentication algo string
373  *
374  * @return
375  * - Return -1 if string is not valid
 * - Return 0 if the string is valid
377  */
378 int
379 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
380 		const char *algo_string);
381 
382 /**
383  * Provide the authentication algorithm enum, given an algorithm string
384  *
385  * @param	algo_enum	A pointer to the authentication algorithm
386  *				enum to be filled
387  * @param	algo_string	Authentication algo string
388  *
389  * @return
390  * - Return -1 if string is not valid
 * - Return 0 if the string is valid
392  */
393 int
394 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
395 		const char *algo_string);
396 
397 /**
398  * Provide the AEAD algorithm enum, given an algorithm string
399  *
400  * @param	algo_enum	A pointer to the AEAD algorithm
401  *				enum to be filled
402  * @param	algo_string	AEAD algorithm string
403  *
404  * @return
405  * - Return -1 if string is not valid
 * - Return 0 if the string is valid
407  */
408 int
409 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
410 		const char *algo_string);
411 
412 /**
413  * Provide the Asymmetric xform enum, given an xform string
414  *
415  * @param	xform_enum	A pointer to the xform type
416  *				enum to be filled
417  * @param	xform_string	xform string
418  *
419  * @return
420  * - Return -1 if string is not valid
421  * - Return 0 if the string is valid
422  */
423 int
424 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
425 		const char *xform_string);
426 
427 /**
428  * Provide the cipher algorithm string, given an algorithm enum.
429  *
430  * @param	algo_enum	cipher algorithm enum
431  *
432  * @return
433  * - Return NULL if enum is not valid
434  * - Return algo_string corresponding to enum
435  */
436 __rte_experimental
437 const char *
438 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
439 
440 /**
441  * Provide the authentication algorithm string, given an algorithm enum.
442  *
443  * @param	algo_enum	auth algorithm enum
444  *
445  * @return
446  * - Return NULL if enum is not valid
447  * - Return algo_string corresponding to enum
448  */
449 __rte_experimental
450 const char *
451 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
452 
453 /**
454  * Provide the AEAD algorithm string, given an algorithm enum.
455  *
456  * @param	algo_enum	AEAD algorithm enum
457  *
458  * @return
459  * - Return NULL if enum is not valid
460  * - Return algo_string corresponding to enum
461  */
462 __rte_experimental
463 const char *
464 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
465 
466 /**
467  * Provide the Asymmetric xform string, given an xform enum.
468  *
469  * @param	xform_enum	xform type enum
470  *
471  * @return
472  * - Return NULL, if enum is not valid.
473  * - Return xform string, for valid enum.
474  */
475 __rte_experimental
476 const char *
477 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
478 
479 
480 /** Macro used at end of crypto PMD list */
481 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
482 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
483 
484 
485 /**
486  * Crypto device supported feature flags
487  *
488  * Note:
489  * New features flags should be added to the end of the list
490  *
491  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
492  */
493 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
494 /**< Symmetric crypto operations are supported */
495 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
496 /**< Asymmetric crypto operations are supported */
497 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
498 /**< Chaining symmetric crypto operations are supported */
499 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
500 /**< Utilises CPU SIMD SSE instructions */
501 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
502 /**< Utilises CPU SIMD AVX instructions */
503 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
504 /**< Utilises CPU SIMD AVX2 instructions */
505 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
506 /**< Utilises CPU AES-NI instructions */
507 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
508 /**< Operations are off-loaded to an
509  * external hardware accelerator
510  */
511 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
512 /**< Utilises CPU SIMD AVX512 instructions */
513 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
514 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
515  * are supported
516  */
517 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
518 /**< Out-of-place Scatter-gather (SGL) buffers are
519  * supported in input and output
520  */
521 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
522 /**< Out-of-place Scatter-gather (SGL) buffers are supported
523  * in input, combined with linear buffers (LB), with a
524  * single segment in output
525  */
526 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
527 /**< Out-of-place Scatter-gather (SGL) buffers are supported
528  * in output, combined with linear buffers (LB) in input
529  */
530 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
531 /**< Out-of-place linear buffers (LB) are supported in input and output */
532 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
533 /**< Utilises CPU NEON instructions */
534 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
535 /**< Utilises ARM CPU Cryptographic Extensions */
536 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
537 /**< Support Security Protocol Processing */
538 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
539 /**< Support RSA Private Key OP with exponent */
540 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
541 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
542 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
543 /**< Support encrypted-digest operations where digest is appended to data */
544 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
545 /**< Support asymmetric session-less operations */
546 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
547 /**< Support symmetric cpu-crypto processing */
548 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
549 /**< Support symmetric session-less operations */
550 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
551 /**< Support operations on data which is not byte aligned */
552 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
553 /**< Support accelerator specific symmetric raw data-path APIs */
554 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
555 /**< Support operations on multiple data-units message */
556 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
557 /**< Support wrapped key in cipher xform  */
558 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
559 /**< Support inner checksum computation/verification */
560 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
561 /**< Support Rx injection after security processing */
562 
563 /**
564  * Get the name of a crypto device feature flag
565  *
566  * @param	flag	The mask describing the flag.
567  *
568  * @return
569  *   The name of this flag, or NULL if it's not a valid feature flag.
570  */
571 const char *
572 rte_cryptodev_get_feature_name(uint64_t flag);
573 
/**  Crypto device information */
/* Structure rte_cryptodev_info 8< */
struct rte_cryptodev_info {
	const char *driver_name;	/**< Driver name. */
	uint8_t driver_id;		/**< Driver identifier */
	struct rte_device *device;	/**< Generic device information. */

	uint64_t feature_flags;
	/**< Feature flags exposes HW/SW features for the given device */

	const struct rte_cryptodev_capabilities *capabilities;
	/**< Array of devices supported capabilities.
	 * The array is terminated by an element whose op field is set to
	 * RTE_CRYPTO_OP_TYPE_UNDEFINED (see rte_cryptodev_info_get()).
	 */

	unsigned max_nb_queue_pairs;
	/**< Maximum number of queues pairs supported by device. */

	uint16_t min_mbuf_headroom_req;
	/**< Minimum mbuf headroom required by device */

	uint16_t min_mbuf_tailroom_req;
	/**< Minimum mbuf tailroom required by device */

	struct {
		unsigned max_nb_sessions;
		/**< Maximum number of sessions supported by device.
		 * If 0, the device does not have any limitation in
		 * number of sessions that can be used.
		 */
	} sym;
};
/* >8 End of structure rte_cryptodev_info. */
605 
/** Crypto device detached state */
#define RTE_CRYPTODEV_DETACHED  (0)
/** Crypto device attached state */
#define RTE_CRYPTODEV_ATTACHED  (1)

/** Definitions of Crypto device event types */
enum rte_cryptodev_event_type {
	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
};
615 
/** Crypto device queue pair configuration structure. */
/* Structure rte_cryptodev_qp_conf 8<*/
struct rte_cryptodev_qp_conf {
	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
	struct rte_mempool *mp_session;
	/**< The mempool for creating sessions in sessionless mode */
};
/* >8 End of structure rte_cryptodev_qp_conf. */
624 
625 /**
626  * Function type used for processing crypto ops when enqueue/dequeue burst is
627  * called.
628  *
629  * The callback function is called on enqueue/dequeue burst immediately.
630  *
631  * @param	dev_id		The identifier of the device.
632  * @param	qp_id		The index of the queue pair on which ops are
633  *				enqueued/dequeued. The value must be in the
634  *				range [0, nb_queue_pairs - 1] previously
635  *				supplied to *rte_cryptodev_configure*.
636  * @param	ops		The address of an array of *nb_ops* pointers
637  *				to *rte_crypto_op* structures which contain
638  *				the crypto operations to be processed.
639  * @param	nb_ops		The number of operations to process.
640  * @param	user_param	The arbitrary user parameter passed in by the
641  *				application when the callback was originally
642  *				registered.
643  * @return			The number of ops to be enqueued to the
644  *				crypto device.
645  */
646 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
647 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
648 
649 /**
650  * Typedef for application callback function to be registered by application
651  * software for notification of device events
652  *
653  * @param	dev_id	Crypto device identifier
654  * @param	event	Crypto device event to register for notification of.
 * @param	cb_arg	User specified parameter to be passed to the
 *			user's callback function.
657  */
658 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
659 		enum rte_cryptodev_event_type event, void *cb_arg);
660 
661 
/** Crypto Device statistics.
 * Retrieved with rte_cryptodev_stats_get() and cleared with
 * rte_cryptodev_stats_reset().
 */
struct rte_cryptodev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};
674 
675 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
676 /**< Max length of name of crypto PMD */
677 
678 /**
679  * Get the device identifier for the named crypto device.
680  *
681  * @param	name	device name to select the device structure.
682  *
683  * @return
684  *   - Returns crypto device identifier on success.
685  *   - Return -1 on failure to find named crypto device.
686  */
687 int
688 rte_cryptodev_get_dev_id(const char *name);
689 
690 /**
691  * Get the crypto device name given a device identifier.
692  *
693  * @param dev_id
694  *   The identifier of the device
695  *
696  * @return
697  *   - Returns crypto device name.
698  *   - Returns NULL if crypto device is not present.
699  */
700 const char *
701 rte_cryptodev_name_get(uint8_t dev_id);
702 
703 /**
704  * Get the total number of crypto devices that have been successfully
705  * initialised.
706  *
707  * @return
708  *   - The total number of usable crypto devices.
709  */
710 uint8_t
711 rte_cryptodev_count(void);
712 
713 /**
714  * Get number of crypto device defined type.
715  *
716  * @param	driver_id	driver identifier.
717  *
718  * @return
719  *   Returns number of crypto device.
720  */
721 uint8_t
722 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
723 
724 /**
725  * Get number and identifiers of attached crypto devices that
726  * use the same crypto driver.
727  *
728  * @param	driver_name	driver name.
729  * @param	devices		output devices identifiers.
730  * @param	nb_devices	maximal number of devices.
731  *
732  * @return
733  *   Returns number of attached crypto device.
734  */
735 uint8_t
736 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
737 		uint8_t nb_devices);
/**
 * Return the NUMA socket to which a device is connected
 *
 * @param dev_id
 *   The identifier of the device
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 if the dev_id value is out of range.
 */
748 int
749 rte_cryptodev_socket_id(uint8_t dev_id);
750 
/** Crypto device configuration structure */
/* Structure rte_cryptodev_config 8< */
struct rte_cryptodev_config {
	int socket_id;			/**< Socket to allocate resources on */
	uint16_t nb_queue_pairs;
	/**< Number of queue pairs to configure on device */
	uint64_t ff_disable;
	/**< Feature flags to be disabled. Only the following features are
	 * allowed to be disabled,
	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_SECURITY
	 */
};
/* >8 End of structure rte_cryptodev_config. */
766 
767 /**
768  * Configure a device.
769  *
770  * This function must be invoked first before any other function in the
771  * API. This function can also be re-invoked when a device is in the
772  * stopped state.
773  *
774  * @param	dev_id		The identifier of the device to configure.
775  * @param	config		The crypto device configuration structure.
776  *
777  * @return
778  *   - 0: Success, device configured.
779  *   - <0: Error code returned by the driver configuration function.
780  */
781 int
782 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
783 
784 /**
 * Start a device.
786  *
787  * The device start step is the last one and consists of setting the configured
788  * offload features and in starting the transmit and the receive units of the
789  * device.
790  * On success, all basic functions exported by the API (link status,
791  * receive/transmit, and so on) can be invoked.
792  *
793  * @param dev_id
794  *   The identifier of the device.
795  * @return
796  *   - 0: Success, device started.
797  *   - <0: Error code of the driver device start function.
798  */
799 int
800 rte_cryptodev_start(uint8_t dev_id);
801 
802 /**
 * Stop a device. The device can be restarted with a call to
804  * rte_cryptodev_start()
805  *
806  * @param	dev_id		The identifier of the device.
807  */
808 void
809 rte_cryptodev_stop(uint8_t dev_id);
810 
811 /**
 * Close a device. The device cannot be restarted!
813  *
814  * @param	dev_id		The identifier of the device.
815  *
816  * @return
817  *  - 0 on successfully closing device
818  *  - <0 on failure to close device
819  */
820 int
821 rte_cryptodev_close(uint8_t dev_id);
822 
823 /**
 * Allocate and set up a queue pair for a device.
825  *
826  *
827  * @param	dev_id		The identifier of the device.
828  * @param	queue_pair_id	The index of the queue pairs to set up. The
829  *				value must be in the range [0, nb_queue_pair
830  *				- 1] previously supplied to
831  *				rte_cryptodev_configure().
832  * @param	qp_conf		The pointer to the configuration data to be
833  *				used for the queue pair.
834  * @param	socket_id	The *socket_id* argument is the socket
835  *				identifier in case of NUMA. The value can be
836  *				*SOCKET_ID_ANY* if there is no NUMA constraint
837  *				for the DMA memory allocated for the receive
838  *				queue pair.
839  *
840  * @return
841  *   - 0: Success, queue pair correctly set up.
842  *   - <0: Queue pair configuration failed
843  */
844 int
845 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
846 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
847 
848 /**
849  * Get the status of queue pairs setup on a specific crypto device
850  *
851  * @param	dev_id		Crypto device identifier.
852  * @param	queue_pair_id	The index of the queue pairs to set up. The
853  *				value must be in the range [0, nb_queue_pair
854  *				- 1] previously supplied to
855  *				rte_cryptodev_configure().
856  * @return
857  *   - 0: qp was not configured
858  *	 - 1: qp was configured
859  *	 - -EINVAL: device was not configured
860  */
861 int
862 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
863 
864 /**
865  * Get the number of queue pairs on a specific crypto device
866  *
867  * @param	dev_id		Crypto device identifier.
868  * @return
869  *   - The number of configured queue pairs.
870  */
871 uint16_t
872 rte_cryptodev_queue_pair_count(uint8_t dev_id);
873 
874 
875 /**
876  * Retrieve the general I/O statistics of a device.
877  *
878  * @param	dev_id		The identifier of the device.
879  * @param	stats		A pointer to a structure of type
880  *				*rte_cryptodev_stats* to be filled with the
881  *				values of device counters.
882  * @return
883  *   - Zero if successful.
884  *   - Non-zero otherwise.
885  */
886 int
887 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
888 
889 /**
890  * Reset the general I/O statistics of a device.
891  *
892  * @param	dev_id		The identifier of the device.
893  */
894 void
895 rte_cryptodev_stats_reset(uint8_t dev_id);
896 
897 /**
898  * Retrieve the contextual information of a device.
899  *
900  * @param	dev_id		The identifier of the device.
901  * @param	dev_info	A pointer to a structure of type
902  *				*rte_cryptodev_info* to be filled with the
903  *				contextual information of the device.
904  *
905  * @note The capabilities field of dev_info is set to point to the first
906  * element of an array of struct rte_cryptodev_capabilities. The element after
 * the last valid element has its op field set to
908  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
909  */
910 void
911 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
912 
913 
914 /**
915  * Register a callback function for specific device id.
916  *
917  * @param	dev_id		Device id.
918  * @param	event		Event interested.
919  * @param	cb_fn		User supplied callback function to be called.
920  * @param	cb_arg		Pointer to the parameters for the registered
921  *				callback.
922  *
923  * @return
924  *  - On success, zero.
925  *  - On failure, a negative value.
926  */
927 int
928 rte_cryptodev_callback_register(uint8_t dev_id,
929 		enum rte_cryptodev_event_type event,
930 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
931 
932 /**
933  * Unregister a callback function for specific device id.
934  *
935  * @param	dev_id		The device identifier.
936  * @param	event		Event interested.
937  * @param	cb_fn		User supplied callback function to be called.
938  * @param	cb_arg		Pointer to the parameters for the registered
939  *				callback.
940  *
941  * @return
942  *  - On success, zero.
943  *  - On failure, a negative value.
944  */
945 int
946 rte_cryptodev_callback_unregister(uint8_t dev_id,
947 		enum rte_cryptodev_event_type event,
948 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
949 
950 /**
951  * @warning
952  * @b EXPERIMENTAL: this API may change without prior notice.
953  *
954  * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR
955  * events.
956  *
957  * @param          dev_id	The device identifier.
958  * @param          qp_id	Queue pair index to be queried.
959  *
960  * @return
961  *   - 1 if requested queue has a pending event.
962  *   - 0 if no pending event is found.
963  *   - a negative value on failure
964  */
965 __rte_experimental
966 int
967 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
968 
969 struct rte_cryptodev_callback;
970 
971 /** Structure to keep track of registered callbacks */
972 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
973 
974 /**
975  * Structure used to hold information about the callbacks to be called for a
976  * queue pair on enqueue/dequeue.
977  */
struct rte_cryptodev_cb {
	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
	/**< Pointer to the next callback in the queue pair's list */
	rte_cryptodev_callback_fn fn;
	/**< Callback function invoked for each burst of crypto ops */
	void *arg;
	/**< User-supplied argument passed to every invocation of fn */
};
986 
987 /**
988  * @internal
989  * Structure used to hold information about the RCU for a queue pair.
990  */
struct rte_cryptodev_cb_rcu {
	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
	/**< Head of the callback list for this queue pair */
	struct rte_rcu_qsbr *qsbr;
	/**< RCU QSBR variable, one per queue pair */
};
997 
998 /**
999  * Get the security context for the cryptodev.
1000  *
1001  * @param dev_id
1002  *   The device identifier.
1003  * @return
1004  *   - NULL on error.
1005  *   - Pointer to security context on success.
1006  */
1007 void *
1008 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1009 
1010 /**
1011  * Create a symmetric session mempool.
1012  *
1013  * @param name
1014  *   The unique mempool name.
1015  * @param nb_elts
1016  *   The number of elements in the mempool.
1017  * @param elt_size
1018  *   The size of the element. This should be the size of the cryptodev PMD
1019  *   session private data obtained through
1020  *   rte_cryptodev_sym_get_private_session_size() function call.
1021  *   For the user who wants to use the same mempool for heterogeneous PMDs
1022  *   this value should be the maximum value of their private session sizes.
1023  *   Please note the created mempool will have bigger elt size than this
1024  *   value as necessary session header and the possible padding are filled
1025  *   into each elt.
1026  * @param cache_size
1027  *   The number of per-lcore cache elements
1028  * @param priv_size
1029  *   The private data size of each session.
1030  * @param socket_id
1031  *   The *socket_id* argument is the socket identifier in the case of
1032  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1033  *   constraint for the reserved zone.
1034  *
1035  * @return
1036  *  - On success returns the created session mempool pointer
1037  *  - On failure returns NULL
1038  */
1039 struct rte_mempool *
1040 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1041 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1042 	int socket_id);
1043 
1044 
1045 /**
1046  * Create an asymmetric session mempool.
1047  *
1048  * @param name
1049  *   The unique mempool name.
1050  * @param nb_elts
1051  *   The number of elements in the mempool.
1052  * @param cache_size
1053  *   The number of per-lcore cache elements
1054  * @param user_data_size
1055  *   The size of user data to be placed after session private data.
1056  * @param socket_id
1057  *   The *socket_id* argument is the socket identifier in the case of
1058  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1059  *   constraint for the reserved zone.
1060  *
1061  * @return
1062  *  - On success return mempool
1063  *  - On failure returns NULL
1064  */
1065 struct rte_mempool *
1066 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1067 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1068 
1069 /**
1070  * Create symmetric crypto session and fill out private data for the device id,
1071  * based on its device type.
1072  *
1073  * @param   dev_id   ID of device that we want the session to be used on
1074  * @param   xforms   Symmetric crypto transform operations to apply on flow
1075  *                   processed with this session
1076  * @param   mp       Mempool to allocate symmetric session objects from
1077  *
1078  * @return
1079  *  - On success return pointer to sym-session.
1080  *  - On failure returns NULL and rte_errno is set to the error code:
1081  *    - EINVAL on invalid arguments.
1082  *    - ENOMEM on memory error for session allocation.
1083  *    - ENOTSUP if device doesn't support session configuration.
1084  */
1085 void *
1086 rte_cryptodev_sym_session_create(uint8_t dev_id,
1087 		struct rte_crypto_sym_xform *xforms,
1088 		struct rte_mempool *mp);
1089 /**
1090  * Create and initialise an asymmetric crypto session structure.
1091  * Calls the PMD to configure the private session data.
1092  *
1093  * @param   dev_id   ID of device that we want the session to be used on
1094  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1095  *                   processed with this session
1096  * @param   mp       mempool to allocate asymmetric session
1097  *                   objects from
1098  * @param   session  void ** for session to be used
1099  *
1100  * @return
1101  *  - 0 on success.
1102  *  - -EINVAL on invalid arguments.
1103  *  - -ENOMEM on memory error for session allocation.
1104  *  - -ENOTSUP if device doesn't support session configuration.
1105  */
1106 int
1107 rte_cryptodev_asym_session_create(uint8_t dev_id,
1108 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1109 		void **session);
1110 
1111 /**
 * Frees the session for the given device id, returning it to its mempool.
 * It is the application's responsibility to ensure that there are no
 * in-flight operations still using the session when it is freed.
1115  *
1116  * @param   dev_id   ID of device that uses the session.
1117  * @param   sess     Session header to be freed.
1118  *
1119  * @return
1120  *  - 0 if successful.
 *  - -EINVAL if the session is NULL or the device id does not match.
1122  */
1123 int
1124 rte_cryptodev_sym_session_free(uint8_t dev_id,
1125 	void *sess);
1126 
1127 /**
1128  * Clears and frees asymmetric crypto session header and private data,
1129  * returning it to its original mempool.
1130  *
1131  * @param   dev_id   ID of device that uses the asymmetric session.
1132  * @param   sess     Session header to be freed.
1133  *
1134  * @return
1135  *  - 0 if successful.
1136  *  - -EINVAL if device is invalid or session is NULL.
1137  */
1138 int
1139 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1140 
1141 /**
1142  * Get the size of the asymmetric session header.
1143  *
1144  * @return
1145  *   Size of the asymmetric header session.
1146  */
1147 unsigned int
1148 rte_cryptodev_asym_get_header_session_size(void);
1149 
1150 /**
1151  * Get the size of the private symmetric session data
1152  * for a device.
1153  *
1154  * @param	dev_id		The device identifier.
1155  *
1156  * @return
1157  *   - Size of the private data, if successful
1158  *   - 0 if device is invalid or does not have private
1159  *   symmetric session
1160  */
1161 unsigned int
1162 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1163 
1164 /**
1165  * Get the size of the private data for asymmetric session
1166  * on device
1167  *
1168  * @param	dev_id		The device identifier.
1169  *
1170  * @return
1171  *   - Size of the asymmetric private data, if successful
1172  *   - 0 if device is invalid or does not have private session
1173  */
1174 unsigned int
1175 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1176 
1177 /**
1178  * Validate if the crypto device index is valid attached crypto device.
1179  *
1180  * @param	dev_id	Crypto device index.
1181  *
1182  * @return
1183  *   - If the device index is valid (1) or not (0).
1184  */
1185 unsigned int
1186 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1187 
1188 /**
1189  * Provide driver identifier.
1190  *
1191  * @param name
1192  *   The pointer to a driver name.
1193  * @return
1194  *  The driver type identifier or -1 if no driver found
1195  */
1196 int rte_cryptodev_driver_id_get(const char *name);
1197 
1198 /**
1199  * Provide driver name.
1200  *
1201  * @param driver_id
1202  *   The driver identifier.
1203  * @return
1204  *  The driver name or null if no driver found
1205  */
1206 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1207 
1208 /**
1209  * Store user data in a session.
1210  *
1211  * @param	sess		Session pointer allocated by
1212  *				*rte_cryptodev_sym_session_create*.
1213  * @param	data		Pointer to the user data.
1214  * @param	size		Size of the user data.
1215  *
1216  * @return
1217  *  - On success, zero.
1218  *  - On failure, a negative value.
1219  */
1220 int
1221 rte_cryptodev_sym_session_set_user_data(void *sess,
1222 					void *data,
1223 					uint16_t size);
1224 
#define CRYPTO_SESS_OPAQUE_DATA_OFF 0
/**
 * Read the opaque data word stored in a session handle.
 *
 * Returns the 64-bit value held at slot CRYPTO_SESS_OPAQUE_DATA_OFF
 * of the session buffer.
 */
static inline uint64_t
rte_cryptodev_sym_session_opaque_data_get(void *sess)
{
	const uint64_t *slot = (const uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF;

	return *slot;
}
1234 
1235 /**
1236  * Set opaque data in session handle
1237  */
1238 static inline void
1239 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1240 {
1241 	uint64_t *data;
1242 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1243 	*data = opaque;
1244 }
1245 
1246 /**
1247  * Get user data stored in a session.
1248  *
1249  * @param	sess		Session pointer allocated by
1250  *				*rte_cryptodev_sym_session_create*.
1251  *
1252  * @return
1253  *  - On success return pointer to user data.
1254  *  - On failure returns NULL.
1255  */
1256 void *
1257 rte_cryptodev_sym_session_get_user_data(void *sess);
1258 
1259 /**
1260  * Store user data in an asymmetric session.
1261  *
1262  * @param	sess		Session pointer allocated by
1263  *				*rte_cryptodev_asym_session_create*.
1264  * @param	data		Pointer to the user data.
1265  * @param	size		Size of the user data.
1266  *
1267  * @return
1268  *  - On success, zero.
1269  *  - -EINVAL if the session pointer is invalid.
1270  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1271  */
1272 int
1273 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1274 
1275 /**
1276  * Get user data stored in an asymmetric session.
1277  *
1278  * @param	sess		Session pointer allocated by
1279  *				*rte_cryptodev_asym_session_create*.
1280  *
1281  * @return
1282  *  - On success return pointer to user data.
1283  *  - On failure returns NULL.
1284  */
1285 void *
1286 rte_cryptodev_asym_session_get_user_data(void *sess);
1287 
1288 /**
1289  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1290  * on user provided data.
1291  *
1292  * @param	dev_id	The device identifier.
1293  * @param	sess	Cryptodev session structure
1294  * @param	ofs	Start and stop offsets for auth and cipher operations
1295  * @param	vec	Vectorized operation descriptor
1296  *
1297  * @return
1298  *  - Returns number of successfully processed packets.
1299  */
1300 uint32_t
1301 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1302 	void *sess, union rte_crypto_sym_ofs ofs,
1303 	struct rte_crypto_sym_vec *vec);
1304 
1305 /**
1306  * Get the size of the raw data-path context buffer.
1307  *
1308  * @param	dev_id		The device identifier.
1309  *
1310  * @return
1311  *   - If the device supports raw data-path APIs, return the context size.
1312  *   - If the device does not support the APIs, return -1.
1313  */
1314 int
1315 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1316 
1317 /**
1318  * Set session event meta data
1319  *
1320  * @param	dev_id		The device identifier.
1321  * @param	sess            Crypto or security session.
1322  * @param	op_type         Operation type.
1323  * @param	sess_type       Session type.
1324  * @param	ev_mdata	Pointer to the event crypto meta data
1325  *				(aka *union rte_event_crypto_metadata*)
1326  * @param	size            Size of ev_mdata.
1327  *
1328  * @return
1329  *  - On success, zero.
1330  *  - On failure, a negative value.
1331  */
1332 int
1333 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1334 	enum rte_crypto_op_type op_type,
1335 	enum rte_crypto_op_sess_type sess_type,
1336 	void *ev_mdata, uint16_t size);
1337 
1338 /**
1339  * Union of different crypto session types, including session-less xform
1340  * pointer.
1341  */
union rte_cryptodev_session_ctx {
	void *crypto_sess;
	/**< Pointer to a crypto session */
	struct rte_crypto_sym_xform *xform;
	/**< Session-less operation: pointer to the symmetric xform chain */
	struct rte_security_session *sec_sess;
	/**< Pointer to a security session */
};
1346 
1347 /**
1348  * Enqueue a vectorized operation descriptor into the device queue but the
1349  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1350  * is called.
1351  *
1352  * @param	qp		Driver specific queue pair data.
1353  * @param	drv_ctx		Driver specific context data.
1354  * @param	vec		Vectorized operation descriptor.
1355  * @param	ofs		Start and stop offsets for auth and cipher
1356  *				operations.
1357  * @param	user_data	The array of user data for dequeue later.
1358  * @param	enqueue_status	Driver written value to specify the
1359  *				enqueue status. Possible values:
1360  *				- 1: The number of operations returned are
1361  *				     enqueued successfully.
1362  *				- 0: The number of operations returned are
1363  *				     cached into the queue but are not processed
1364  *				     until rte_cryptodev_raw_enqueue_done() is
1365  *				     called.
1366  *				- negative integer: Error occurred.
1367  * @return
1368  *   - The number of operations in the descriptor successfully enqueued or
1369  *     cached into the queue but not enqueued yet, depends on the
1370  *     "enqueue_status" value.
1371  */
1372 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1373 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1374 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1375 
1376 /**
1377  * Enqueue single raw data vector into the device queue but the driver may or
1378  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1379  *
1380  * @param	qp		Driver specific queue pair data.
1381  * @param	drv_ctx		Driver specific context data.
1382  * @param	data_vec	The buffer data vector.
1383  * @param	n_data_vecs	Number of buffer data vectors.
1384  * @param	ofs		Start and stop offsets for auth and cipher
1385  *				operations.
1386  * @param	iv		IV virtual and IOVA addresses
1387  * @param	digest		digest virtual and IOVA addresses
1388  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1389  *				depends on the algorithm used.
1390  * @param	user_data	The user data.
1391  * @return
1392  *   - 1: The data vector is enqueued successfully.
1393  *   - 0: The data vector is cached into the queue but is not processed
1394  *        until rte_cryptodev_raw_enqueue_done() is called.
1395  *   - negative integer: failure.
1396  */
1397 typedef int (*cryptodev_sym_raw_enqueue_t)(
1398 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1399 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1400 	struct rte_crypto_va_iova_ptr *iv,
1401 	struct rte_crypto_va_iova_ptr *digest,
1402 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1403 	void *user_data);
1404 
1405 /**
1406  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1407  * enqueued/dequeued operations.
1408  *
1409  * @param	qp		Driver specific queue pair data.
1410  * @param	drv_ctx		Driver specific context data.
1411  * @param	n		The total number of processed operations.
1412  * @return
1413  *   - On success return 0.
1414  *   - On failure return negative integer.
1415  */
1416 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1417 	uint32_t n);
1418 
1419 /**
1420  * Typedef that the user provided for the driver to get the dequeue count.
1421  * The function may return a fixed number or the number parsed from the user
1422  * data stored in the first processed operation.
1423  *
1424  * @param	user_data	Dequeued user data.
1425  * @return
1426  *  - The number of operations to be dequeued.
1427  */
1428 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1429 
1430 /**
1431  * Typedef that the user provided to deal with post dequeue operation, such
1432  * as filling status.
1433  *
1434  * @param	user_data	Dequeued user data.
1435  * @param	index		Index number of the processed descriptor.
1436  * @param	is_op_success	Operation status provided by the driver.
1437  */
1438 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1439 	uint32_t index, uint8_t is_op_success);
1440 
1441 /**
1442  * Dequeue a burst of symmetric crypto processing.
1443  *
1444  * @param	qp			Driver specific queue pair data.
1445  * @param	drv_ctx			Driver specific context data.
1446  * @param	get_dequeue_count	User provided callback function to
1447  *					obtain dequeue operation count.
1448  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1449  *					value is used to pass the maximum
1450  *					number of operations to be dequeued.
1451  * @param	post_dequeue		User provided callback function to
1452  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
1454  *					from device queue. In case of
1455  *					*is_user_data_array* is set there
1456  *					should be enough room to store all
1457  *					user data.
1458  * @param	is_user_data_array	Set 1 if every dequeued user data will
1459  *					be written into out_user_data array.
1460  *					Set 0 if only the first user data will
1461  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1463  *					total successful operations count.
1464  * @param	dequeue_status		Driver written value to specify the
1465  *					dequeue status. Possible values:
1466  *					- 1: Successfully dequeued the number
1467  *					     of operations returned. The user
1468  *					     data previously set during enqueue
1469  *					     is stored in the "out_user_data".
1470  *					- 0: The number of operations returned
1471  *					     are completed and the user data is
1472  *					     stored in the "out_user_data", but
1473  *					     they are not freed from the queue
1474  *					     until
1475  *					     rte_cryptodev_raw_dequeue_done()
1476  *					     is called.
1477  *					- negative integer: Error occurred.
1478  * @return
1479  *   - The number of operations dequeued or completed but not freed from the
1480  *     queue, depends on "dequeue_status" value.
1481  */
1482 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1483 	uint8_t *drv_ctx,
1484 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1485 	uint32_t max_nb_to_dequeue,
1486 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1487 	void **out_user_data, uint8_t is_user_data_array,
1488 	uint32_t *n_success, int *dequeue_status);
1489 
1490 /**
1491  * Dequeue a symmetric crypto processing.
1492  *
1493  * @param	qp			Driver specific queue pair data.
1494  * @param	drv_ctx			Driver specific context data.
1495  * @param	dequeue_status		Driver written value to specify the
1496  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1498  *					     The user data is returned.
1499  *					- 0: The first operation in the queue
1500  *					     is completed and the user data
1501  *					     previously set during enqueue is
1502  *					     returned, but it is not freed from
1503  *					     the queue until
1504  *					     rte_cryptodev_raw_dequeue_done() is
1505  *					     called.
1506  *					- negative integer: Error occurred.
1507  * @param	op_status		Driver written value to specify
1508  *					operation status.
1509  * @return
1510  *   - The user data pointer retrieved from device queue or NULL if no
1511  *     operation is ready for dequeue.
1512  */
1513 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1514 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1515 		enum rte_crypto_op_status *op_status);
1516 
1517 /**
1518  * Context data for raw data-path API crypto process. The buffer of this
1519  * structure is to be allocated by the user application with the size equal
1520  * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value.
1521  */
struct rte_crypto_raw_dp_ctx {
	void *qp_data;
	/**< Driver specific queue pair data */

	cryptodev_sym_raw_enqueue_t enqueue;
	/**< Enqueue one raw data vector (see cryptodev_sym_raw_enqueue_t) */
	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
	/**< Enqueue a vectorized operation descriptor */
	cryptodev_sym_raw_operation_done_t enqueue_done;
	/**< Start processing operations cached by enqueue calls */
	cryptodev_sym_raw_dequeue_t dequeue;
	/**< Dequeue a single operation's user data */
	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
	/**< Dequeue a burst of operations */
	cryptodev_sym_raw_operation_done_t dequeue_done;
	/**< Finish dequeuing; free completed operations from the queue */

	/* Driver specific context data */
	__extension__ uint8_t drv_ctx_data[];
};
1535 
1536 /**
1537  * Configure raw data-path context data.
1538  *
1539  * @param	dev_id		The device identifier.
1540  * @param	qp_id		The index of the queue pair from which to
1541  *				retrieve processed packets. The value must be
1542  *				in the range [0, nb_queue_pair - 1] previously
1543  *				supplied to rte_cryptodev_configure().
1544  * @param	ctx		The raw data-path context data.
1545  * @param	sess_type	Session type.
1546  * @param	session_ctx	Session context data.
1547  * @param	is_update	Set 0 if it is to initialize the ctx.
1548  *				Set 1 if ctx is initialized and only to update
1549  *				session context data.
1550  * @return
1551  *   - On success return 0.
1552  *   - On failure return negative integer.
1553  *     - -EINVAL if input parameters are invalid.
1554  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1555  *        provided session.
1556  */
1557 int
1558 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1559 	struct rte_crypto_raw_dp_ctx *ctx,
1560 	enum rte_crypto_op_sess_type sess_type,
1561 	union rte_cryptodev_session_ctx session_ctx,
1562 	uint8_t is_update);
1563 
1564 /**
1565  * Enqueue a vectorized operation descriptor into the device queue but the
1566  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1567  * is called.
1568  *
1569  * @param	ctx		The initialized raw data-path context data.
1570  * @param	vec		Vectorized operation descriptor.
1571  * @param	ofs		Start and stop offsets for auth and cipher
1572  *				operations.
1573  * @param	user_data	The array of user data for dequeue later.
1574  * @param	enqueue_status	Driver written value to specify the
1575  *				enqueue status. Possible values:
1576  *				- 1: The number of operations returned are
1577  *				     enqueued successfully.
1578  *				- 0: The number of operations returned are
1579  *				     cached into the queue but are not processed
1580  *				     until rte_cryptodev_raw_enqueue_done() is
1581  *				     called.
1582  *				- negative integer: Error occurred.
1583  * @return
1584  *   - The number of operations in the descriptor successfully enqueued or
1585  *     cached into the queue but not enqueued yet, depends on the
1586  *     "enqueue_status" value.
1587  */
1588 uint32_t
1589 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1590 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1591 	void **user_data, int *enqueue_status);
1592 
1593 /**
1594  * Enqueue single raw data vector into the device queue but the driver may or
1595  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1596  *
1597  * @param	ctx		The initialized raw data-path context data.
1598  * @param	data_vec	The buffer data vector.
1599  * @param	n_data_vecs	Number of buffer data vectors.
1600  * @param	ofs		Start and stop offsets for auth and cipher
1601  *				operations.
1602  * @param	iv		IV virtual and IOVA addresses
1603  * @param	digest		digest virtual and IOVA addresses
1604  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1605  *				depends on the algorithm used.
1606  * @param	user_data	The user data.
1607  * @return
1608  *   - 1: The data vector is enqueued successfully.
1609  *   - 0: The data vector is cached into the queue but is not processed
1610  *        until rte_cryptodev_raw_enqueue_done() is called.
1611  *   - negative integer: failure.
1612  */
1613 __rte_experimental
1614 static __rte_always_inline int
1615 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1616 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1617 	union rte_crypto_sym_ofs ofs,
1618 	struct rte_crypto_va_iova_ptr *iv,
1619 	struct rte_crypto_va_iova_ptr *digest,
1620 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1621 	void *user_data)
1622 {
1623 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1624 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1625 }
1626 
1627 /**
1628  * Start processing all enqueued operations from last
1629  * rte_cryptodev_configure_raw_dp_ctx() call.
1630  *
1631  * @param	ctx	The initialized raw data-path context data.
1632  * @param	n	The number of operations cached.
1633  * @return
1634  *   - On success return 0.
1635  *   - On failure return negative integer.
1636  */
1637 int
1638 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1639 		uint32_t n);
1640 
1641 /**
1642  * Dequeue a burst of symmetric crypto processing.
1643  *
1644  * @param	ctx			The initialized raw data-path context
1645  *					data.
1646  * @param	get_dequeue_count	User provided callback function to
1647  *					obtain dequeue operation count.
1648  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1649  *					value is used to pass the maximum
1650  *					number of operations to be dequeued.
1651  * @param	post_dequeue		User provided callback function to
1652  *					post-process a dequeued operation.
1653  * @param	out_user_data		User data pointer array to be retrieve
1654  *					from device queue. In case of
1655  *					*is_user_data_array* is set there
1656  *					should be enough room to store all
1657  *					user data.
1658  * @param	is_user_data_array	Set 1 if every dequeued user data will
1659  *					be written into out_user_data array.
1660  *					Set 0 if only the first user data will
1661  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1663  *					total successful operations count.
1664  * @param	dequeue_status		Driver written value to specify the
1665  *					dequeue status. Possible values:
1666  *					- 1: Successfully dequeued the number
1667  *					     of operations returned. The user
1668  *					     data previously set during enqueue
1669  *					     is stored in the "out_user_data".
1670  *					- 0: The number of operations returned
1671  *					     are completed and the user data is
1672  *					     stored in the "out_user_data", but
1673  *					     they are not freed from the queue
1674  *					     until
1675  *					     rte_cryptodev_raw_dequeue_done()
1676  *					     is called.
1677  *					- negative integer: Error occurred.
1678  * @return
1679  *   - The number of operations dequeued or completed but not freed from the
1680  *     queue, depends on "dequeue_status" value.
1681  */
1682 uint32_t
1683 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1684 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1685 	uint32_t max_nb_to_dequeue,
1686 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1687 	void **out_user_data, uint8_t is_user_data_array,
1688 	uint32_t *n_success, int *dequeue_status);
1689 
1690 /**
1691  * Dequeue a symmetric crypto processing.
1692  *
1693  * @param	ctx			The initialized raw data-path context
1694  *					data.
1695  * @param	dequeue_status		Driver written value to specify the
1696  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1698  *					     The user data is returned.
1699  *					- 0: The first operation in the queue
1700  *					     is completed and the user data
1701  *					     previously set during enqueue is
1702  *					     returned, but it is not freed from
1703  *					     the queue until
1704  *					     rte_cryptodev_raw_dequeue_done() is
1705  *					     called.
1706  *					- negative integer: Error occurred.
1707  * @param	op_status		Driver written value to specify
1708  *					operation status.
1709  * @return
1710  *   - The user data pointer retrieved from device queue or NULL if no
1711  *     operation is ready for dequeue.
1712  */
1713 __rte_experimental
1714 static __rte_always_inline void *
1715 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1716 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1717 {
1718 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1719 			op_status);
1720 }
1721 
1722 /**
 * Inform the queue pair that dequeue operations are finished.
1724  *
1725  * @param	ctx	The initialized raw data-path context data.
1726  * @param	n	The number of operations.
1727  * @return
1728  *   - On success return 0.
1729  *   - On failure return negative integer.
1730  */
1731 int
1732 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1733 		uint32_t n);
1734 
1735 /**
1736  * Add a user callback for a given crypto device and queue pair which will be
1737  * called on crypto ops enqueue.
1738  *
1739  * This API configures a function to be called for each burst of crypto ops
1740  * received on a given crypto device queue pair. The return value is a pointer
1741  * that can be used later to remove the callback using
1742  * rte_cryptodev_remove_enq_callback().
1743  *
1744  * Callbacks registered by application would not survive
1745  * rte_cryptodev_configure() as it reinitializes the callback list.
1746  * It is user responsibility to remove all installed callbacks before
1747  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1748  * Application is expected to call add API after rte_cryptodev_configure().
1749  *
1750  * Multiple functions can be registered per queue pair & they are called
1751  * in the order they were added. The API does not restrict on maximum number
1752  * of callbacks.
1753  *
1754  * @param	dev_id		The identifier of the device.
1755  * @param	qp_id		The index of the queue pair on which ops are
1756  *				to be enqueued for processing. The value
1757  *				must be in the range [0, nb_queue_pairs - 1]
1758  *				previously supplied to
1759  *				*rte_cryptodev_configure*.
1760  * @param	cb_fn		The callback function
1761  * @param	cb_arg		A generic pointer parameter which will be passed
1762  *				to each invocation of the callback function on
1763  *				this crypto device and queue pair.
1764  *
1765  * @return
1766  *  - NULL on error & rte_errno will contain the error code.
1767  *  - On success, a pointer value which can later be used to remove the
1768  *    callback.
1769  */
1770 struct rte_cryptodev_cb *
1771 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1772 			       uint16_t qp_id,
1773 			       rte_cryptodev_callback_fn cb_fn,
1774 			       void *cb_arg);
1775 
1776 /**
1777  * Remove a user callback function for given crypto device and queue pair.
1778  *
1779  * This function is used to remove enqueue callbacks that were added to a
1780  * crypto device queue pair using rte_cryptodev_add_enq_callback().
 *
1784  * @param	dev_id		The identifier of the device.
1785  * @param	qp_id		The index of the queue pair on which ops are
1786  *				to be enqueued. The value must be in the
1787  *				range [0, nb_queue_pairs - 1] previously
1788  *				supplied to *rte_cryptodev_configure*.
1789  * @param	cb		Pointer to user supplied callback created via
1790  *				rte_cryptodev_add_enq_callback().
1791  *
1792  * @return
1793  *   -  0: Success. Callback was removed.
1794  *   - <0: The dev_id or the qp_id is out of range, or the callback
1795  *         is NULL or not found for the crypto device queue pair.
1796  */
1797 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1798 				      uint16_t qp_id,
1799 				      struct rte_cryptodev_cb *cb);
1800 
1801 /**
1802  * Add a user callback for a given crypto device and queue pair which will be
1803  * called on crypto ops dequeue.
1804  *
1805  * This API configures a function to be called for each burst of crypto ops
1806  * received on a given crypto device queue pair. The return value is a pointer
1807  * that can be used later to remove the callback using
1808  * rte_cryptodev_remove_deq_callback().
1809  *
1810  * Callbacks registered by application would not survive
1811  * rte_cryptodev_configure() as it reinitializes the callback list.
1812  * It is user responsibility to remove all installed callbacks before
1813  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1814  * Application is expected to call add API after rte_cryptodev_configure().
1815  *
1816  * Multiple functions can be registered per queue pair & they are called
1817  * in the order they were added. The API does not restrict on maximum number
1818  * of callbacks.
1819  *
1820  * @param	dev_id		The identifier of the device.
1821  * @param	qp_id		The index of the queue pair on which ops are
1822  *				to be dequeued. The value must be in the
1823  *				range [0, nb_queue_pairs - 1] previously
1824  *				supplied to *rte_cryptodev_configure*.
1825  * @param	cb_fn		The callback function
1826  * @param	cb_arg		A generic pointer parameter which will be passed
1827  *				to each invocation of the callback function on
1828  *				this crypto device and queue pair.
1829  *
1830  * @return
1831  *   - NULL on error & rte_errno will contain the error code.
1832  *   - On success, a pointer value which can later be used to remove the
1833  *     callback.
1834  */
1835 struct rte_cryptodev_cb *
1836 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1837 			       uint16_t qp_id,
1838 			       rte_cryptodev_callback_fn cb_fn,
1839 			       void *cb_arg);
1840 
1841 /**
1842  * Remove a user callback function for given crypto device and queue pair.
1843  *
1844  * This function is used to remove dequeue callbacks that were added to a
1845  * crypto device queue pair using rte_cryptodev_add_deq_callback().
 *
1849  * @param	dev_id		The identifier of the device.
1850  * @param	qp_id		The index of the queue pair on which ops are
1851  *				to be dequeued. The value must be in the
1852  *				range [0, nb_queue_pairs - 1] previously
1853  *				supplied to *rte_cryptodev_configure*.
1854  * @param	cb		Pointer to user supplied callback created via
1855  *				rte_cryptodev_add_deq_callback().
1856  *
1857  * @return
1858  *   -  0: Success. Callback was removed.
1859  *   - <0: The dev_id or the qp_id is out of range, or the callback
1860  *         is NULL or not found for the crypto device queue pair.
1861  */
1862 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1863 				      uint16_t qp_id,
1864 				      struct rte_cryptodev_cb *cb);
1865 
1866 #include <rte_cryptodev_core.h>
1867 /**
1868  *
1869  * Dequeue a burst of processed crypto operations from a queue on the crypto
1870  * device. The dequeued operation are stored in *rte_crypto_op* structures
1871  * whose pointers are supplied in the *ops* array.
1872  *
1873  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1874  * actually dequeued, which is the number of *rte_crypto_op* data structures
1875  * effectively supplied into the *ops* array.
1876  *
1877  * A return value equal to *nb_ops* indicates that the queue contained
1878  * at least *nb_ops* operations, and this is likely to signify that other
1879  * processed operations remain in the devices output queue. Applications
1880  * implementing a "retrieve as many processed operations as possible" policy
1881  * can check this specific case and keep invoking the
1882  * rte_cryptodev_dequeue_burst() function until a value less than
1883  * *nb_ops* is returned.
1884  *
1885  * The rte_cryptodev_dequeue_burst() function does not provide any error
1886  * notification to avoid the corresponding overhead.
1887  *
1888  * @param	dev_id		The symmetric crypto device identifier
1889  * @param	qp_id		The index of the queue pair from which to
1890  *				retrieve processed packets. The value must be
1891  *				in the range [0, nb_queue_pair - 1] previously
1892  *				supplied to rte_cryptodev_configure().
1893  * @param	ops		The address of an array of pointers to
1894  *				*rte_crypto_op* structures that must be
1895  *				large enough to store *nb_ops* pointers in it.
1896  * @param	nb_ops		The maximum number of operations to dequeue.
1897  *
1898  * @return
1899  *   - The number of operations actually dequeued, which is the number
1900  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1901  *   *ops* array.
1902  */
1903 static inline uint16_t
1904 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1905 		struct rte_crypto_op **ops, uint16_t nb_ops)
1906 {
1907 	const struct rte_crypto_fp_ops *fp_ops;
1908 	void *qp;
1909 
1910 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1911 
1912 	fp_ops = &rte_crypto_fp_ops[dev_id];
1913 	qp = fp_ops->qp.data[qp_id];
1914 
1915 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1916 
1917 #ifdef RTE_CRYPTO_CALLBACKS
1918 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1919 		struct rte_cryptodev_cb_rcu *list;
1920 		struct rte_cryptodev_cb *cb;
1921 
1922 		/* rte_memory_order_release memory order was used when the
1923 		 * call back was inserted into the list.
1924 		 * Since there is a clear dependency between loading
1925 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1926 		 * not required.
1927 		 */
1928 		list = &fp_ops->qp.deq_cb[qp_id];
1929 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1930 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1931 
1932 		while (cb != NULL) {
1933 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1934 					cb->arg);
1935 			cb = cb->next;
1936 		};
1937 
1938 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1939 	}
1940 #endif
1941 	return nb_ops;
1942 }
1943 
1944 /**
1945  * Enqueue a burst of operations for processing on a crypto device.
1946  *
1947  * The rte_cryptodev_enqueue_burst() function is invoked to place
1948  * crypto operations on the queue *qp_id* of the device designated by
1949  * its *dev_id*.
1950  *
1951  * The *nb_ops* parameter is the number of operations to process which are
1952  * supplied in the *ops* array of *rte_crypto_op* structures.
1953  *
1954  * The rte_cryptodev_enqueue_burst() function returns the number of
1955  * operations it actually enqueued for processing. A return value equal to
1956  * *nb_ops* means that all packets have been enqueued.
1957  *
1958  * @param	dev_id		The identifier of the device.
1959  * @param	qp_id		The index of the queue pair which packets are
1960  *				to be enqueued for processing. The value
1961  *				must be in the range [0, nb_queue_pairs - 1]
1962  *				previously supplied to
1963  *				 *rte_cryptodev_configure*.
1964  * @param	ops		The address of an array of *nb_ops* pointers
1965  *				to *rte_crypto_op* structures which contain
1966  *				the crypto operations to be processed.
1967  * @param	nb_ops		The number of operations to process.
1968  *
1969  * @return
1970  * The number of operations actually enqueued on the crypto device. The return
1971  * value can be less than the value of the *nb_ops* parameter when the
1972  * crypto devices queue is full or if invalid parameters are specified in
1973  * a *rte_crypto_op*.
1974  */
1975 static inline uint16_t
1976 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1977 		struct rte_crypto_op **ops, uint16_t nb_ops)
1978 {
1979 	const struct rte_crypto_fp_ops *fp_ops;
1980 	void *qp;
1981 
1982 	fp_ops = &rte_crypto_fp_ops[dev_id];
1983 	qp = fp_ops->qp.data[qp_id];
1984 #ifdef RTE_CRYPTO_CALLBACKS
1985 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1986 		struct rte_cryptodev_cb_rcu *list;
1987 		struct rte_cryptodev_cb *cb;
1988 
1989 		/* rte_memory_order_release memory order was used when the
1990 		 * call back was inserted into the list.
1991 		 * Since there is a clear dependency between loading
1992 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1993 		 * not required.
1994 		 */
1995 		list = &fp_ops->qp.enq_cb[qp_id];
1996 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1997 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1998 
1999 		while (cb != NULL) {
2000 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
2001 					cb->arg);
2002 			cb = cb->next;
2003 		};
2004 
2005 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2006 	}
2007 #endif
2008 
2009 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2010 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2011 }
2012 
2013 
2014 
2015 #ifdef __cplusplus
2016 }
2017 #endif
2018 
2019 #endif /* _RTE_CRYPTODEV_H_ */
2020