1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include <rte_compat.h>
22 #include "rte_kvargs.h"
23 #include "rte_crypto.h"
24 #include <rte_common.h>
25 #include <rte_rcu_qsbr.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
29 /**
30  * @internal Logtype used for cryptodev related messages.
31  */
32 extern int rte_cryptodev_logtype;
33 #define RTE_LOGTYPE_CRYPTODEV rte_cryptodev_logtype
34 
35 /* Logging Macros */
36 #define CDEV_LOG_ERR(...) \
37 	RTE_LOG_LINE_PREFIX(ERR, CRYPTODEV, \
38 		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
39 
40 #define CDEV_LOG_INFO(...) \
41 	RTE_LOG_LINE(INFO, CRYPTODEV, "" __VA_ARGS__)
42 
43 #define CDEV_LOG_DEBUG(...) \
44 	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
45 		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
46 
47 #define CDEV_PMD_TRACE(...) \
48 	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
49 		"[%s] %s: ", dev RTE_LOG_COMMA __func__, __VA_ARGS__)
50 
51 /**
52  * A macro that points to an offset from the start
53  * of the crypto operation structure (rte_crypto_op)
54  *
55  * The returned pointer is cast to type t.
56  *
57  * @param c
58  *   The crypto operation.
59  * @param o
60  *   The offset from the start of the crypto operation.
61  * @param t
62  *   The type to cast the result into.
63  */
64 #define rte_crypto_op_ctod_offset(c, t, o)	\
65 	((t)((char *)(c) + (o)))
66 
67 /**
68  * A macro that returns the physical address that points
69  * to an offset from the start of the crypto operation
70  * (rte_crypto_op)
71  *
72  * @param c
73  *   The crypto operation.
74  * @param o
75  *   The offset from the start of the crypto operation
76  *   to calculate address from.
77  */
78 #define rte_crypto_op_ctophys_offset(c, o)	\
79 	(rte_iova_t)((c)->phys_addr + (o))
80 
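/*
 * Illustrative usage sketch (not part of the upstream header): reading the
 * IV that an application stored at a known offset from the start of an
 * rte_crypto_op. IV_OFFSET below is a hypothetical application-defined
 * constant, not something defined by this API.
 *
 *	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 *			sizeof(struct rte_crypto_sym_op))
 *
 *	uint8_t *iv_va = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *	rte_iova_t iv_pa = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
 */
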
81 /**
82  * Crypto parameters range description
83  */
84 struct rte_crypto_param_range {
85 	uint16_t min;	/**< minimum size */
86 	uint16_t max;	/**< maximum size */
87 	uint16_t increment;
88 	/**< if a range of sizes is supported,
89 	 * this parameter is used to indicate
90 	 * increments in byte size that are supported
91 	 * between the minimum and maximum
92 	 */
93 };
94 
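/*
 * Illustrative sketch (not part of the upstream header): checking whether a
 * given size falls inside a struct rte_crypto_param_range, following the
 * min/max/increment semantics described above. The helper name
 * param_range_check() is hypothetical.
 *
 *	static int
 *	param_range_check(uint16_t size, const struct rte_crypto_param_range *r)
 *	{
 *		if (size < r->min || size > r->max)
 *			return -1;
 *		if (r->increment == 0)
 *			return (size == r->min) ? 0 : -1;
 *		return ((size - r->min) % r->increment == 0) ? 0 : -1;
 *	}
 */
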
95 /**
96  * Data-unit supported lengths of cipher algorithms.
97  * A bit can represent any set of data-unit sizes
98  * (single size, multiple size, range, etc).
99  */
100 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
101 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
103 
104 /**
105  * Symmetric Crypto Capability
106  */
107 struct rte_cryptodev_symmetric_capability {
108 	enum rte_crypto_sym_xform_type xform_type;
109 	/**< Transform type : Authentication / Cipher / AEAD */
110 	union {
111 		struct {
112 			enum rte_crypto_auth_algorithm algo;
113 			/**< authentication algorithm */
114 			uint16_t block_size;
115 			/**< algorithm block size */
116 			struct rte_crypto_param_range key_size;
117 			/**< auth key size range */
118 			struct rte_crypto_param_range digest_size;
119 			/**< digest size range */
120 			struct rte_crypto_param_range aad_size;
121 			/**< Additional authentication data size range */
122 			struct rte_crypto_param_range iv_size;
123 			/**< Initialisation vector data size range */
124 		} auth;
125 		/**< Symmetric Authentication transform capabilities */
126 		struct {
127 			enum rte_crypto_cipher_algorithm algo;
128 			/**< cipher algorithm */
129 			uint16_t block_size;
130 			/**< algorithm block size */
131 			struct rte_crypto_param_range key_size;
132 			/**< cipher key size range */
133 			struct rte_crypto_param_range iv_size;
134 			/**< Initialisation vector data size range */
135 			uint32_t dataunit_set;
136 			/**<
137 			 * Supported data-unit lengths:
138 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
139 			 * or 0 for lengths defined in the algorithm standard.
140 			 */
141 		} cipher;
142 		/**< Symmetric Cipher transform capabilities */
143 		struct {
144 			enum rte_crypto_aead_algorithm algo;
145 			/**< AEAD algorithm */
146 			uint16_t block_size;
147 			/**< algorithm block size */
148 			struct rte_crypto_param_range key_size;
149 			/**< AEAD key size range */
150 			struct rte_crypto_param_range digest_size;
151 			/**< digest size range */
152 			struct rte_crypto_param_range aad_size;
153 			/**< Additional authentication data size range */
154 			struct rte_crypto_param_range iv_size;
155 			/**< Initialisation vector data size range */
156 		} aead;
157 	};
158 };
159 
160 /**
161  * Asymmetric Xform Crypto Capability
162  */
163 struct rte_cryptodev_asymmetric_xform_capability {
164 	enum rte_crypto_asym_xform_type xform_type;
165 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
166 
167 	uint32_t op_types;
168 	/**<
169 	 * Bitmask for supported rte_crypto_asym_op_type or
170 	 * rte_crypto_asym_ke_type. Which enum is used is determined
171 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
172 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
173 	 * it is rte_crypto_asym_op_type.
174 	 */
175 
176 	__extension__
177 	union {
178 		struct rte_crypto_param_range modlen;
179 		/**< Range of modulus length supported by modulus based xform.
180 		 * Value 0 mean implementation default
181 		 * Value 0 means implementation default
182 
183 		uint8_t internal_rng;
184 		/**< Availability of random number generator for Elliptic curve based xform.
185 		 * Value 0 means unavailable, and the application should pass the required
186 		 * random value. Otherwise, the PMD will internally compute the random number.
187 		 */
188 	};
189 
190 	uint64_t hash_algos;
191 	/**< Bitmask of hash algorithms supported for op_type. */
192 };
193 
194 /**
195  * Asymmetric Crypto Capability
196  */
197 struct rte_cryptodev_asymmetric_capability {
198 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
199 };
200 
201 
202 /** Structure used to capture a capability of a crypto device */
203 struct rte_cryptodev_capabilities {
204 	enum rte_crypto_op_type op;
205 	/**< Operation type */
206 
207 	union {
208 		struct rte_cryptodev_symmetric_capability sym;
209 		/**< Symmetric operation capability parameters */
210 		struct rte_cryptodev_asymmetric_capability asym;
211 		/**< Asymmetric operation capability parameters */
212 	};
213 };
214 
215 /** Structure used to describe crypto algorithms */
216 struct rte_cryptodev_sym_capability_idx {
217 	enum rte_crypto_sym_xform_type type;
218 	union {
219 		enum rte_crypto_cipher_algorithm cipher;
220 		enum rte_crypto_auth_algorithm auth;
221 		enum rte_crypto_aead_algorithm aead;
222 	} algo;
223 };
224 
225 /**
226  * Structure used to describe asymmetric crypto xforms
227  * Each xform maps to one asym algorithm.
228  */
229 struct rte_cryptodev_asym_capability_idx {
230 	enum rte_crypto_asym_xform_type type;
231 	/**< Asymmetric xform (algo) type */
232 };
233 
234 /**
235  * Provide capabilities available for defined device and algorithm
236  *
237  * @param	dev_id		The identifier of the device.
238  * @param	idx		Description of crypto algorithms.
239  *
240  * @return
241  *   - Return description of the symmetric crypto capability if it exists.
242  *   - Return NULL if the capability does not exist.
243  */
244 const struct rte_cryptodev_symmetric_capability *
245 rte_cryptodev_sym_capability_get(uint8_t dev_id,
246 		const struct rte_cryptodev_sym_capability_idx *idx);
247 
248 /**
249  *  Provide capabilities available for defined device and xform
250  *
251  * @param	dev_id		The identifier of the device.
252  * @param	idx		Description of asym crypto xform.
253  *
254  * @return
255  *   - Return description of the asymmetric crypto capability if it exists.
256  *   - Return NULL if the capability does not exist.
257  */
258 const struct rte_cryptodev_asymmetric_xform_capability *
259 rte_cryptodev_asym_capability_get(uint8_t dev_id,
260 		const struct rte_cryptodev_asym_capability_idx *idx);
261 
262 /**
263  * Check if key size and initialization vector size are supported
264  * in crypto cipher capability
265  *
266  * @param	capability	Description of the symmetric crypto capability.
267  * @param	key_size	Cipher key size.
268  * @param	iv_size		Cipher initial vector size.
269  *
270  * @return
271  *   - Return 0 if the parameters are in range of the capability.
272  *   - Return -1 if the parameters are out of range of the capability.
273  */
274 int
275 rte_cryptodev_sym_capability_check_cipher(
276 		const struct rte_cryptodev_symmetric_capability *capability,
277 		uint16_t key_size, uint16_t iv_size);
278 
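/*
 * Illustrative usage sketch (not part of the upstream header): querying a
 * device for AES-CBC support and validating the key and IV sizes the
 * application intends to use.
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		return -ENOTSUP;
 */
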
279 /**
280  * Check if key size, digest size and initialization vector size are supported
281  * in crypto auth capability
282  *
283  * @param	capability	Description of the symmetric crypto capability.
284  * @param	key_size	Auth key size.
285  * @param	digest_size	Auth digest size.
286  * @param	iv_size		Auth initial vector size.
287  *
288  * @return
289  *   - Return 0 if the parameters are in range of the capability.
290  *   - Return -1 if the parameters are out of range of the capability.
291  */
292 int
293 rte_cryptodev_sym_capability_check_auth(
294 		const struct rte_cryptodev_symmetric_capability *capability,
295 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
296 
297 /**
298  * Check if key, digest, AAD and initial vector sizes are supported
299  * in crypto AEAD capability
300  *
301  * @param	capability	Description of the symmetric crypto capability.
302  * @param	key_size	AEAD key size.
303  * @param	digest_size	AEAD digest size.
304  * @param	aad_size	AEAD AAD size.
305  * @param	iv_size		AEAD IV size.
306  *
307  * @return
308  *   - Return 0 if the parameters are in range of the capability.
309  *   - Return -1 if the parameters are out of range of the capability.
310  */
311 int
312 rte_cryptodev_sym_capability_check_aead(
313 		const struct rte_cryptodev_symmetric_capability *capability,
314 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
315 		uint16_t iv_size);
316 
317 /**
318  * Check if op type is supported
319  *
320  * @param	capability	Description of the asymmetric crypto capability.
321  * @param	op_type		op type
322  *
323  * @return
324  *   - Return 1 if the op type is supported
325  *   - Return 0 if unsupported
326  */
327 int
328 rte_cryptodev_asym_xform_capability_check_optype(
329 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
330 		enum rte_crypto_asym_op_type op_type);
331 
332 /**
333  * Check if modulus length is in supported range
334  *
335  * @param	capability	Description of the asymmetric crypto capability.
336  * @param	modlen		modulus length.
337  *
338  * @return
339  *   - Return 0 if the parameters are in range of the capability.
340  *   - Return -1 if the parameters are out of range of the capability.
341  */
342 int
343 rte_cryptodev_asym_xform_capability_check_modlen(
344 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
345 		uint16_t modlen);
346 
347 /**
348  * Check if hash algorithm is supported.
349  *
350  * @param	capability	Asymmetric crypto capability.
351  * @param	hash		Hash algorithm.
352  *
353  * @return
354  *   - Return true if the hash algorithm is supported.
355  *   - Return false if the hash algorithm is not supported.
356  */
357 bool
358 rte_cryptodev_asym_xform_capability_check_hash(
359 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
360 	enum rte_crypto_auth_algorithm hash);
361 
362 /**
363  * Provide the cipher algorithm enum, given an algorithm string
364  *
365  * @param	algo_enum	A pointer to the cipher algorithm
366  *				enum to be filled
367  * @param	algo_string	Cipher algorithm string
368  *
369  * @return
370  * - Return -1 if string is not valid
371  * - Return 0 if the string is valid
372  */
373 int
374 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
375 		const char *algo_string);
376 
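/*
 * Illustrative usage sketch (not part of the upstream header): mapping a
 * user-supplied algorithm name, e.g. from a command line, to the
 * corresponding enum value. The string "aes-cbc" is given as an example.
 *
 *	enum rte_crypto_cipher_algorithm cipher_algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&cipher_algo, "aes-cbc") != 0)
 *		printf("Unknown cipher algorithm\n");
 */
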
377 /**
378  * Provide the authentication algorithm enum, given an algorithm string
379  *
380  * @param	algo_enum	A pointer to the authentication algorithm
381  *				enum to be filled
382  * @param	algo_string	Authentication algo string
383  *
384  * @return
385  * - Return -1 if string is not valid
386  * - Return 0 if the string is valid
387  */
388 int
389 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
390 		const char *algo_string);
391 
392 /**
393  * Provide the AEAD algorithm enum, given an algorithm string
394  *
395  * @param	algo_enum	A pointer to the AEAD algorithm
396  *				enum to be filled
397  * @param	algo_string	AEAD algorithm string
398  *
399  * @return
400  * - Return -1 if string is not valid
401  * - Return 0 if the string is valid
402  */
403 int
404 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
405 		const char *algo_string);
406 
407 /**
408  * Provide the Asymmetric xform enum, given an xform string
409  *
410  * @param	xform_enum	A pointer to the xform type
411  *				enum to be filled
412  * @param	xform_string	xform string
413  *
414  * @return
415  * - Return -1 if string is not valid
416  * - Return 0 if the string is valid
417  */
418 int
419 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
420 		const char *xform_string);
421 
422 /**
423  * Provide the cipher algorithm string, given an algorithm enum.
424  *
425  * @param	algo_enum	cipher algorithm enum
426  *
427  * @return
428  * - Return NULL if enum is not valid
429  * - Return algo_string corresponding to enum
430  */
431 __rte_experimental
432 const char *
433 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
434 
435 /**
436  * Provide the authentication algorithm string, given an algorithm enum.
437  *
438  * @param	algo_enum	auth algorithm enum
439  *
440  * @return
441  * - Return NULL if enum is not valid
442  * - Return algo_string corresponding to enum
443  */
444 __rte_experimental
445 const char *
446 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
447 
448 /**
449  * Provide the AEAD algorithm string, given an algorithm enum.
450  *
451  * @param	algo_enum	AEAD algorithm enum
452  *
453  * @return
454  * - Return NULL if enum is not valid
455  * - Return algo_string corresponding to enum
456  */
457 __rte_experimental
458 const char *
459 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
460 
461 /**
462  * Provide the Asymmetric xform string, given an xform enum.
463  *
464  * @param	xform_enum	xform type enum
465  *
466  * @return
467  * - Return NULL, if enum is not valid.
468  * - Return xform string, for valid enum.
469  */
470 __rte_experimental
471 const char *
472 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
473 
474 
475 /** Macro used at the end of a crypto PMD capabilities list */
476 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
477 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
478 
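/*
 * Illustrative sketch (not part of the upstream header): a PMD capability
 * array is terminated with the macro above so that users of
 * rte_cryptodev_info_get() can iterate until the end marker. The array
 * contents below are a hypothetical example, not a real PMD's table.
 *
 *	static const struct rte_cryptodev_capabilities pmd_capabilities[] = {
 *		{
 *			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			.sym = {
 *				.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *				.cipher = {
 *					.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *					.block_size = 16,
 *					.key_size = { .min = 16, .max = 32, .increment = 8 },
 *					.iv_size = { .min = 16, .max = 16, .increment = 0 },
 *				},
 *			},
 *		},
 *		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 *	};
 */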
479 
480 /**
481  * Crypto device supported feature flags
482  *
483  * Note:
484  * New feature flags should be added to the end of the list
485  *
486  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
487  */
488 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
489 /**< Symmetric crypto operations are supported */
490 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
491 /**< Asymmetric crypto operations are supported */
492 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
493 /**< Chaining of symmetric crypto operations is supported */
494 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
495 /**< Utilises CPU SIMD SSE instructions */
496 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
497 /**< Utilises CPU SIMD AVX instructions */
498 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
499 /**< Utilises CPU SIMD AVX2 instructions */
500 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
501 /**< Utilises CPU AES-NI instructions */
502 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
503 /**< Operations are off-loaded to an
504  * external hardware accelerator
505  */
506 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
507 /**< Utilises CPU SIMD AVX512 instructions */
508 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
509 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
510  * are supported
511  */
512 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
513 /**< Out-of-place Scatter-gather (SGL) buffers are
514  * supported in input and output
515  */
516 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
517 /**< Out-of-place Scatter-gather (SGL) buffers are supported
518  * in input, combined with linear buffers (LB), with a
519  * single segment in output
520  */
521 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
522 /**< Out-of-place Scatter-gather (SGL) buffers are supported
523  * in output, combined with linear buffers (LB) in input
524  */
525 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
526 /**< Out-of-place linear buffers (LB) are supported in input and output */
527 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
528 /**< Utilises CPU NEON instructions */
529 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
530 /**< Utilises ARM CPU Cryptographic Extensions */
531 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
532 /**< Support Security Protocol Processing */
533 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
534 /**< Support RSA Private Key OP with exponent */
535 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
536 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
537 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
538 /**< Support encrypted-digest operations where digest is appended to data */
539 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
540 /**< Support asymmetric session-less operations */
541 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
542 /**< Support symmetric cpu-crypto processing */
543 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
544 /**< Support symmetric session-less operations */
545 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
546 /**< Support operations on data which is not byte aligned */
547 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
548 /**< Support accelerator specific symmetric raw data-path APIs */
549 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
550 /**< Support operations on multiple data-units message */
551 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
552 /**< Support wrapped key in cipher xform  */
553 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
554 /**< Support inner checksum computation/verification */
555 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
556 /**< Support Rx injection after security processing */
557 
558 /**
559  * Get the name of a crypto device feature flag
560  *
561  * @param	flag	The mask describing the flag.
562  *
563  * @return
564  *   The name of this flag, or NULL if it's not a valid feature flag.
565  */
566 const char *
567 rte_cryptodev_get_feature_name(uint64_t flag);
568 
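/*
 * Illustrative usage sketch (not part of the upstream header): printing the
 * names of all feature flags advertised by a device.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if (name != NULL && (info.feature_flags & flag))
 *			printf("%s\n", name);
 *	}
 */
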
569 /**  Crypto device information */
570 /* Structure rte_cryptodev_info 8< */
571 struct rte_cryptodev_info {
572 	const char *driver_name;	/**< Driver name. */
573 	uint8_t driver_id;		/**< Driver identifier */
574 	struct rte_device *device;	/**< Generic device information. */
575 
576 	uint64_t feature_flags;
577 	/**< Feature flags expose HW/SW features for the given device */
578 
579 	const struct rte_cryptodev_capabilities *capabilities;
580 	/**< Array of the device's supported capabilities */
581 
582 	unsigned max_nb_queue_pairs;
583 	/**< Maximum number of queue pairs supported by device. */
584 
585 	uint16_t min_mbuf_headroom_req;
586 	/**< Minimum mbuf headroom required by device */
587 
588 	uint16_t min_mbuf_tailroom_req;
589 	/**< Minimum mbuf tailroom required by device */
590 
591 	struct {
592 		unsigned max_nb_sessions;
593 		/**< Maximum number of sessions supported by device.
594 		 * If 0, the device does not have any limit on the
595 		 * number of sessions that can be used.
596 		 */
597 	} sym;
598 };
599 /* >8 End of structure rte_cryptodev_info. */
600 
601 #define RTE_CRYPTODEV_DETACHED  (0)
602 #define RTE_CRYPTODEV_ATTACHED  (1)
603 
604 /** Definitions of Crypto device event types */
605 enum rte_cryptodev_event_type {
606 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
607 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
608 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
609 };
610 
611 /** Crypto device queue pair configuration structure. */
612 /* Structure rte_cryptodev_qp_conf 8<*/
613 struct rte_cryptodev_qp_conf {
614 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
615 	struct rte_mempool *mp_session;
616 	/**< The mempool used for creating sessions in session-less mode */
617 };
618 /* >8 End of structure rte_cryptodev_qp_conf. */
619 
620 /**
621  * Function type used for processing crypto ops when enqueue/dequeue burst is
622  * called.
623  *
624  * The callback function is called on enqueue/dequeue burst immediately.
625  *
626  * @param	dev_id		The identifier of the device.
627  * @param	qp_id		The index of the queue pair on which ops are
628  *				enqueued/dequeued. The value must be in the
629  *				range [0, nb_queue_pairs - 1] previously
630  *				supplied to *rte_cryptodev_configure*.
631  * @param	ops		The address of an array of *nb_ops* pointers
632  *				to *rte_crypto_op* structures which contain
633  *				the crypto operations to be processed.
634  * @param	nb_ops		The number of operations to process.
635  * @param	user_param	The arbitrary user parameter passed in by the
636  *				application when the callback was originally
637  *				registered.
638  * @return			The number of ops to be enqueued to the
639  *				crypto device.
640  */
641 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
642 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
643 
644 /**
645  * Typedef for application callback function to be registered by application
646  * software for notification of device events
647  *
648  * @param	dev_id	Crypto device identifier
649  * @param	event	Crypto device event to register for notification of.
650  * @param	cb_arg	User specified parameter to be passed to the
651  *			user's callback function.
652  */
653 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
654 		enum rte_cryptodev_event_type event, void *cb_arg);
655 
656 
657 /** Crypto Device statistics */
658 struct rte_cryptodev_stats {
659 	uint64_t enqueued_count;
660 	/**< Count of all operations enqueued */
661 	uint64_t dequeued_count;
662 	/**< Count of all operations dequeued */
663 
664 	uint64_t enqueue_err_count;
665 	/**< Total error count on operations enqueued */
666 	uint64_t dequeue_err_count;
667 	/**< Total error count on operations dequeued */
668 };
669 
670 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
671 /**< Max length of name of crypto PMD */
672 
673 /**
674  * Get the device identifier for the named crypto device.
675  *
676  * @param	name	device name to select the device structure.
677  *
678  * @return
679  *   - Returns crypto device identifier on success.
680  *   - Return -1 on failure to find named crypto device.
681  */
682 int
683 rte_cryptodev_get_dev_id(const char *name);
684 
685 /**
686  * Get the crypto device name given a device identifier.
687  *
688  * @param dev_id
689  *   The identifier of the device
690  *
691  * @return
692  *   - Returns crypto device name.
693  *   - Returns NULL if crypto device is not present.
694  */
695 const char *
696 rte_cryptodev_name_get(uint8_t dev_id);
697 
698 /**
699  * Get the total number of crypto devices that have been successfully
700  * initialised.
701  *
702  * @return
703  *   - The total number of usable crypto devices.
704  */
705 uint8_t
706 rte_cryptodev_count(void);
707 
708 /**
709  * Get the number of crypto devices of a given driver type.
710  *
711  * @param	driver_id	driver identifier.
712  *
713  * @return
714  *   Returns the number of crypto devices.
715  */
716 uint8_t
717 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
718 
719 /**
720  * Get number and identifiers of attached crypto devices that
721  * use the same crypto driver.
722  *
723  * @param	driver_name	driver name.
724  * @param	devices		output devices identifiers.
725  * @param	nb_devices	maximal number of devices.
726  *
727  * @return
728  *   Returns the number of attached crypto devices.
729  */
730 uint8_t
731 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
732 		uint8_t nb_devices);
733 /**
734  * Return the NUMA socket to which a device is connected
735  *
736  * @param dev_id
737  *   The identifier of the device
738  * @return
739  *   The NUMA socket id to which the device is connected or
740  *   a default of zero if the socket could not be determined.
741  *   -1 if the dev_id value is out of range.
742  */
743 int
744 rte_cryptodev_socket_id(uint8_t dev_id);
745 
746 /** Crypto device configuration structure */
747 /* Structure rte_cryptodev_config 8< */
748 struct rte_cryptodev_config {
749 	int socket_id;			/**< Socket to allocate resources on */
750 	uint16_t nb_queue_pairs;
751 	/**< Number of queue pairs to configure on device */
752 	uint64_t ff_disable;
753 	/**< Feature flags to be disabled. Only the following features are
754 	 * allowed to be disabled,
755 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
756 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
757 	 *  - RTE_CRYPTODEV_FF_SECURITY
758 	 */
759 };
760 /* >8 End of structure rte_cryptodev_config. */
761 
762 /**
763  * Configure a device.
764  *
765  * This function must be invoked first before any other function in the
766  * API. This function can also be re-invoked when a device is in the
767  * stopped state.
768  *
769  * @param	dev_id		The identifier of the device to configure.
770  * @param	config		The crypto device configuration structure.
771  *
772  * @return
773  *   - 0: Success, device configured.
774  *   - <0: Error code returned by the driver configuration function.
775  */
776 int
777 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
778 
779 /**
780  * Start a device.
781  *
782  * The device start step is the last one and consists of setting the configured
783  * offload features and starting the transmit and the receive units of the
784  * device.
785  * On success, all basic functions exported by the API (link status,
786  * receive/transmit, and so on) can be invoked.
787  *
788  * @param dev_id
789  *   The identifier of the device.
790  * @return
791  *   - 0: Success, device started.
792  *   - <0: Error code of the driver device start function.
793  */
794 int
795 rte_cryptodev_start(uint8_t dev_id);
796 
797 /**
798  * Stop a device. The device can be restarted with a call to
799  * rte_cryptodev_start()
800  *
801  * @param	dev_id		The identifier of the device.
802  */
803 void
804 rte_cryptodev_stop(uint8_t dev_id);
805 
806 /**
807  * Close a device. The device cannot be restarted!
808  *
809  * @param	dev_id		The identifier of the device.
810  *
811  * @return
812  *  - 0 on successfully closing device
813  *  - <0 on failure to close device
814  */
815 int
816 rte_cryptodev_close(uint8_t dev_id);
817 
818 /**
819  * Allocate and set up a queue pair for a device.
820  *
821  *
822  * @param	dev_id		The identifier of the device.
823  * @param	queue_pair_id	The index of the queue pairs to set up. The
824  *				value must be in the range [0, nb_queue_pair
825  *				- 1] previously supplied to
826  *				rte_cryptodev_configure().
827  * @param	qp_conf		The pointer to the configuration data to be
828  *				used for the queue pair.
829  * @param	socket_id	The *socket_id* argument is the socket
830  *				identifier in case of NUMA. The value can be
831  *				*SOCKET_ID_ANY* if there is no NUMA constraint
832  *				for the DMA memory allocated for the receive
833  *				queue pair.
834  *
835  * @return
836  *   - 0: Success, queue pair correctly set up.
837  *   - <0: Queue pair configuration failed
838  */
839 int
840 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
841 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
842 
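/*
 * Illustrative usage sketch (not part of the upstream header): the typical
 * device bring-up sequence of configure, queue pair setup and start. The
 * session_pool pointer is assumed to have been created beforehand, e.g.
 * with rte_cryptodev_sym_session_pool_create().
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_cryptodev_socket_id(dev_id)) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
 */
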
843 /**
844  * Get the status of a queue pair set up on a specific crypto device
845  *
846  * @param	dev_id		Crypto device identifier.
847  * @param	queue_pair_id	The index of the queue pair to query. The
848  *				value must be in the range [0, nb_queue_pair
849  *				- 1] previously supplied to
850  *				rte_cryptodev_configure().
851  * @return
852  *   - 0: qp was not configured
853  *   - 1: qp was configured
854  *   - -EINVAL: device was not configured
855  */
856 int
857 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
858 
859 /**
860  * Get the number of queue pairs on a specific crypto device
861  *
862  * @param	dev_id		Crypto device identifier.
863  * @return
864  *   - The number of configured queue pairs.
865  */
866 uint16_t
867 rte_cryptodev_queue_pair_count(uint8_t dev_id);
868 
869 
870 /**
871  * Retrieve the general I/O statistics of a device.
872  *
873  * @param	dev_id		The identifier of the device.
874  * @param	stats		A pointer to a structure of type
875  *				*rte_cryptodev_stats* to be filled with the
876  *				values of device counters.
877  * @return
878  *   - Zero if successful.
879  *   - Non-zero otherwise.
880  */
881 int
882 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
883 
884 /**
885  * Reset the general I/O statistics of a device.
886  *
887  * @param	dev_id		The identifier of the device.
888  */
889 void
890 rte_cryptodev_stats_reset(uint8_t dev_id);
891 
892 /**
893  * Retrieve the contextual information of a device.
894  *
895  * @param	dev_id		The identifier of the device.
896  * @param	dev_info	A pointer to a structure of type
897  *				*rte_cryptodev_info* to be filled with the
898  *				contextual information of the device.
899  *
900  * @note The capabilities field of dev_info is set to point to the first
901  * element of an array of struct rte_cryptodev_capabilities. The element after
902  * the last valid element has its op field set to
903  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
904  */
905 void
906 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
907 
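/*
 * Illustrative usage sketch (not part of the upstream header): walking the
 * capabilities array returned in dev_info until the terminating element
 * whose op field is RTE_CRYPTO_OP_TYPE_UNDEFINED.
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (cap = info.capabilities;
 *	     cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
 *		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
 *		    cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD)
 *			printf("AEAD algo %d supported\n", cap->sym.aead.algo);
 *	}
 */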
908 
909 /**
910  * Register a callback function for specific device id.
911  *
912  * @param	dev_id		Device id.
913  * @param	event		The event of interest.
914  * @param	cb_fn		User supplied callback function to be called.
915  * @param	cb_arg		Pointer to the parameters for the registered
916  *				callback.
917  *
918  * @return
919  *  - On success, zero.
920  *  - On failure, a negative value.
921  */
922 int
923 rte_cryptodev_callback_register(uint8_t dev_id,
924 		enum rte_cryptodev_event_type event,
925 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
926 
927 /**
928  * Unregister a callback function for specific device id.
929  *
930  * @param	dev_id		The device identifier.
931  * @param	event		The event of interest.
932  * @param	cb_fn		User supplied callback function to be called.
933  * @param	cb_arg		Pointer to the parameters for the registered
934  *				callback.
935  *
936  * @return
937  *  - On success, zero.
938  *  - On failure, a negative value.
939  */
940 int
941 rte_cryptodev_callback_unregister(uint8_t dev_id,
942 		enum rte_cryptodev_event_type event,
943 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
944 
945 /**
946  * @warning
947  * @b EXPERIMENTAL: this API may change without prior notice.
948  *
949  * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR
950  * events.
951  *
952  * @param          dev_id	The device identifier.
953  * @param          qp_id	Queue pair index to be queried.
954  *
955  * @return
956  *   - 1 if requested queue has a pending event.
957  *   - 0 if no pending event is found.
958  *   - a negative value on failure
959  */
960 __rte_experimental
961 int
962 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
963 
964 struct rte_cryptodev_callback;
965 
966 /** Structure to keep track of registered callbacks */
967 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
968 
969 /**
970  * Structure used to hold information about the callbacks to be called for a
971  * queue pair on enqueue/dequeue.
972  */
973 struct rte_cryptodev_cb {
974 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
975 	/**< Pointer to next callback */
976 	rte_cryptodev_callback_fn fn;
977 	/**< Pointer to callback function */
978 	void *arg;
979 	/**< Pointer to argument */
980 };
981 
982 /**
983  * @internal
984  * Structure used to hold information about the RCU for a queue pair.
985  */
986 struct rte_cryptodev_cb_rcu {
987 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
988 	/**< Pointer to next callback */
989 	struct rte_rcu_qsbr *qsbr;
990 	/**< RCU QSBR variable per queue pair */
991 };
992 
993 /**
994  * Get the security context for the cryptodev.
995  *
996  * @param dev_id
997  *   The device identifier.
998  * @return
999  *   - NULL on error.
1000  *   - Pointer to security context on success.
1001  */
1002 void *
1003 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1004 
1005 /**
1006  * Create a symmetric session mempool.
1007  *
1008  * @param name
1009  *   The unique mempool name.
1010  * @param nb_elts
1011  *   The number of elements in the mempool.
1012  * @param elt_size
1013  *   The size of the element. This should be the size of the cryptodev PMD
1014  *   session private data obtained through
1015  *   rte_cryptodev_sym_get_private_session_size() function call.
1016  *   For the user who wants to use the same mempool for heterogeneous PMDs
1017  *   this value should be the maximum value of their private session sizes.
1018  *   Please note the created mempool will have a bigger element size than this
1019  *   value, as the necessary session header and possible padding are added
1020  *   into each element.
1021  * @param cache_size
1022  *   The number of per-lcore cache elements
1023  * @param priv_size
1024  *   The private data size of each session.
1025  * @param socket_id
1026  *   The *socket_id* argument is the socket identifier in the case of
1027  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1028  *   constraint for the reserved zone.
1029  *
1030  * @return
1031  *  - On success returns the created session mempool pointer
1032  *  - On failure returns NULL
1033  */
1034 struct rte_mempool *
1035 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1036 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1037 	int socket_id);
1038 
1039 
1040 /**
1041  * Create an asymmetric session mempool.
1042  *
1043  * @param name
1044  *   The unique mempool name.
1045  * @param nb_elts
1046  *   The number of elements in the mempool.
1047  * @param cache_size
1048  *   The number of per-lcore cache elements
1049  * @param user_data_size
1050  *   The size of user data to be placed after session private data.
1051  * @param socket_id
1052  *   The *socket_id* argument is the socket identifier in the case of
1053  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1054  *   constraint for the reserved zone.
1055  *
1056  * @return
1057  *  - On success return mempool
1058  *  - On failure returns NULL
1059  */
1060 struct rte_mempool *
1061 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1062 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1063 
1064 /**
1065  * Create symmetric crypto session and fill out private data for the device id,
1066  * based on its device type.
1067  *
1068  * @param   dev_id   ID of device that we want the session to be used on
1069  * @param   xforms   Symmetric crypto transform operations to apply on flow
1070  *                   processed with this session
1071  * @param   mp       Mempool to allocate symmetric session objects from
1072  *
1073  * @return
1074  *  - On success return pointer to sym-session.
1075  *  - On failure returns NULL and rte_errno is set to the error code:
1076  *    - EINVAL on invalid arguments.
1077  *    - ENOMEM on memory error for session allocation.
1078  *    - ENOTSUP if device doesn't support session configuration.
1079  */
1080 void *
1081 rte_cryptodev_sym_session_create(uint8_t dev_id,
1082 		struct rte_crypto_sym_xform *xforms,
1083 		struct rte_mempool *mp);
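
/*
 * Illustrative usage sketch (not part of the upstream header): creating a
 * session mempool sized for this device and then creating an AES-CBC
 * cipher session from it. The cipher_key buffer and the IV_OFFSET constant
 * are hypothetical application-owned values.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	struct rte_mempool *pool;
 *	void *sess;
 *
 *	pool = rte_cryptodev_sym_session_pool_create("sess_pool", 1024,
 *			rte_cryptodev_sym_get_private_session_size(dev_id),
 *			128, 0, rte_cryptodev_socket_id(dev_id));
 *	sess = rte_cryptodev_sym_session_create(dev_id, &xform, pool);
 *	if (sess == NULL)
 *		printf("session creation failed: %d\n", rte_errno);
 */
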
1084 /**
1085  * Create and initialise an asymmetric crypto session structure.
1086  * Calls the PMD to configure the private session data.
1087  *
1088  * @param   dev_id   ID of device that we want the session to be used on
1089  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1090  *                   processed with this session
1091  * @param   mp       mempool to allocate asymmetric session
1092  *                   objects from
1093  * @param   session  void ** for session to be used
1094  *
1095  * @return
1096  *  - 0 on success.
1097  *  - -EINVAL on invalid arguments.
1098  *  - -ENOMEM on memory error for session allocation.
1099  *  - -ENOTSUP if device doesn't support session configuration.
1100  */
1101 int
1102 rte_cryptodev_asym_session_create(uint8_t dev_id,
1103 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1104 		void **session);
1105 
1106 /**
1107  * Free a session for the device id and return it to its mempool.
1108  * It is the application's responsibility to ensure that there are
1109  * no in-flight operations still using the session.
1110  *
1111  * @param   dev_id   ID of device that uses the session.
1112  * @param   sess     Session header to be freed.
1113  *
1114  * @return
1115  *  - 0 if successful.
1116  *  - -EINVAL if the session is NULL or the device id does not match.
1117  */
1118 int
1119 rte_cryptodev_sym_session_free(uint8_t dev_id,
1120 	void *sess);
1121 
1122 /**
1123  * Clears and frees asymmetric crypto session header and private data,
1124  * returning it to its original mempool.
1125  *
1126  * @param   dev_id   ID of device that uses the asymmetric session.
1127  * @param   sess     Session header to be freed.
1128  *
1129  * @return
1130  *  - 0 if successful.
1131  *  - -EINVAL if device is invalid or session is NULL.
1132  */
1133 int
1134 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1135 
1136 /**
1137  * Get the size of the asymmetric session header.
1138  *
1139  * @return
1140  *   Size of the asymmetric session header.
1141  */
1142 unsigned int
1143 rte_cryptodev_asym_get_header_session_size(void);
1144 
1145 /**
1146  * Get the size of the private symmetric session data
1147  * for a device.
1148  *
1149  * @param	dev_id		The device identifier.
1150  *
1151  * @return
1152  *   - Size of the private data, if successful
1153  *   - 0 if device is invalid or does not have private
1154  *   symmetric session
1155  */
1156 unsigned int
1157 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1158 
1159 /**
1160  * Get the size of the private data for asymmetric session
1161  * on device
1162  *
1163  * @param	dev_id		The device identifier.
1164  *
1165  * @return
1166  *   - Size of the asymmetric private data, if successful
1167  *   - 0 if device is invalid or does not have private session
1168  */
1169 unsigned int
1170 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1171 
1172 /**
1173  * Check if the crypto device index is a valid attached crypto device.
1174  *
1175  * @param	dev_id	Crypto device index.
1176  *
1177  * @return
1178  *   - 1 if the device index is valid, 0 otherwise.
1179  */
1180 unsigned int
1181 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1182 
1183 /**
1184  * Provide driver identifier.
1185  *
1186  * @param name
1187  *   The pointer to a driver name.
1188  * @return
1189  *  The driver type identifier or -1 if no driver found
1190  */
1191 int rte_cryptodev_driver_id_get(const char *name);
1192 
1193 /**
1194  * Provide driver name.
1195  *
1196  * @param driver_id
1197  *   The driver identifier.
1198  * @return
1199  *  The driver name or NULL if no driver found
1200  */
1201 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1202 
1203 /**
1204  * Store user data in a session.
1205  *
1206  * @param	sess		Session pointer allocated by
1207  *				*rte_cryptodev_sym_session_create*.
1208  * @param	data		Pointer to the user data.
1209  * @param	size		Size of the user data.
1210  *
1211  * @return
1212  *  - On success, zero.
1213  *  - On failure, a negative value.
1214  */
1215 int
1216 rte_cryptodev_sym_session_set_user_data(void *sess,
1217 					void *data,
1218 					uint16_t size);
1219 
1220 #define CRYPTO_SESS_OPAQUE_DATA_OFF 0
1221 /**
1222  * Get opaque data from session handle
1223  */
1224 static inline uint64_t
1225 rte_cryptodev_sym_session_opaque_data_get(void *sess)
1226 {
1227 	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
1228 }
1229 
1230 /**
1231  * Set opaque data in session handle
1232  */
1233 static inline void
1234 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1235 {
1236 	uint64_t *data;
1237 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1238 	*data = opaque;
1239 }
1240 
1241 /**
1242  * Get user data stored in a session.
1243  *
1244  * @param	sess		Session pointer allocated by
1245  *				*rte_cryptodev_sym_session_create*.
1246  *
1247  * @return
1248  *  - On success return pointer to user data.
1249  *  - On failure returns NULL.
1250  */
1251 void *
1252 rte_cryptodev_sym_session_get_user_data(void *sess);
1253 
1254 /**
1255  * Store user data in an asymmetric session.
1256  *
1257  * @param	sess		Session pointer allocated by
1258  *				*rte_cryptodev_asym_session_create*.
1259  * @param	data		Pointer to the user data.
1260  * @param	size		Size of the user data.
1261  *
1262  * @return
1263  *  - On success, zero.
1264  *  - -EINVAL if the session pointer is invalid.
1265  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1266  */
1267 int
1268 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1269 
1270 /**
1271  * Get user data stored in an asymmetric session.
1272  *
1273  * @param	sess		Session pointer allocated by
1274  *				*rte_cryptodev_asym_session_create*.
1275  *
1276  * @return
1277  *  - On success return pointer to user data.
1278  *  - On failure returns NULL.
1279  */
1280 void *
1281 rte_cryptodev_asym_session_get_user_data(void *sess);
1282 
1283 /**
1284  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1285  * on user provided data.
1286  *
1287  * @param	dev_id	The device identifier.
1288  * @param	sess	Cryptodev session structure
1289  * @param	ofs	Start and stop offsets for auth and cipher operations
1290  * @param	vec	Vectorized operation descriptor
1291  *
1292  * @return
1293  *  - Returns number of successfully processed packets.
1294  */
1295 uint32_t
1296 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1297 	void *sess, union rte_crypto_sym_ofs ofs,
1298 	struct rte_crypto_sym_vec *vec);
1299 
1300 /**
1301  * Get the size of the raw data-path context buffer.
1302  *
1303  * @param	dev_id		The device identifier.
1304  *
1305  * @return
1306  *   - If the device supports raw data-path APIs, return the context size.
1307  *   - If the device does not support the APIs, return -1.
1308  */
1309 int
1310 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1311 
1312 /**
1313  * Set session event meta data
1314  *
1315  * @param	dev_id		The device identifier.
1316  * @param	sess            Crypto or security session.
1317  * @param	op_type         Operation type.
1318  * @param	sess_type       Session type.
1319  * @param	ev_mdata	Pointer to the event crypto meta data
1320  *				(aka *union rte_event_crypto_metadata*)
1321  * @param	size            Size of ev_mdata.
1322  *
1323  * @return
1324  *  - On success, zero.
1325  *  - On failure, a negative value.
1326  */
1327 int
1328 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1329 	enum rte_crypto_op_type op_type,
1330 	enum rte_crypto_op_sess_type sess_type,
1331 	void *ev_mdata, uint16_t size);
1332 
1333 /**
1334  * Union of different crypto session types, including session-less xform
1335  * pointer.
1336  */
1337 union rte_cryptodev_session_ctx {void *crypto_sess;
1338 	struct rte_crypto_sym_xform *xform;
1339 	struct rte_security_session *sec_sess;
1340 };
1341 
1342 /**
1343  * Enqueue a vectorized operation descriptor into the device queue but the
1344  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1345  * is called.
1346  *
1347  * @param	qp		Driver specific queue pair data.
1348  * @param	drv_ctx		Driver specific context data.
1349  * @param	vec		Vectorized operation descriptor.
1350  * @param	ofs		Start and stop offsets for auth and cipher
1351  *				operations.
1352  * @param	user_data	The array of user data for dequeue later.
1353  * @param	enqueue_status	Driver written value to specify the
1354  *				enqueue status. Possible values:
1355  *				- 1: The number of operations returned are
1356  *				     enqueued successfully.
1357  *				- 0: The number of operations returned are
1358  *				     cached into the queue but are not processed
1359  *				     until rte_cryptodev_raw_enqueue_done() is
1360  *				     called.
1361  *				- negative integer: Error occurred.
1362  * @return
1363  *   - The number of operations in the descriptor successfully enqueued or
1364  *     cached into the queue but not enqueued yet, depends on the
1365  *     "enqueue_status" value.
1366  */
1367 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1368 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1369 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1370 
1371 /**
1372  * Enqueue single raw data vector into the device queue but the driver may or
1373  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1374  *
1375  * @param	qp		Driver specific queue pair data.
1376  * @param	drv_ctx		Driver specific context data.
1377  * @param	data_vec	The buffer data vector.
1378  * @param	n_data_vecs	Number of buffer data vectors.
1379  * @param	ofs		Start and stop offsets for auth and cipher
1380  *				operations.
1381  * @param	iv		IV virtual and IOVA addresses
1382  * @param	digest		digest virtual and IOVA addresses
1383  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1384  *				depends on the algorithm used.
1385  * @param	user_data	The user data.
1386  * @return
1387  *   - 1: The data vector is enqueued successfully.
1388  *   - 0: The data vector is cached into the queue but is not processed
1389  *        until rte_cryptodev_raw_enqueue_done() is called.
1390  *   - negative integer: failure.
1391  */
1392 typedef int (*cryptodev_sym_raw_enqueue_t)(
1393 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1394 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1395 	struct rte_crypto_va_iova_ptr *iv,
1396 	struct rte_crypto_va_iova_ptr *digest,
1397 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1398 	void *user_data);
1399 
1400 /**
1401  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1402  * enqueued/dequeued operations.
1403  *
1404  * @param	qp		Driver specific queue pair data.
1405  * @param	drv_ctx		Driver specific context data.
1406  * @param	n		The total number of processed operations.
1407  * @return
1408  *   - On success return 0.
1409  *   - On failure return negative integer.
1410  */
1411 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1412 	uint32_t n);
1413 
1414 /**
1415  * Function prototype for a user-provided callback that the driver calls to
1416  * get the dequeue count. The function may return a fixed number or a number
1417  * parsed from the user data stored in the first processed operation.
1418  *
1419  * @param	user_data	Dequeued user data.
1420  * @return
1421  *  - The number of operations to be dequeued.
1422  */
1423 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1424 
1425 /**
1426  * Function prototype for a user-provided callback that handles post-dequeue
1427  * processing, such as filling in the operation status.
1428  *
1429  * @param	user_data	Dequeued user data.
1430  * @param	index		Index number of the processed descriptor.
1431  * @param	is_op_success	Operation status provided by the driver.
1432  */
1433 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1434 	uint32_t index, uint8_t is_op_success);
1435 
1436 /**
1437  * Dequeue a burst of symmetric crypto processing.
1438  *
1439  * @param	qp			Driver specific queue pair data.
1440  * @param	drv_ctx			Driver specific context data.
1441  * @param	get_dequeue_count	User provided callback function to
1442  *					obtain dequeue operation count.
1443  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1444  *					value is used to pass the maximum
1445  *					number of operations to be dequeued.
1446  * @param	post_dequeue		User provided callback function to
1447  *					post-process a dequeued operation.
1448  * @param	out_user_data		User data pointer array to be retrieved
1449  *					from the device queue. If
1450  *					*is_user_data_array* is set, there
1451  *					should be enough room to store all
1452  *					user data.
1453  * @param	is_user_data_array	Set 1 if every dequeued user data will
1454  *					be written into out_user_data array.
1455  *					Set 0 if only the first user data will
1456  *					be written into out_user_data array.
1457  * @param	n_success		Driver written value to specify the
1458  *					total successful operations count.
1459  * @param	dequeue_status		Driver written value to specify the
1460  *					dequeue status. Possible values:
1461  *					- 1: Successfully dequeued the number
1462  *					     of operations returned. The user
1463  *					     data previously set during enqueue
1464  *					     is stored in the "out_user_data".
1465  *					- 0: The number of operations returned
1466  *					     are completed and the user data is
1467  *					     stored in the "out_user_data", but
1468  *					     they are not freed from the queue
1469  *					     until
1470  *					     rte_cryptodev_raw_dequeue_done()
1471  *					     is called.
1472  *					- negative integer: Error occurred.
1473  * @return
1474  *   - The number of operations dequeued or completed but not freed from the
1475  *     queue, depends on "dequeue_status" value.
1476  */
1477 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1478 	uint8_t *drv_ctx,
1479 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1480 	uint32_t max_nb_to_dequeue,
1481 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1482 	void **out_user_data, uint8_t is_user_data_array,
1483 	uint32_t *n_success, int *dequeue_status);
1484 
1485 /**
1486  * Dequeue a symmetric crypto processing.
1487  *
1488  * @param	qp			Driver specific queue pair data.
1489  * @param	drv_ctx			Driver specific context data.
1490  * @param	dequeue_status		Driver written value to specify the
1491  *					dequeue status. Possible values:
1492  *					- 1: Successfully dequeued an operation.
1493  *					     The user data is returned.
1494  *					- 0: The first operation in the queue
1495  *					     is completed and the user data
1496  *					     previously set during enqueue is
1497  *					     returned, but it is not freed from
1498  *					     the queue until
1499  *					     rte_cryptodev_raw_dequeue_done() is
1500  *					     called.
1501  *					- negative integer: Error occurred.
1502  * @param	op_status		Driver written value to specify
1503  *					operation status.
1504  * @return
1505  *   - The user data pointer retrieved from device queue or NULL if no
1506  *     operation is ready for dequeue.
1507  */
1508 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1509 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1510 		enum rte_crypto_op_status *op_status);
1511 
1512 /**
1513  * Context data for raw data-path API crypto process. The buffer of this
1514  * structure is to be allocated by the user application with a size equal to
1515  * or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1516  */
1517 struct rte_crypto_raw_dp_ctx {
1518 	void *qp_data;
1519 
1520 	cryptodev_sym_raw_enqueue_t enqueue;
1521 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1522 	cryptodev_sym_raw_operation_done_t enqueue_done;
1523 	cryptodev_sym_raw_dequeue_t dequeue;
1524 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1525 	cryptodev_sym_raw_operation_done_t dequeue_done;
1526 
1527 	/* Driver specific context data */
1528 	__extension__ uint8_t drv_ctx_data[];
1529 };
1530 
1531 /**
1532  * Configure raw data-path context data.
1533  *
1534  * @param	dev_id		The device identifier.
1535  * @param	qp_id		The index of the queue pair from which to
1536  *				retrieve processed packets. The value must be
1537  *				in the range [0, nb_queue_pair - 1] previously
1538  *				supplied to rte_cryptodev_configure().
1539  * @param	ctx		The raw data-path context data.
1540  * @param	sess_type	Session type.
1541  * @param	session_ctx	Session context data.
1542  * @param	is_update	Set 0 if it is to initialize the ctx.
1543  *				Set 1 if ctx is initialized and only to update
1544  *				session context data.
1545  * @return
1546  *   - On success return 0.
1547  *   - On failure return negative integer.
1548  *     - -EINVAL if input parameters are invalid.
1549  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1550  *        provided session.
1551  */
1552 int
1553 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1554 	struct rte_crypto_raw_dp_ctx *ctx,
1555 	enum rte_crypto_op_sess_type sess_type,
1556 	union rte_cryptodev_session_ctx session_ctx,
1557 	uint8_t is_update);
1558 
1559 /**
1560  * Enqueue a vectorized operation descriptor into the device queue but the
1561  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1562  * is called.
1563  *
1564  * @param	ctx		The initialized raw data-path context data.
1565  * @param	vec		Vectorized operation descriptor.
1566  * @param	ofs		Start and stop offsets for auth and cipher
1567  *				operations.
1568  * @param	user_data	The array of user data for dequeue later.
1569  * @param	enqueue_status	Driver written value to specify the
1570  *				enqueue status. Possible values:
1571  *				- 1: The number of operations returned are
1572  *				     enqueued successfully.
1573  *				- 0: The number of operations returned are
1574  *				     cached into the queue but are not processed
1575  *				     until rte_cryptodev_raw_enqueue_done() is
1576  *				     called.
1577  *				- negative integer: Error occurred.
1578  * @return
1579  *   - The number of operations in the descriptor successfully enqueued or
1580  *     cached into the queue but not enqueued yet, depends on the
1581  *     "enqueue_status" value.
1582  */
1583 uint32_t
1584 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1585 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1586 	void **user_data, int *enqueue_status);
1587 
1588 /**
1589  * Enqueue single raw data vector into the device queue but the driver may or
1590  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1591  *
1592  * @param	ctx		The initialized raw data-path context data.
1593  * @param	data_vec	The buffer data vector.
1594  * @param	n_data_vecs	Number of buffer data vectors.
1595  * @param	ofs		Start and stop offsets for auth and cipher
1596  *				operations.
1597  * @param	iv		IV virtual and IOVA addresses
1598  * @param	digest		digest virtual and IOVA addresses
1599  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1600  *				depends on the algorithm used.
1601  * @param	user_data	The user data.
1602  * @return
1603  *   - 1: The data vector is enqueued successfully.
1604  *   - 0: The data vector is cached into the queue but is not processed
1605  *        until rte_cryptodev_raw_enqueue_done() is called.
1606  *   - negative integer: failure.
1607  */
1608 __rte_experimental
1609 static __rte_always_inline int
1610 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1611 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1612 	union rte_crypto_sym_ofs ofs,
1613 	struct rte_crypto_va_iova_ptr *iv,
1614 	struct rte_crypto_va_iova_ptr *digest,
1615 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1616 	void *user_data)
1617 {
1618 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1619 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1620 }
1621 
1622 /**
1623  * Start processing all operations enqueued since the last
1624  * rte_cryptodev_configure_raw_dp_ctx() call.
1625  *
1626  * @param	ctx	The initialized raw data-path context data.
1627  * @param	n	The number of operations cached.
1628  * @return
1629  *   - On success return 0.
1630  *   - On failure return negative integer.
1631  */
1632 int
1633 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1634 		uint32_t n);
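/*
 * A minimal sketch (illustrative, not part of the API): submit one
 * contiguous mbuf through rte_cryptodev_raw_enqueue() and commit it with
 * rte_cryptodev_raw_enqueue_done(). "dp_ctx", "m", "iv_va"/"iv_iova",
 * "dig_va"/"dig_iova" and "cookie" (the user data returned on dequeue) are
 * assumed to be prepared by the application; an algorithm needing no
 * AAD/auth IV is assumed (hence the NULL), and zeroed offsets mean the whole
 * buffer is processed.
 *
 *	#include <rte_mbuf.h>
 *
 *	struct rte_crypto_vec data = {
 *		.base = rte_pktmbuf_mtod(m, void *),
 *		.iova = rte_pktmbuf_iova(m),
 *		.len = rte_pktmbuf_data_len(m),
 *	};
 *	struct rte_crypto_va_iova_ptr iv = { .va = iv_va, .iova = iv_iova };
 *	struct rte_crypto_va_iova_ptr digest = { .va = dig_va, .iova = dig_iova };
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	int ret;
 *
 *	ret = rte_cryptodev_raw_enqueue(dp_ctx, &data, 1, ofs, &iv, &digest,
 *			NULL, cookie);
 *	if (ret == 0)
 *		// the vector is cached only, flush it to the device
 *		ret = rte_cryptodev_raw_enqueue_done(dp_ctx, 1);
 */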
1635 
1636 /**
1637  * Dequeue a burst of processed symmetric crypto operations.
1638  *
1639  * @param	ctx			The initialized raw data-path context
1640  *					data.
1641  * @param	get_dequeue_count	User provided callback function to
1642  *					obtain dequeue operation count.
1643  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1644  *					value is used to pass the maximum
1645  *					number of operations to be dequeued.
1646  * @param	post_dequeue		User provided callback function to
1647  *					post-process a dequeued operation.
1648  * @param	out_user_data		User data pointer array to be retrieved
1649  *					from the device queue. In case
1650  *					*is_user_data_array* is set, there
1651  *					should be enough room to store all
1652  *					user data.
1653  * @param	is_user_data_array	Set to 1 if every dequeued user data will
1654  *					be written into the out_user_data array.
1655  *					Set to 0 if only the first user data will
1656  *					be written into the out_user_data array.
1657  * @param	n_success		Driver written value to specify the
1658  *					count of successfully processed operations.
1659  * @param	dequeue_status		Driver written value to specify the
1660  *					dequeue status. Possible values:
1661  *					- 1: Successfully dequeued the number
1662  *					     of operations returned. The user
1663  *					     data previously set during enqueue
1664  *					     is stored in the "out_user_data".
1665  *					- 0: The number of operations returned
1666  *					     are completed and the user data is
1667  *					     stored in the "out_user_data", but
1668  *					     they are not freed from the queue
1669  *					     until
1670  *					     rte_cryptodev_raw_dequeue_done()
1671  *					     is called.
1672  *					- negative integer: Error occurred.
1673  * @return
1674  *   - The number of operations dequeued, or completed but not freed from the
1675  *     queue, depending on the "dequeue_status" value.
1676  */
1677 uint32_t
1678 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1679 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1680 	uint32_t max_nb_to_dequeue,
1681 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1682 	void **out_user_data, uint8_t is_user_data_array,
1683 	uint32_t *n_success, int *dequeue_status);
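/*
 * Illustrative sketch (not part of the API): drain up to BURST_SZ processed
 * ops. "dp_ctx" is assumed configured; BURST_SZ, struct my_cookie and its
 * "status" field are hypothetical application constructs. The callback
 * signature is assumed to match rte_cryptodev_raw_post_dequeue_t.
 *
 *	// Per-op post-processing hook invoked for every dequeued op.
 *	static void
 *	post_deq(void *user_data, uint32_t index, uint8_t is_op_success)
 *	{
 *		struct my_cookie *c = user_data;
 *
 *		c->status = is_op_success ? 0 : -1;
 *		(void)index;
 *	}
 *
 *	// ... later, on the data path:
 *	void *out_user_data[BURST_SZ];
 *	uint32_t n, n_success;
 *	int deq_status;
 *
 *	n = rte_cryptodev_raw_dequeue_burst(dp_ctx, NULL, BURST_SZ, post_deq,
 *			out_user_data, 1, &n_success, &deq_status);
 *	if (n != 0 && deq_status == 0)
 *		// ops retrieved but still held in the queue: release them
 *		rte_cryptodev_raw_dequeue_done(dp_ctx, n);
 */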
1684 
1685 /**
1686  * Dequeue a single processed symmetric crypto operation.
1687  *
1688  * @param	ctx			The initialized raw data-path context
1689  *					data.
1690  * @param	dequeue_status		Driver written value to specify the
1691  *					dequeue status. Possible values:
1692  *					- 1: Successfully dequeued an operation.
1693  *					     The user data is returned.
1694  *					- 0: The first operation in the queue
1695  *					     is completed and the user data
1696  *					     previously set during enqueue is
1697  *					     returned, but it is not freed from
1698  *					     the queue until
1699  *					     rte_cryptodev_raw_dequeue_done() is
1700  *					     called.
1701  *					- negative integer: Error occurred.
1702  * @param	op_status		Driver written value to specify
1703  *					operation status.
1704  * @return
1705  *   - The user data pointer retrieved from device queue or NULL if no
1706  *     operation is ready for dequeue.
1707  */
1708 __rte_experimental
1709 static __rte_always_inline void *
1710 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1711 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1712 {
1713 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1714 			op_status);
1715 }
1716 
1717 /**
1718  * Inform the queue pair that the dequeue operations are finished.
1719  *
1720  * @param	ctx	The initialized raw data-path context data.
1721  * @param	n	The number of operations.
1722  * @return
1723  *   - On success return 0.
1724  *   - On failure return negative integer.
1725  */
1726 int
1727 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1728 		uint32_t n);
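/*
 * A minimal sketch (illustrative, not part of the API): poll for a single
 * completion with rte_cryptodev_raw_dequeue() and release it with
 * rte_cryptodev_raw_dequeue_done(). "dp_ctx" is assumed configured;
 * handle_failed_op() is a hypothetical application handler.
 *
 *	enum rte_crypto_op_status op_status;
 *	int deq_status;
 *	void *cookie;
 *
 *	cookie = rte_cryptodev_raw_dequeue(dp_ctx, &deq_status, &op_status);
 *	if (cookie != NULL) {
 *		if (op_status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *			handle_failed_op(cookie);	// per-op failure
 *		if (deq_status == 0)
 *			// completed but still held in the queue
 *			rte_cryptodev_raw_dequeue_done(dp_ctx, 1);
 *	}
 */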
1729 
1730 /**
1731  * Add a user callback for a given crypto device and queue pair which will be
1732  * called on crypto ops enqueue.
1733  *
1734  * This API configures a function to be called for each burst of crypto ops
1735  * enqueued on a given crypto device queue pair. The return value is a pointer
1736  * that can be used later to remove the callback using
1737  * rte_cryptodev_remove_enq_callback().
1738  *
1739  * Callbacks registered by the application do not survive
1740  * rte_cryptodev_configure(), as it reinitializes the callback list.
1741  * It is the user's responsibility to remove all installed callbacks before
1742  * calling rte_cryptodev_configure() to avoid possible memory leaks.
1743  * The application is expected to call this API after rte_cryptodev_configure().
1744  *
1745  * Multiple functions can be registered per queue pair and they are called
1746  * in the order they were added. The API does not restrict the maximum number
1747  * of callbacks.
1748  *
1749  * @param	dev_id		The identifier of the device.
1750  * @param	qp_id		The index of the queue pair on which ops are
1751  *				to be enqueued for processing. The value
1752  *				must be in the range [0, nb_queue_pairs - 1]
1753  *				previously supplied to
1754  *				*rte_cryptodev_configure*.
1755  * @param	cb_fn		The callback function
1756  * @param	cb_arg		A generic pointer parameter which will be passed
1757  *				to each invocation of the callback function on
1758  *				this crypto device and queue pair.
1759  *
1760  * @return
1761  *  - NULL on error; rte_errno will contain the error code.
1762  *  - On success, a pointer value which can later be used to remove the
1763  *    callback.
1764  */
1765 struct rte_cryptodev_cb *
1766 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1767 			       uint16_t qp_id,
1768 			       rte_cryptodev_callback_fn cb_fn,
1769 			       void *cb_arg);
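/*
 * Usage sketch (illustrative only, not part of the API): register an enqueue
 * callback that counts submitted ops. The callback signature is assumed to
 * match rte_cryptodev_callback_fn; "dev_id" and "qp_id" are assumed valid and
 * the device already configured.
 *
 *	static uint64_t enq_count;
 *
 *	static uint16_t
 *	count_enq_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		*counter += nb_ops;
 *		(void)dev_id; (void)qp_id; (void)ops;
 *		return nb_ops;	// number of ops to actually enqueue
 *	}
 *
 *	// ... at initialization time, after rte_cryptodev_configure():
 *	struct rte_cryptodev_cb *enq_cb;
 *
 *	enq_cb = rte_cryptodev_add_enq_callback(dev_id, qp_id, count_enq_cb,
 *			&enq_count);
 *	if (enq_cb == NULL) {
 *		// rte_errno holds the failure reason
 *	}
 */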
1770 
1771 /**
1772  * Remove a user callback function for given crypto device and queue pair.
1773  *
1774  * This function is used to remove enqueue callbacks that were added to a
1775  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1776  *
1779  * @param	dev_id		The identifier of the device.
1780  * @param	qp_id		The index of the queue pair on which ops are
1781  *				to be enqueued. The value must be in the
1782  *				range [0, nb_queue_pairs - 1] previously
1783  *				supplied to *rte_cryptodev_configure*.
1784  * @param	cb		Pointer to user supplied callback created via
1785  *				rte_cryptodev_add_enq_callback().
1786  *
1787  * @return
1788  *   -  0: Success. Callback was removed.
1789  *   - <0: The dev_id or the qp_id is out of range, or the callback
1790  *         is NULL or not found for the crypto device queue pair.
1791  */
1792 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1793 				      uint16_t qp_id,
1794 				      struct rte_cryptodev_cb *cb);
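/*
 * Illustrative sketch (not part of the API): remove the callback registered
 * in the sketch above before reconfiguring the device, since
 * rte_cryptodev_configure() reinitializes the callback lists. "enq_cb" is the
 * pointer returned by rte_cryptodev_add_enq_callback().
 *
 *	if (rte_cryptodev_remove_enq_callback(dev_id, qp_id, enq_cb) < 0) {
 *		// invalid dev_id/qp_id, or callback not found
 *	}
 *	enq_cb = NULL;
 */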
1795 
1796 /**
1797  * Add a user callback for a given crypto device and queue pair which will be
1798  * called on crypto ops dequeue.
1799  *
1800  * This API configures a function to be called for each burst of crypto ops
1801  * dequeued from a given crypto device queue pair. The return value is a pointer
1802  * that can be used later to remove the callback using
1803  * rte_cryptodev_remove_deq_callback().
1804  *
1805  * Callbacks registered by the application do not survive
1806  * rte_cryptodev_configure(), as it reinitializes the callback list.
1807  * It is the user's responsibility to remove all installed callbacks before
1808  * calling rte_cryptodev_configure() to avoid possible memory leaks.
1809  * The application is expected to call this API after rte_cryptodev_configure().
1810  *
1811  * Multiple functions can be registered per queue pair and they are called
1812  * in the order they were added. The API does not restrict the maximum number
1813  * of callbacks.
1814  *
1815  * @param	dev_id		The identifier of the device.
1816  * @param	qp_id		The index of the queue pair on which ops are
1817  *				to be dequeued. The value must be in the
1818  *				range [0, nb_queue_pairs - 1] previously
1819  *				supplied to *rte_cryptodev_configure*.
1820  * @param	cb_fn		The callback function
1821  * @param	cb_arg		A generic pointer parameter which will be passed
1822  *				to each invocation of the callback function on
1823  *				this crypto device and queue pair.
1824  *
1825  * @return
1826  *   - NULL on error; rte_errno will contain the error code.
1827  *   - On success, a pointer value which can later be used to remove the
1828  *     callback.
1829  */
1830 struct rte_cryptodev_cb *
1831 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1832 			       uint16_t qp_id,
1833 			       rte_cryptodev_callback_fn cb_fn,
1834 			       void *cb_arg);
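/*
 * Illustrative sketch (not part of the API): register a dequeue callback that
 * tallies ops completed with an error status. The callback signature is
 * assumed to match rte_cryptodev_callback_fn; "dev_id" and "qp_id" are
 * assumed valid and the device already configured.
 *
 *	static uint64_t deq_err_count;
 *
 *	static uint16_t
 *	count_deq_err_cb(uint16_t dev_id, uint16_t qp_id,
 *			struct rte_crypto_op **ops, uint16_t nb_ops,
 *			void *user_param)
 *	{
 *		uint64_t *errs = user_param;
 *		uint16_t i;
 *
 *		for (i = 0; i < nb_ops; i++)
 *			if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *				(*errs)++;
 *		(void)dev_id; (void)qp_id;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *deq_cb =
 *		rte_cryptodev_add_deq_callback(dev_id, qp_id,
 *				count_deq_err_cb, &deq_err_count);
 */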
1835 
1836 /**
1837  * Remove a user callback function for given crypto device and queue pair.
1838  *
1839  * This function is used to remove dequeue callbacks that were added to a
1840  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1841  *
1844  * @param	dev_id		The identifier of the device.
1845  * @param	qp_id		The index of the queue pair on which ops are
1846  *				to be dequeued. The value must be in the
1847  *				range [0, nb_queue_pairs - 1] previously
1848  *				supplied to *rte_cryptodev_configure*.
1849  * @param	cb		Pointer to user supplied callback created via
1850  *				rte_cryptodev_add_deq_callback().
1851  *
1852  * @return
1853  *   -  0: Success. Callback was removed.
1854  *   - <0: The dev_id or the qp_id is out of range, or the callback
1855  *         is NULL or not found for the crypto device queue pair.
1856  */
1857 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1858 				      uint16_t qp_id,
1859 				      struct rte_cryptodev_cb *cb);
1860 
1861 #include <rte_cryptodev_core.h>
1862 /**
1863  *
1864  * Dequeue a burst of processed crypto operations from a queue on the crypto
1865  * device. The dequeued operations are stored in *rte_crypto_op* structures
1866  * whose pointers are supplied in the *ops* array.
1867  *
1868  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1869  * actually dequeued, which is the number of *rte_crypto_op* data structures
1870  * effectively supplied into the *ops* array.
1871  *
1872  * A return value equal to *nb_ops* indicates that the queue contained
1873  * at least *nb_ops* operations, and this is likely to signify that other
1874  * processed operations remain in the device's output queue. Applications
1875  * implementing a "retrieve as many processed operations as possible" policy
1876  * can check this specific case and keep invoking the
1877  * rte_cryptodev_dequeue_burst() function until a value less than
1878  * *nb_ops* is returned.
1879  *
1880  * The rte_cryptodev_dequeue_burst() function does not provide any error
1881  * notification to avoid the corresponding overhead.
1882  *
1883  * @param	dev_id		The symmetric crypto device identifier
1884  * @param	qp_id		The index of the queue pair from which to
1885  *				retrieve processed packets. The value must be
1886  *				in the range [0, nb_queue_pair - 1] previously
1887  *				supplied to rte_cryptodev_configure().
1888  * @param	ops		The address of an array of pointers to
1889  *				*rte_crypto_op* structures that must be
1890  *				large enough to store *nb_ops* pointers in it.
1891  * @param	nb_ops		The maximum number of operations to dequeue.
1892  *
1893  * @return
1894  *   - The number of operations actually dequeued, which is the number
1895  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1896  *   *ops* array.
1897  */
1898 static inline uint16_t
1899 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1900 		struct rte_crypto_op **ops, uint16_t nb_ops)
1901 {
1902 	const struct rte_crypto_fp_ops *fp_ops;
1903 	void *qp;
1904 
1905 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1906 
1907 	fp_ops = &rte_crypto_fp_ops[dev_id];
1908 	qp = fp_ops->qp.data[qp_id];
1909 
1910 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1911 
1912 #ifdef RTE_CRYPTO_CALLBACKS
1913 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1914 		struct rte_cryptodev_cb_rcu *list;
1915 		struct rte_cryptodev_cb *cb;
1916 
1917 		/* rte_memory_order_release memory order was used when the
1918 		 * callback was inserted into the list.
1919 		 * Since there is a clear dependency between loading
1920 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1921 		 * not required.
1922 		 */
1923 		list = &fp_ops->qp.deq_cb[qp_id];
1924 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1925 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1926 
1927 		while (cb != NULL) {
1928 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1929 					cb->arg);
1930 			cb = cb->next;
1931 		}
1932 
1933 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1934 	}
1935 #endif
1936 	return nb_ops;
1937 }
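/*
 * Illustrative sketch (not part of the API): the "retrieve as many processed
 * operations as possible" policy described above. BURST_SZ and process_ops()
 * are hypothetical application constructs; "dev_id" and "qp_id" are assumed
 * valid.
 *
 *	struct rte_crypto_op *deq_ops[BURST_SZ];
 *	uint16_t nb;
 *
 *	// Keep draining while full bursts come back, i.e. while more
 *	// processed ops may remain in the device output queue.
 *	do {
 *		nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, deq_ops,
 *				BURST_SZ);
 *		process_ops(deq_ops, nb);
 *	} while (nb == BURST_SZ);
 */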
1938 
1939 /**
1940  * Enqueue a burst of operations for processing on a crypto device.
1941  *
1942  * The rte_cryptodev_enqueue_burst() function is invoked to place
1943  * crypto operations on the queue *qp_id* of the device designated by
1944  * its *dev_id*.
1945  *
1946  * The *nb_ops* parameter is the number of operations to process which are
1947  * supplied in the *ops* array of *rte_crypto_op* structures.
1948  *
1949  * The rte_cryptodev_enqueue_burst() function returns the number of
1950  * operations it actually enqueued for processing. A return value equal to
1951  * *nb_ops* means that all operations have been enqueued.
1952  *
1953  * @param	dev_id		The identifier of the device.
1954  * @param	qp_id		The index of the queue pair on which operations
1955  *				are to be enqueued for processing. The value
1956  *				must be in the range [0, nb_queue_pairs - 1]
1957  *				previously supplied to
1958  *				*rte_cryptodev_configure*.
1959  * @param	ops		The address of an array of *nb_ops* pointers
1960  *				to *rte_crypto_op* structures which contain
1961  *				the crypto operations to be processed.
1962  * @param	nb_ops		The number of operations to process.
1963  *
1964  * @return
1965  * The number of operations actually enqueued on the crypto device. The return
1966  * value can be less than the value of the *nb_ops* parameter when the
1967  * crypto device's queue is full or if invalid parameters are specified in
1968  * a *rte_crypto_op*.
1969  */
1970 static inline uint16_t
1971 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1972 		struct rte_crypto_op **ops, uint16_t nb_ops)
1973 {
1974 	const struct rte_crypto_fp_ops *fp_ops;
1975 	void *qp;
1976 
1977 	fp_ops = &rte_crypto_fp_ops[dev_id];
1978 	qp = fp_ops->qp.data[qp_id];
1979 #ifdef RTE_CRYPTO_CALLBACKS
1980 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1981 		struct rte_cryptodev_cb_rcu *list;
1982 		struct rte_cryptodev_cb *cb;
1983 
1984 		/* rte_memory_order_release memory order was used when the
1985 		 * callback was inserted into the list.
1986 		 * Since there is a clear dependency between loading
1987 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1988 		 * not required.
1989 		 */
1990 		list = &fp_ops->qp.enq_cb[qp_id];
1991 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1992 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1993 
1994 		while (cb != NULL) {
1995 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1996 					cb->arg);
1997 			cb = cb->next;
1998 		}
1999 
2000 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2001 	}
2002 #endif
2003 
2004 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2005 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2006 }
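/*
 * Illustrative sketch (not part of the API): submit a full burst, retrying
 * the remainder when the queue pair is temporarily full. "ops" and "nb_ops"
 * are assumed prepared by the application; a real application would bound
 * the retries or drop the remaining ops.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_ops)
 *		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *				&ops[sent], nb_ops - sent);
 */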
2007 
2010 #ifdef __cplusplus
2011 }
2012 #endif
2013 
2014 #endif /* _RTE_CRYPTODEV_H_ */
2015