xref: /dpdk/lib/cryptodev/rte_cryptodev.h (revision c56185fc183fc0532d2f03aaf04bbf0989ea91a5)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include <rte_compat.h>
22 #include "rte_kvargs.h"
23 #include "rte_crypto.h"
24 #include <rte_common.h>
25 #include <rte_rcu_qsbr.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
29 extern const char **rte_cyptodev_names;
30 
31 /* Logging Macros */
32 
33 #define CDEV_LOG_ERR(...) \
34 	RTE_LOG(ERR, CRYPTODEV, \
35 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
36 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
37 
38 #define CDEV_LOG_INFO(...) \
39 	RTE_LOG(INFO, CRYPTODEV, \
40 		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
41 			RTE_FMT_TAIL(__VA_ARGS__,)))
42 
43 #define CDEV_LOG_DEBUG(...) \
44 	RTE_LOG(DEBUG, CRYPTODEV, \
45 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
46 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
47 
48 #define CDEV_PMD_TRACE(...) \
49 	RTE_LOG(DEBUG, CRYPTODEV, \
50 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
51 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
52 
53 /**
54  * A macro that returns a pointer to an offset from the start
55  * of the crypto operation structure (rte_crypto_op).
56  *
57  * The returned pointer is cast to type t.
58  *
59  * @param c
60  *   The crypto operation.
61  * @param o
62  *   The offset from the start of the crypto operation.
63  * @param t
64  *   The type to cast the result into.
65  */
66 #define rte_crypto_op_ctod_offset(c, t, o)	\
67 	((t)((char *)(c) + (o)))
68 
69 /**
70  * A macro that returns the physical address that points
71  * to an offset from the start of the crypto operation
72  * (rte_crypto_op)
73  *
74  * @param c
75  *   The crypto operation.
76  * @param o
77  *   The offset from the start of the crypto operation
78  *   to calculate address from.
79  */
80 #define rte_crypto_op_ctophys_offset(c, o)	\
81 	(rte_iova_t)((c)->phys_addr + (o))
82 
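/*
 * Example (illustrative sketch): fetching a per-operation IV that the
 * application has placed immediately after the rte_crypto_op and
 * rte_crypto_sym_op structures. The IV_OFFSET layout and the op pointer
 * are application-side assumptions, not something mandated by this API.
 *
 *	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 *			sizeof(struct rte_crypto_sym_op))
 *
 *	uint8_t *iv_va = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *	rte_iova_t iv_pa = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
 */
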
83 /**
84  * Crypto parameters range description
85  */
86 struct rte_crypto_param_range {
87 	uint16_t min;	/**< minimum size */
88 	uint16_t max;	/**< maximum size */
89 	uint16_t increment;
90 	/**< If a range of sizes is supported,
91 	 * this parameter is used to indicate the
92 	 * increments in byte size that are supported
93 	 * between the minimum and maximum.
94 	 */
95 };
96 
97 /**
98  * Data-unit supported lengths of cipher algorithms.
99  * A bit can represent any set of data-unit sizes
100  * (single size, multiple size, range, etc).
101  */
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
105 
106 /**
107  * Symmetric Crypto Capability
108  */
109 struct rte_cryptodev_symmetric_capability {
110 	enum rte_crypto_sym_xform_type xform_type;
111 	/**< Transform type : Authentication / Cipher / AEAD */
112 	union {
113 		struct {
114 			enum rte_crypto_auth_algorithm algo;
115 			/**< authentication algorithm */
116 			uint16_t block_size;
117 			/**< algorithm block size */
118 			struct rte_crypto_param_range key_size;
119 			/**< auth key size range */
120 			struct rte_crypto_param_range digest_size;
121 			/**< digest size range */
122 			struct rte_crypto_param_range aad_size;
123 			/**< Additional authentication data size range */
124 			struct rte_crypto_param_range iv_size;
125 			/**< Initialisation vector data size range */
126 		} auth;
127 		/**< Symmetric Authentication transform capabilities */
128 		struct {
129 			enum rte_crypto_cipher_algorithm algo;
130 			/**< cipher algorithm */
131 			uint16_t block_size;
132 			/**< algorithm block size */
133 			struct rte_crypto_param_range key_size;
134 			/**< cipher key size range */
135 			struct rte_crypto_param_range iv_size;
136 			/**< Initialisation vector data size range */
137 			uint32_t dataunit_set;
138 			/**<
139 			 * Supported data-unit lengths:
140 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
141 			 * or 0 for lengths defined in the algorithm standard.
142 			 */
143 		} cipher;
144 		/**< Symmetric Cipher transform capabilities */
145 		struct {
146 			enum rte_crypto_aead_algorithm algo;
147 			/**< AEAD algorithm */
148 			uint16_t block_size;
149 			/**< algorithm block size */
150 			struct rte_crypto_param_range key_size;
151 			/**< AEAD key size range */
152 			struct rte_crypto_param_range digest_size;
153 			/**< digest size range */
154 			struct rte_crypto_param_range aad_size;
155 			/**< Additional authentication data size range */
156 			struct rte_crypto_param_range iv_size;
157 			/**< Initialisation vector data size range */
158 		} aead;
159 	};
160 };
161 
162 /**
163  * Asymmetric Xform Crypto Capability
164  */
165 struct rte_cryptodev_asymmetric_xform_capability {
166 	enum rte_crypto_asym_xform_type xform_type;
167 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
168 
169 	uint32_t op_types;
170 	/**<
171 	 * Bitmask for supported rte_crypto_asym_op_type or
172 	 * rte_crypto_asym_ke_type. Which enum is used is determined
173 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
174 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
175 	 * it is rte_crypto_asym_op_type.
176 	 */
177 
178 	__extension__
179 	union {
180 		struct rte_crypto_param_range modlen;
181 		/**< Range of modulus length supported by modulus based xform.
182 		 * Value 0 mean implementation default
183 		 * Value 0 means implementation default
184 	};
185 };
186 
187 /**
188  * Asymmetric Crypto Capability
189  */
190 struct rte_cryptodev_asymmetric_capability {
191 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
192 };
193 
194 
195 /** Structure used to capture a capability of a crypto device */
196 struct rte_cryptodev_capabilities {
197 	enum rte_crypto_op_type op;
198 	/**< Operation type */
199 
200 	union {
201 		struct rte_cryptodev_symmetric_capability sym;
202 		/**< Symmetric operation capability parameters */
203 		struct rte_cryptodev_asymmetric_capability asym;
204 		/**< Asymmetric operation capability parameters */
205 	};
206 };
207 
208 /** Structure used to describe crypto algorithms */
209 struct rte_cryptodev_sym_capability_idx {
210 	enum rte_crypto_sym_xform_type type;
211 	union {
212 		enum rte_crypto_cipher_algorithm cipher;
213 		enum rte_crypto_auth_algorithm auth;
214 		enum rte_crypto_aead_algorithm aead;
215 	} algo;
216 };
217 
218 /**
219  * Structure used to describe asymmetric crypto xforms
220  * Each xform maps to one asym algorithm.
221  */
222 struct rte_cryptodev_asym_capability_idx {
223 	enum rte_crypto_asym_xform_type type;
224 	/**< Asymmetric xform (algo) type */
225 };
226 
227 /**
228  * Provide capabilities available for defined device and algorithm
229  *
230  * @param	dev_id		The identifier of the device.
231  * @param	idx		Description of crypto algorithms.
232  *
233  * @return
234  *   - Return description of the symmetric crypto capability if it exists.
235  *   - Return NULL if the capability does not exist.
236  */
237 const struct rte_cryptodev_symmetric_capability *
238 rte_cryptodev_sym_capability_get(uint8_t dev_id,
239 		const struct rte_cryptodev_sym_capability_idx *idx);
240 
241 /**
242  *  Provide capabilities available for defined device and xform
243  *
244  * @param	dev_id		The identifier of the device.
245  * @param	idx		Description of asym crypto xform.
246  *
247  * @return
248  *   - Return description of the asymmetric crypto capability if it exists.
249  *   - Return NULL if the capability does not exist.
250  */
251 __rte_experimental
252 const struct rte_cryptodev_asymmetric_xform_capability *
253 rte_cryptodev_asym_capability_get(uint8_t dev_id,
254 		const struct rte_cryptodev_asym_capability_idx *idx);
255 
256 /**
257  * Check if key size and initial vector are supported
258  * in crypto cipher capability
259  *
260  * @param	capability	Description of the symmetric crypto capability.
261  * @param	key_size	Cipher key size.
262  * @param	iv_size		Cipher initial vector size.
263  *
264  * @return
265  *   - Return 0 if the parameters are in range of the capability.
266  *   - Return -1 if the parameters are out of range of the capability.
267  */
268 int
269 rte_cryptodev_sym_capability_check_cipher(
270 		const struct rte_cryptodev_symmetric_capability *capability,
271 		uint16_t key_size, uint16_t iv_size);
272 
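/*
 * Example (illustrative sketch): checking whether a device supports
 * AES-CBC with a 16-byte key and a 16-byte IV. The device id (0) and the
 * sizes chosen here are assumptions for illustration only.
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(0, &cap_idx);
 *
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0) {
 *		// AES-CBC with these parameters is usable on device 0
 *	}
 */
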
273 /**
274  * Check if key size, digest size and initial vector size
275  * are supported in crypto auth capability
276  *
277  * @param	capability	Description of the symmetric crypto capability.
278  * @param	key_size	Auth key size.
279  * @param	digest_size	Auth digest size.
280  * @param	iv_size		Auth initial vector size.
281  *
282  * @return
283  *   - Return 0 if the parameters are in range of the capability.
284  *   - Return -1 if the parameters are out of range of the capability.
285  */
286 int
287 rte_cryptodev_sym_capability_check_auth(
288 		const struct rte_cryptodev_symmetric_capability *capability,
289 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
290 
291 /**
292  * Check if key, digest, AAD and initial vector sizes are supported
293  * in crypto AEAD capability
294  *
295  * @param	capability	Description of the symmetric crypto capability.
296  * @param	key_size	AEAD key size.
297  * @param	digest_size	AEAD digest size.
298  * @param	aad_size	AEAD AAD size.
299  * @param	iv_size		AEAD IV size.
300  *
301  * @return
302  *   - Return 0 if the parameters are in range of the capability.
303  *   - Return -1 if the parameters are out of range of the capability.
304  */
305 int
306 rte_cryptodev_sym_capability_check_aead(
307 		const struct rte_cryptodev_symmetric_capability *capability,
308 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
309 		uint16_t iv_size);
310 
311 /**
312  * Check if op type is supported
313  *
314  * @param	capability	Description of the asymmetric crypto capability.
315  * @param	op_type		op type
316  *
317  * @return
318  *   - Return 1 if the op type is supported
319  *   - Return 0 if unsupported
320  */
321 __rte_experimental
322 int
323 rte_cryptodev_asym_xform_capability_check_optype(
324 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
325 		enum rte_crypto_asym_op_type op_type);
326 
327 /**
328  * Check if modulus length is in supported range
329  *
330  * @param	capability	Description of the asymmetric crypto capability.
331  * @param	modlen		modulus length.
332  *
333  * @return
334  *   - Return 0 if the parameters are in range of the capability.
335  *   - Return -1 if the parameters are out of range of the capability.
336  */
337 __rte_experimental
338 int
339 rte_cryptodev_asym_xform_capability_check_modlen(
340 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
341 		uint16_t modlen);
342 
343 /**
344  * Provide the cipher algorithm enum, given an algorithm string
345  *
346  * @param	algo_enum	A pointer to the cipher algorithm
347  *				enum to be filled
348  * @param	algo_string	Cipher algorithm string
349  *
350  * @return
351  * - Return -1 if string is not valid
352  * - Return 0 if the string is valid
353  */
354 int
355 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
356 		const char *algo_string);
357 
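/*
 * Example (illustrative sketch): converting a user supplied string into a
 * cipher algorithm enum. The "aes-cbc" literal is assumed to match one of
 * the algorithm strings known to the library.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") != 0) {
 *		// unknown algorithm string, report a configuration error
 *	}
 */
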
358 /**
359  * Provide the authentication algorithm enum, given an algorithm string
360  *
361  * @param	algo_enum	A pointer to the authentication algorithm
362  *				enum to be filled
363  * @param	algo_string	Authentication algo string
364  *
365  * @return
366  * - Return -1 if string is not valid
367  * - Return 0 if the string is valid
368  */
369 int
370 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
371 		const char *algo_string);
372 
373 /**
374  * Provide the AEAD algorithm enum, given an algorithm string
375  *
376  * @param	algo_enum	A pointer to the AEAD algorithm
377  *				enum to be filled
378  * @param	algo_string	AEAD algorithm string
379  *
380  * @return
381  * - Return -1 if string is not valid
382  * - Return 0 if the string is valid
383  */
384 int
385 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
386 		const char *algo_string);
387 
388 /**
389  * Provide the Asymmetric xform enum, given an xform string
390  *
391  * @param	xform_enum	A pointer to the xform type
392  *				enum to be filled
393  * @param	xform_string	xform string
394  *
395  * @return
396  * - Return -1 if string is not valid
397  * - Return 0 if the string is valid
398  */
399 __rte_experimental
400 int
401 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
402 		const char *xform_string);
403 
404 /**
405  * Provide the cipher algorithm string, given an algorithm enum.
406  *
407  * @param	algo_enum	cipher algorithm enum
408  *
409  * @return
410  * - Return NULL if enum is not valid
411  * - Return algo_string corresponding to enum
412  */
413 __rte_experimental
414 const char *
415 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
416 
417 /**
418  * Provide the authentication algorithm string, given an algorithm enum.
419  *
420  * @param	algo_enum	auth algorithm enum
421  *
422  * @return
423  * - Return NULL if enum is not valid
424  * - Return algo_string corresponding to enum
425  */
426 __rte_experimental
427 const char *
428 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
429 
430 /**
431  * Provide the AEAD algorithm string, given an algorithm enum.
432  *
433  * @param	algo_enum	AEAD algorithm enum
434  *
435  * @return
436  * - Return NULL if enum is not valid
437  * - Return algo_string corresponding to enum
438  */
439 __rte_experimental
440 const char *
441 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
442 
443 /**
444  * Provide the Asymmetric xform string, given an xform enum.
445  *
446  * @param	xform_enum	xform type enum
447  *
448  * @return
449  * - Return NULL, if enum is not valid.
450  * - Return xform string, for valid enum.
451  */
452 __rte_experimental
453 const char *
454 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
455 
456 
457 /** Macro used at the end of a crypto PMD's capabilities list */
458 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
459 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
460 
461 
462 /**
463  * Crypto device supported feature flags
464  *
465  * Note:
466  * New feature flags should be added to the end of the list
467  *
468  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
469  */
470 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
471 /**< Symmetric crypto operations are supported */
472 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
473 /**< Asymmetric crypto operations are supported */
474 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
475 /**< Chaining of symmetric crypto operations is supported */
476 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
477 /**< Utilises CPU SIMD SSE instructions */
478 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
479 /**< Utilises CPU SIMD AVX instructions */
480 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
481 /**< Utilises CPU SIMD AVX2 instructions */
482 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
483 /**< Utilises CPU AES-NI instructions */
484 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
485 /**< Operations are off-loaded to an
486  * external hardware accelerator
487  */
488 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
489 /**< Utilises CPU SIMD AVX512 instructions */
490 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
491 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
492  * are supported
493  */
494 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
495 /**< Out-of-place Scatter-gather (SGL) buffers are
496  * supported in input and output
497  */
498 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
499 /**< Out-of-place Scatter-gather (SGL) buffers are supported
500  * in input, combined with linear buffers (LB), with a
501  * single segment in output
502  */
503 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
504 /**< Out-of-place Scatter-gather (SGL) buffers are supported
505  * in output, combined with linear buffers (LB) in input
506  */
507 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
508 /**< Out-of-place linear buffers (LB) are supported in input and output */
509 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
510 /**< Utilises CPU NEON instructions */
511 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
512 /**< Utilises ARM CPU Cryptographic Extensions */
513 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
514 /**< Support Security Protocol Processing */
515 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
516 /**< Support RSA Private Key OP with exponent */
517 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
518 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
519 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
520 /**< Support encrypted-digest operations where digest is appended to data */
521 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
522 /**< Support asymmetric session-less operations */
523 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
524 /**< Support symmetric cpu-crypto processing */
525 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
526 /**< Support symmetric session-less operations */
527 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
528 /**< Support operations on data which is not byte aligned */
529 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
530 /**< Support accelerator specific symmetric raw data-path APIs */
531 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
532 /**< Support operations on multiple data-unit messages */
533 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
534 /**< Support wrapped key in cipher xform  */
535 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
536 /**< Support inner checksum computation/verification */
537 
538 /**
539  * Get the name of a crypto device feature flag
540  *
541  * @param	flag	The mask describing the flag.
542  *
543  * @return
544  *   The name of this flag, or NULL if it's not a valid feature flag.
545  */
546 const char *
547 rte_cryptodev_get_feature_name(uint64_t flag);
548 
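/*
 * Example (illustrative sketch): reporting whether a device offers
 * symmetric crypto, using the flag name helper. dev_id is assumed to be a
 * valid attached device identifier; printf() from <stdio.h> is used for
 * brevity.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	if (info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO)
 *		printf("%s\n", rte_cryptodev_get_feature_name(
 *				RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO));
 */
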
549 /**  Crypto device information */
550 /* Structure rte_cryptodev_info 8< */
551 struct rte_cryptodev_info {
552 	const char *driver_name;	/**< Driver name. */
553 	uint8_t driver_id;		/**< Driver identifier */
554 	struct rte_device *device;	/**< Generic device information. */
555 
556 	uint64_t feature_flags;
557 	/**< Feature flags expose HW/SW features for the given device */
558 
559 	const struct rte_cryptodev_capabilities *capabilities;
560 	/**< Array of capabilities supported by the device */
561 
562 	unsigned max_nb_queue_pairs;
563 	/**< Maximum number of queue pairs supported by device. */
564 
565 	uint16_t min_mbuf_headroom_req;
566 	/**< Minimum mbuf headroom required by device */
567 
568 	uint16_t min_mbuf_tailroom_req;
569 	/**< Minimum mbuf tailroom required by device */
570 
571 	struct {
572 		unsigned max_nb_sessions;
573 		/**< Maximum number of sessions supported by device.
574 		 * If 0, the device does not have any limitation in
575 		 * number of sessions that can be used.
576 		 */
577 	} sym;
578 };
579 /* >8 End of structure rte_cryptodev_info. */
580 
581 #define RTE_CRYPTODEV_DETACHED  (0)
582 #define RTE_CRYPTODEV_ATTACHED  (1)
583 
584 /** Definitions of Crypto device event types */
585 enum rte_cryptodev_event_type {
586 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
587 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
588 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
589 };
590 
591 /** Crypto device queue pair configuration structure. */
592 /* Structure rte_cryptodev_qp_conf 8<*/
593 struct rte_cryptodev_qp_conf {
594 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
595 	struct rte_mempool *mp_session;
596 	/**< The mempool for creating sessions in sessionless mode */
597 };
598 /* >8 End of structure rte_cryptodev_qp_conf. */
599 
600 /**
601  * Function type used for processing crypto ops when enqueue/dequeue burst is
602  * called.
603  *
604  * The callback function is called on enqueue/dequeue burst immediately.
605  *
606  * @param	dev_id		The identifier of the device.
607  * @param	qp_id		The index of the queue pair on which ops are
608  *				enqueued/dequeued. The value must be in the
609  *				range [0, nb_queue_pairs - 1] previously
610  *				supplied to *rte_cryptodev_configure*.
611  * @param	ops		The address of an array of *nb_ops* pointers
612  *				to *rte_crypto_op* structures which contain
613  *				the crypto operations to be processed.
614  * @param	nb_ops		The number of operations to process.
615  * @param	user_param	The arbitrary user parameter passed in by the
616  *				application when the callback was originally
617  *				registered.
618  * @return			The number of ops to be enqueued to the
619  *				crypto device.
620  */
621 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
622 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
623 
624 /**
625  * Typedef for application callback function to be registered by application
626  * software for notification of device events
627  *
628  * @param	dev_id	Crypto device identifier
629  * @param	event	Crypto device event to register for notification of.
630  * @param	cb_arg	User specified parameter to be passed to the
631  *			user's callback function.
632  */
633 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
634 		enum rte_cryptodev_event_type event, void *cb_arg);
635 
636 
637 /** Crypto Device statistics */
638 struct rte_cryptodev_stats {
639 	uint64_t enqueued_count;
640 	/**< Count of all operations enqueued */
641 	uint64_t dequeued_count;
642 	/**< Count of all operations dequeued */
643 
644 	uint64_t enqueue_err_count;
645 	/**< Total error count on operations enqueued */
646 	uint64_t dequeue_err_count;
647 	/**< Total error count on operations dequeued */
648 };
649 
650 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
651 /**< Max length of name of crypto PMD */
652 
653 /**
654  * Get the device identifier for the named crypto device.
655  *
656  * @param	name	device name to select the device structure.
657  *
658  * @return
659  *   - Returns crypto device identifier on success.
660  *   - Return -1 on failure to find named crypto device.
661  */
662 int
663 rte_cryptodev_get_dev_id(const char *name);
664 
665 /**
666  * Get the crypto device name given a device identifier.
667  *
668  * @param dev_id
669  *   The identifier of the device
670  *
671  * @return
672  *   - Returns crypto device name.
673  *   - Returns NULL if crypto device is not present.
674  */
675 const char *
676 rte_cryptodev_name_get(uint8_t dev_id);
677 
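/*
 * Example (illustrative sketch): looking up a device by name and printing
 * it back. "crypto_aesni_mb" is a hypothetical device name used only for
 * illustration.
 *
 *	int dev_id = rte_cryptodev_get_dev_id("crypto_aesni_mb");
 *
 *	if (dev_id >= 0)
 *		printf("device %d is %s\n", dev_id,
 *		       rte_cryptodev_name_get((uint8_t)dev_id));
 */
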
678 /**
679  * Get the total number of crypto devices that have been successfully
680  * initialised.
681  *
682  * @return
683  *   - The total number of usable crypto devices.
684  */
685 uint8_t
686 rte_cryptodev_count(void);
687 
688 /**
689  * Get the number of crypto devices of a given driver type.
690  *
691  * @param	driver_id	driver identifier.
692  *
693  * @return
694  *   Returns the number of crypto devices.
695  */
696 uint8_t
697 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
698 
699 /**
700  * Get number and identifiers of attached crypto devices that
701  * use the same crypto driver.
702  *
703  * @param	driver_name	driver name.
704  * @param	devices		output devices identifiers.
705  * @param	nb_devices	maximal number of devices.
706  *
707  * @return
708  *   Returns the number of attached crypto devices.
709  */
710 uint8_t
711 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
712 		uint8_t nb_devices);
713 /**
714  * Return the NUMA socket to which a device is connected
715  *
716  * @param dev_id
717  *   The identifier of the device
718  * @return
719  *   The NUMA socket id to which the device is connected or
720  *   a default of zero if the socket could not be determined.
721  *   Returns -1 if the dev_id value is out of range.
722  */
723 int
724 rte_cryptodev_socket_id(uint8_t dev_id);
725 
726 /** Crypto device configuration structure */
727 /* Structure rte_cryptodev_config 8< */
728 struct rte_cryptodev_config {
729 	int socket_id;			/**< Socket to allocate resources on */
730 	uint16_t nb_queue_pairs;
731 	/**< Number of queue pairs to configure on device */
732 	uint64_t ff_disable;
733 	/**< Feature flags to be disabled. Only the following features are
734 	 * allowed to be disabled,
735 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
736 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
737 	 *  - RTE_CRYPTODEV_FF_SECURITY
738 	 */
739 };
740 /* >8 End of structure rte_cryptodev_config. */
741 
742 /**
743  * Configure a device.
744  *
745  * This function must be invoked first before any other function in the
746  * API. This function can also be re-invoked when a device is in the
747  * stopped state.
748  *
749  * @param	dev_id		The identifier of the device to configure.
750  * @param	config		The crypto device configuration structure.
751  *
752  * @return
753  *   - 0: Success, device configured.
754  *   - <0: Error code returned by the driver configuration function.
755  */
756 int
757 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
758 
759 /**
760  * Start a device.
761  *
762  * The device start step is the last one and consists of setting the configured
763  * offload features and starting the transmit and the receive units of the
764  * device.
765  * On success, all basic functions exported by the API (link status,
766  * receive/transmit, and so on) can be invoked.
767  *
768  * @param dev_id
769  *   The identifier of the device.
770  * @return
771  *   - 0: Success, device started.
772  *   - <0: Error code of the driver device start function.
773  */
774 int
775 rte_cryptodev_start(uint8_t dev_id);
776 
777 /**
778  * Stop a device. The device can be restarted with a call to
779  * rte_cryptodev_start()
780  *
781  * @param	dev_id		The identifier of the device.
782  */
783 void
784 rte_cryptodev_stop(uint8_t dev_id);
785 
786 /**
787  * Close a device. The device cannot be restarted!
788  *
789  * @param	dev_id		The identifier of the device.
790  *
791  * @return
792  *  - 0 on successfully closing device
793  *  - <0 on failure to close device
794  */
795 int
796 rte_cryptodev_close(uint8_t dev_id);
797 
798 /**
799  * Allocate and set up a queue pair for a device.
800  *
801  *
802  * @param	dev_id		The identifier of the device.
803  * @param	queue_pair_id	The index of the queue pair to set up. The
804  *				value must be in the range [0, nb_queue_pair
805  *				- 1] previously supplied to
806  *				rte_cryptodev_configure().
807  * @param	qp_conf		The pointer to the configuration data to be
808  *				used for the queue pair.
809  * @param	socket_id	The *socket_id* argument is the socket
810  *				identifier in case of NUMA. The value can be
811  *				*SOCKET_ID_ANY* if there is no NUMA constraint
812  *				for the DMA memory allocated for the receive
813  *				queue pair.
814  *
815  * @return
816  *   - 0: Success, queue pair correctly set up.
817  *   - <0: Queue pair configuration failed
818  */
819 int
820 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
821 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
822 
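/*
 * Example (illustrative sketch): the usual configure / queue pair setup /
 * start sequence. The descriptor count, the single queue pair and the
 * session_pool mempool are assumptions made for this example.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *		.ff_disable = 0,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			SOCKET_ID_ANY) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0) {
 *		// roll back and report the failure
 *	}
 */
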
823 /**
824  * Get the status of queue pairs setup on a specific crypto device
825  *
826  * @param	dev_id		Crypto device identifier.
827  * @param	queue_pair_id	The index of the queue pair to query. The
828  *				value must be in the range [0, nb_queue_pair
829  *				- 1] previously supplied to
830  *				rte_cryptodev_configure().
831  * @return
832  *   - 0: qp was not configured
833  *   - 1: qp was configured
834  *   - -EINVAL: device was not configured
835  */
836 __rte_experimental
837 int
838 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
839 
840 /**
841  * Get the number of queue pairs on a specific crypto device
842  *
843  * @param	dev_id		Crypto device identifier.
844  * @return
845  *   - The number of configured queue pairs.
846  */
847 uint16_t
848 rte_cryptodev_queue_pair_count(uint8_t dev_id);
849 
850 
851 /**
852  * Retrieve the general I/O statistics of a device.
853  *
854  * @param	dev_id		The identifier of the device.
855  * @param	stats		A pointer to a structure of type
856  *				*rte_cryptodev_stats* to be filled with the
857  *				values of device counters.
858  * @return
859  *   - Zero if successful.
860  *   - Non-zero otherwise.
861  */
862 int
863 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
864 
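/*
 * Example (illustrative sketch): dumping the basic counters of a device.
 * dev_id is assumed valid; PRIu64 comes from <inttypes.h> and printf()
 * from <stdio.h>.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64
 *		       " deq_err %" PRIu64 "\n",
 *		       stats.enqueued_count, stats.dequeued_count,
 *		       stats.enqueue_err_count, stats.dequeue_err_count);
 */
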
865 /**
866  * Reset the general I/O statistics of a device.
867  *
868  * @param	dev_id		The identifier of the device.
869  */
870 void
871 rte_cryptodev_stats_reset(uint8_t dev_id);
872 
873 /**
874  * Retrieve the contextual information of a device.
875  *
876  * @param	dev_id		The identifier of the device.
877  * @param	dev_info	A pointer to a structure of type
878  *				*rte_cryptodev_info* to be filled with the
879  *				contextual information of the device.
880  *
881  * @note The capabilities field of dev_info is set to point to the first
882  * element of an array of struct rte_cryptodev_capabilities. The element after
883  * the last valid element has its op field set to
884  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
885  */
886 void
887 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
888 
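/*
 * Example (illustrative sketch): walking the capability array returned via
 * rte_cryptodev_info_get(), relying on the RTE_CRYPTO_OP_TYPE_UNDEFINED
 * terminator described above. dev_id is assumed valid.
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (cap = info.capabilities;
 *	     cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
 *		// inspect cap->sym or cap->asym depending on cap->op
 *	}
 */
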
889 
890 /**
891  * Register a callback function for a specific device id.
892  *
893  * @param	dev_id		Device id.
894  * @param	event		The event of interest.
895  * @param	cb_fn		User supplied callback function to be called.
896  * @param	cb_arg		Pointer to the parameters for the registered
897  *				callback.
898  *
899  * @return
900  *  - On success, zero.
901  *  - On failure, a negative value.
902  */
903 int
904 rte_cryptodev_callback_register(uint8_t dev_id,
905 		enum rte_cryptodev_event_type event,
906 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
907 
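/*
 * Example (illustrative sketch): registering a handler for error events.
 * app_crypto_err_cb is an application function, shown only to match the
 * rte_cryptodev_cb_fn prototype; dev_id is assumed valid.
 *
 *	static void
 *	app_crypto_err_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		// react to RTE_CRYPTODEV_EVENT_ERROR for dev_id here
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			app_crypto_err_cb, NULL);
 */
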
908 /**
909  * Unregister a callback function for a specific device id.
910  *
911  * @param	dev_id		The device identifier.
912  * @param	event		The event of interest.
913  * @param	cb_fn		User supplied callback function to be called.
914  * @param	cb_arg		Pointer to the parameters for the registered
915  *				callback.
916  *
917  * @return
918  *  - On success, zero.
919  *  - On failure, a negative value.
920  */
921 int
922 rte_cryptodev_callback_unregister(uint8_t dev_id,
923 		enum rte_cryptodev_event_type event,
924 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
925 
926 /**
927  * @warning
928  * @b EXPERIMENTAL: this API may change without prior notice.
929  *
930  * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR
931  * events.
932  *
933  * @param          dev_id	The device identifier.
934  * @param          qp_id	Queue pair index to be queried.
935  *
936  * @return
937  *   - 1 if requested queue has a pending event.
938  *   - 0 if no pending event is found.
939  *   - a negative value on failure
940  */
941 __rte_experimental
942 int
943 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
944 
945 struct rte_cryptodev_callback;
946 
947 /** Structure to keep track of registered callbacks */
948 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
949 
950 /**
951  * Structure used to hold information about the callbacks to be called for a
952  * queue pair on enqueue/dequeue.
953  */
954 struct rte_cryptodev_cb {
955 	struct rte_cryptodev_cb *next;
956 	/**< Pointer to next callback */
957 	rte_cryptodev_callback_fn fn;
958 	/**< Pointer to callback function */
959 	void *arg;
960 	/**< Pointer to argument */
961 };
962 
963 /**
964  * @internal
965  * Structure used to hold information about the RCU for a queue pair.
966  */
967 struct rte_cryptodev_cb_rcu {
968 	struct rte_cryptodev_cb *next;
969 	/**< Pointer to next callback */
970 	struct rte_rcu_qsbr *qsbr;
971 	/**< RCU QSBR variable per queue pair */
972 };
973 
974 void *
975 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
976 
977 /**
978  * Create a symmetric session mempool.
979  *
980  * @param name
981  *   The unique mempool name.
982  * @param nb_elts
983  *   The number of elements in the mempool.
984  * @param elt_size
985  *   The size of the element. This should be the size of the cryptodev PMD
986  *   session private data obtained through
987  *   rte_cryptodev_sym_get_private_session_size() function call.
988  *   For the user who wants to use the same mempool for heterogeneous PMDs
989  *   this value should be the maximum value of their private session sizes.
990  *   Please note the created mempool will have a bigger elt size than this
991  *   value, as the necessary session header and possible padding are added
992  *   to each elt.
993  * @param cache_size
994  *   The number of per-lcore cache elements
995  * @param priv_size
996  *   The private data size of each session.
997  * @param socket_id
998  *   The *socket_id* argument is the socket identifier in the case of
999  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1000  *   constraint for the reserved zone.
1001  *
1002  * @return
1003  *  - On success returns the created session mempool pointer
1004  *  - On failure returns NULL
1005  */
1006 __rte_experimental
1007 struct rte_mempool *
1008 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1009 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1010 	int socket_id);
1011 
1012 
1013 /**
1014  * Create an asymmetric session mempool.
1015  *
1016  * @param name
1017  *   The unique mempool name.
1018  * @param nb_elts
1019  *   The number of elements in the mempool.
1020  * @param cache_size
1021  *   The number of per-lcore cache elements
1022  * @param user_data_size
1023  *   The size of user data to be placed after session private data.
1024  * @param socket_id
1025  *   The *socket_id* argument is the socket identifier in the case of
1026  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1027  *   constraint for the reserved zone.
1028  *
1029  * @return
1030  *  - On success returns the created session mempool
1031  *  - On failure returns NULL
1032  */
1033 __rte_experimental
1034 struct rte_mempool *
1035 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1036 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1037 
1038 /**
1039  * Create symmetric crypto session and fill out private data for the device id,
1040  * based on its device type.
1041  *
1042  * @param   dev_id   ID of device that we want the session to be used on
1043  * @param   xforms   Symmetric crypto transform operations to apply on flow
1044  *                   processed with this session
1045  * @param   mp       Mempool to allocate symmetric session objects from
1046  *
1047  * @return
1048  *  - On success return pointer to sym-session.
1049  *  - On failure returns NULL and rte_errno is set to the error code:
1050  *    - EINVAL on invalid arguments.
1051  *    - ENOMEM on memory error for session allocation.
1052  *    - ENOTSUP if device doesn't support session configuration.
1053  */
1054 void *
1055 rte_cryptodev_sym_session_create(uint8_t dev_id,
1056 		struct rte_crypto_sym_xform *xforms,
1057 		struct rte_mempool *mp);
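
/*
 * Example (illustrative sketch): creating a session mempool and an AES-CBC
 * encrypt session on it. The element count, cache size, key buffer,
 * IV_OFFSET and dev_id are assumptions made for this example.
 *
 *	struct rte_mempool *sess_mp = rte_cryptodev_sym_session_pool_create(
 *		"sym_sess_mp", 1024,
 *		rte_cryptodev_sym_get_private_session_size(dev_id),
 *		128, 0, SOCKET_ID_ANY);
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform, sess_mp);
 *
 *	if (sess == NULL) {
 *		// rte_errno holds EINVAL, ENOMEM or ENOTSUP
 *	}
 */
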
1058 /**
1059  * Create and initialise an asymmetric crypto session structure.
1060  * Calls the PMD to configure the private session data.
1061  *
1062  * @param   dev_id   ID of device that we want the session to be used on
1063  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1064  *                   processed with this session
1065  * @param   mp       mempool to allocate asymmetric session
1066  *                   objects from
1067  * @param   session  void ** for session to be used
1068  *
1069  * @return
1070  *  - 0 on success.
1071  *  - -EINVAL on invalid arguments.
1072  *  - -ENOMEM on memory error for session allocation.
1073  *  - -ENOTSUP if device doesn't support session configuration.
1074  */
1075 __rte_experimental
1076 int
1077 rte_cryptodev_asym_session_create(uint8_t dev_id,
1078 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1079 		void **session);
1080 
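/*
 * Example (illustrative sketch): allocating an asymmetric session from a
 * dedicated pool. The xform is assumed to have been filled in elsewhere
 * (a struct rte_crypto_asym_xform) and dev_id to be valid.
 *
 *	struct rte_mempool *asym_mp = rte_cryptodev_asym_session_pool_create(
 *		"asym_sess_mp", 128, 32, 0, SOCKET_ID_ANY);
 *	void *asym_sess = NULL;
 *
 *	if (rte_cryptodev_asym_session_create(dev_id, &xform, asym_mp,
 *			&asym_sess) != 0) {
 *		// handle -EINVAL / -ENOMEM / -ENOTSUP
 *	}
 */
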
1081 /**
1082  * Free a session for the device id and return it to its mempool.
1083  * It is the application's responsibility to ensure that there are
1084  * no in-flight operations still using the session.
1085  *
1086  * @param   dev_id   ID of device that uses the session.
1087  * @param   sess     Session header to be freed.
1088  *
1089  * @return
1090  *  - 0 if successful.
1091  *  - -EINVAL if the session is NULL or the device ids do not match.
1092  */
1093 int
1094 rte_cryptodev_sym_session_free(uint8_t dev_id,
1095 	void *sess);
1096 
1097 /**
1098  * Clears and frees asymmetric crypto session header and private data,
1099  * returning it to its original mempool.
1100  *
1101  * @param   dev_id   ID of device that uses the asymmetric session.
1102  * @param   sess     Session header to be freed.
1103  *
1104  * @return
1105  *  - 0 if successful.
1106  *  - -EINVAL if device is invalid or session is NULL.
1107  */
1108 __rte_experimental
1109 int
1110 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1111 
1112 /**
1113  * Get the size of the asymmetric session header.
1114  *
1115  * @return
1116  *   Size of the asymmetric session header.
1117  */
1118 __rte_experimental
1119 unsigned int
1120 rte_cryptodev_asym_get_header_session_size(void);
1121 
1122 /**
1123  * Get the size of the private symmetric session data
1124  * for a device.
1125  *
1126  * @param	dev_id		The device identifier.
1127  *
1128  * @return
1129  *   - Size of the private data, if successful
1130  *   - 0 if device is invalid or does not have private
1131  *   symmetric session
1132  */
1133 unsigned int
1134 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1135 
1136 /**
1137  * Get the size of the private data for asymmetric session
1138  * on device
1139  *
1140  * @param	dev_id		The device identifier.
1141  *
1142  * @return
1143  *   - Size of the asymmetric private data, if successful
1144  *   - 0 if device is invalid or does not have private session
1145  */
1146 __rte_experimental
1147 unsigned int
1148 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1149 
1150 /**
1151  * Validate if the crypto device index refers to a valid attached crypto device.
1152  *
1153  * @param	dev_id	Crypto device index.
1154  *
1155  * @return
1156  *   - If the device index is valid (1) or not (0).
1157  */
1158 unsigned int
1159 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1160 
1161 /**
1162  * Provide driver identifier.
1163  *
1164  * @param name
1165  *   The pointer to a driver name.
1166  * @return
1167  *  The driver type identifier or -1 if no driver found
1168  */
1169 int rte_cryptodev_driver_id_get(const char *name);
1170 
1171 /**
1172  * Provide driver name.
1173  *
1174  * @param driver_id
1175  *   The driver identifier.
1176  * @return
1177  *  The driver name or NULL if no driver found
1178  */
1179 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1180 
1181 /**
1182  * Store user data in a session.
1183  *
1184  * @param	sess		Session pointer allocated by
1185  *				*rte_cryptodev_sym_session_create*.
1186  * @param	data		Pointer to the user data.
1187  * @param	size		Size of the user data.
1188  *
1189  * @return
1190  *  - On success, zero.
1191  *  - On failure, a negative value.
1192  */
1193 __rte_experimental
1194 int
1195 rte_cryptodev_sym_session_set_user_data(void *sess,
1196 					void *data,
1197 					uint16_t size);
1198 
1199 #define CRYPTO_SESS_OPAQUE_DATA_OFF 0
1200 /**
1201  * Get opaque data from session handle
1202  */
1203 static inline uint64_t
1204 rte_cryptodev_sym_session_opaque_data_get(void *sess)
1205 {
1206 	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
1207 }
1208 
1209 /**
1210  * Set opaque data in session handle
1211  */
1212 static inline void
1213 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1214 {
1215 	uint64_t *data;
1216 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1217 	*data = opaque;
1218 }
1219 
1220 /**
1221  * Get user data stored in a session.
1222  *
1223  * @param	sess		Session pointer allocated by
1224  *				*rte_cryptodev_sym_session_create*.
1225  *
1226  * @return
1227  *  - On success return pointer to user data.
1228  *  - On failure returns NULL.
1229  */
1230 __rte_experimental
1231 void *
1232 rte_cryptodev_sym_session_get_user_data(void *sess);
1233 
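/*
 * Example (illustrative sketch): attaching per-flow application data to a
 * session and reading it back. The session pool is assumed to have been
 * created with a user data size of at least sizeof(struct app_sess_priv);
 * app_sess_priv is a hypothetical application type.
 *
 *	struct app_sess_priv { uint32_t flow_id; } priv = { .flow_id = 7 };
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &priv,
 *			sizeof(priv)) == 0) {
 *		struct app_sess_priv *p =
 *			rte_cryptodev_sym_session_get_user_data(sess);
 *		// p->flow_id == 7
 *	}
 */
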
1234 /**
1235  * Store user data in an asymmetric session.
1236  *
1237  * @param	sess		Session pointer allocated by
1238  *				*rte_cryptodev_asym_session_create*.
1239  * @param	data		Pointer to the user data.
1240  * @param	size		Size of the user data.
1241  *
1242  * @return
1243  *  - On success, zero.
1244  *  - -EINVAL if the session pointer is invalid.
1245  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1246  */
1247 __rte_experimental
1248 int
1249 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1250 
1251 /**
1252  * Get user data stored in an asymmetric session.
1253  *
1254  * @param	sess		Session pointer allocated by
1255  *				*rte_cryptodev_asym_session_create*.
1256  *
1257  * @return
1258  *  - On success return pointer to user data.
1259  *  - On failure returns NULL.
1260  */
1261 __rte_experimental
1262 void *
1263 rte_cryptodev_asym_session_get_user_data(void *sess);
1264 
1265 /**
1266  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1267  * on user provided data.
1268  *
1269  * @param	dev_id	The device identifier.
1270  * @param	sess	Cryptodev session structure
1271  * @param	ofs	Start and stop offsets for auth and cipher operations
1272  * @param	vec	Vectorized operation descriptor
1273  *
1274  * @return
1275  *  - Returns number of successfully processed packets.
1276  */
1277 __rte_experimental
1278 uint32_t
1279 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1280 	void *sess, union rte_crypto_sym_ofs ofs,
1281 	struct rte_crypto_sym_vec *vec);
1282 
1283 /**
1284  * Get the size of the raw data-path context buffer.
1285  *
1286  * @param	dev_id		The device identifier.
1287  *
1288  * @return
1289  *   - If the device supports raw data-path APIs, return the context size.
1290  *   - If the device does not support the APIs, return -1.
1291  */
1292 __rte_experimental
1293 int
1294 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1295 
1296 /**
1297  * Set session event meta data
1298  *
1299  * @param	dev_id		The device identifier.
1300  * @param	sess            Crypto or security session.
1301  * @param	op_type         Operation type.
1302  * @param	sess_type       Session type.
1303  * @param	ev_mdata	Pointer to the event crypto meta data
1304  *				(aka *union rte_event_crypto_metadata*)
1305  * @param	size            Size of ev_mdata.
1306  *
1307  * @return
1308  *  - On success, zero.
1309  *  - On failure, a negative value.
1310  */
1311 __rte_experimental
1312 int
1313 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1314 	enum rte_crypto_op_type op_type,
1315 	enum rte_crypto_op_sess_type sess_type,
1316 	void *ev_mdata, uint16_t size);
1317 
1318 /**
1319  * Union of different crypto session types, including session-less xform
1320  * pointer.
1321  */
1322 union rte_cryptodev_session_ctx {void *crypto_sess;
1323 	struct rte_crypto_sym_xform *xform;
1324 	struct rte_security_session *sec_sess;
1325 };
1326 
1327 /**
1328  * Enqueue a vectorized operation descriptor into the device queue but the
1329  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1330  * is called.
1331  *
1332  * @param	qp		Driver specific queue pair data.
1333  * @param	drv_ctx		Driver specific context data.
1334  * @param	vec		Vectorized operation descriptor.
1335  * @param	ofs		Start and stop offsets for auth and cipher
1336  *				operations.
1337  * @param	user_data	The array of user data for dequeue later.
1338  * @param	enqueue_status	Driver written value to specify the
1339  *				enqueue status. Possible values:
1340  *				- 1: The number of operations returned are
1341  *				     enqueued successfully.
1342  *				- 0: The number of operations returned are
1343  *				     cached into the queue but are not processed
1344  *				     until rte_cryptodev_raw_enqueue_done() is
1345  *				     called.
1346  *				- negative integer: Error occurred.
1347  * @return
1348  *   - The number of operations in the descriptor successfully enqueued or
1349  *     cached into the queue but not enqueued yet, depends on the
1350  *     "enqueue_status" value.
1351  */
1352 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1353 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1354 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1355 
1356 /**
1357  * Enqueue single raw data vector into the device queue but the driver may or
1358  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1359  *
1360  * @param	qp		Driver specific queue pair data.
1361  * @param	drv_ctx		Driver specific context data.
1362  * @param	data_vec	The buffer data vector.
1363  * @param	n_data_vecs	Number of buffer data vectors.
1364  * @param	ofs		Start and stop offsets for auth and cipher
1365  *				operations.
1366  * @param	iv		IV virtual and IOVA addresses
1367  * @param	digest		digest virtual and IOVA addresses
1368  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1369  *				depends on the algorithm used.
1370  * @param	user_data	The user data.
1371  * @return
1372  *   - 1: The data vector is enqueued successfully.
1373  *   - 0: The data vector is cached into the queue but is not processed
1374  *        until rte_cryptodev_raw_enqueue_done() is called.
1375  *   - negative integer: failure.
1376  */
1377 typedef int (*cryptodev_sym_raw_enqueue_t)(
1378 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1379 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1380 	struct rte_crypto_va_iova_ptr *iv,
1381 	struct rte_crypto_va_iova_ptr *digest,
1382 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1383 	void *user_data);
1384 
1385 /**
1386  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1387  * enqueued/dequeued operations.
1388  *
1389  * @param	qp		Driver specific queue pair data.
1390  * @param	drv_ctx		Driver specific context data.
1391  * @param	n		The total number of processed operations.
1392  * @return
1393  *   - On success return 0.
1394  *   - On failure return negative integer.
1395  */
1396 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1397 	uint32_t n);
1398 
1399 /**
1400  * Typedef of a user provided function for the driver to get the dequeue count.
1401  * The function may return a fixed number or the number parsed from the user
1402  * data stored in the first processed operation.
1403  *
1404  * @param	user_data	Dequeued user data.
1405  * @return
1406  *  - The number of operations to be dequeued.
1407  */
1408 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1409 
1410 /**
1411  * Typedef of a user provided function to handle post-dequeue processing of an
1412  * operation, such as filling in its status.
1413  *
1414  * @param	user_data	Dequeued user data.
1415  * @param	index		Index number of the processed descriptor.
1416  * @param	is_op_success	Operation status provided by the driver.
1417  */
1418 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1419 	uint32_t index, uint8_t is_op_success);
1420 
1421 /**
1422  * Dequeue a burst of symmetric crypto processing.
1423  *
1424  * @param	qp			Driver specific queue pair data.
1425  * @param	drv_ctx			Driver specific context data.
1426  * @param	get_dequeue_count	User provided callback function to
1427  *					obtain dequeue operation count.
1428  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1429  *					value is used to pass the maximum
1430  *					number of operations to be dequeued.
1431  * @param	post_dequeue		User provided callback function to
1432  *					post-process a dequeued operation.
1433  * @param	out_user_data		User data pointer array to be retrieved
1434  *					from the device queue. If
1435  *					*is_user_data_array* is set, there
1436  *					should be enough room to store all
1437  *					user data.
1438  * @param	is_user_data_array	Set 1 if every dequeued user data will
1439  *					be written into out_user_data array.
1440  *					Set 0 if only the first user data will
1441  *					be written into out_user_data array.
1442  * @param	n_success		Driver written value to specify the
1443  *					total successful operations count.
1444  * @param	dequeue_status		Driver written value to specify the
1445  *					dequeue status. Possible values:
1446  *					- 1: Successfully dequeued the number
1447  *					     of operations returned. The user
1448  *					     data previously set during enqueue
1449  *					     is stored in the "out_user_data".
1450  *					- 0: The number of operations returned
1451  *					     are completed and the user data is
1452  *					     stored in the "out_user_data", but
1453  *					     they are not freed from the queue
1454  *					     until
1455  *					     rte_cryptodev_raw_dequeue_done()
1456  *					     is called.
1457  *					- negative integer: Error occurred.
1458  * @return
1459  *   - The number of operations dequeued or completed but not freed from the
1460  *     queue, depends on "dequeue_status" value.
1461  */
1462 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1463 	uint8_t *drv_ctx,
1464 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1465 	uint32_t max_nb_to_dequeue,
1466 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1467 	void **out_user_data, uint8_t is_user_data_array,
1468 	uint32_t *n_success, int *dequeue_status);
1469 
1470 /**
1471  * Dequeue a symmetric crypto processing.
1472  *
1473  * @param	qp			Driver specific queue pair data.
1474  * @param	drv_ctx			Driver specific context data.
1475  * @param	dequeue_status		Driver written value to specify the
1476  *					dequeue status. Possible values:
1477  *					- 1: Successfully dequeued an operation.
1478  *					     The user data is returned.
1479  *					- 0: The first operation in the queue
1480  *					     is completed and the user data
1481  *					     previously set during enqueue is
1482  *					     returned, but it is not freed from
1483  *					     the queue until
1484  *					     rte_cryptodev_raw_dequeue_done() is
1485  *					     called.
1486  *					- negative integer: Error occurred.
1487  * @param	op_status		Driver written value to specify
1488  *					operation status.
1489  * @return
1490  *   - The user data pointer retrieved from device queue or NULL if no
1491  *     operation is ready for dequeue.
1492  */
1493 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1494 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1495 		enum rte_crypto_op_status *op_status);
1496 
1497 /**
1498  * Context data for raw data-path API crypto process. The buffer of this
1499  * structure is to be allocated by the user application with a size equal
1500  * to or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1501  */
1502 struct rte_crypto_raw_dp_ctx {
1503 	void *qp_data;
1504 
1505 	cryptodev_sym_raw_enqueue_t enqueue;
1506 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1507 	cryptodev_sym_raw_operation_done_t enqueue_done;
1508 	cryptodev_sym_raw_dequeue_t dequeue;
1509 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1510 	cryptodev_sym_raw_operation_done_t dequeue_done;
1511 
1512 	/* Driver specific context data */
1513 	__extension__ uint8_t drv_ctx_data[];
1514 };
1515 
1516 /**
1517  * Configure raw data-path context data.
1518  *
1519  * @param	dev_id		The device identifier.
1520  * @param	qp_id		The index of the queue pair from which to
1521  *				retrieve processed packets. The value must be
1522  *				in the range [0, nb_queue_pair - 1] previously
1523  *				supplied to rte_cryptodev_configure().
1524  * @param	ctx		The raw data-path context data.
1525  * @param	sess_type	Session type.
1526  * @param	session_ctx	Session context data.
1527  * @param	is_update	Set 0 if it is to initialize the ctx.
1528  *				Set 1 if ctx is initialized and only to update
1529  *				session context data.
1530  * @return
1531  *   - On success return 0.
1532  *   - On failure return negative integer.
1533  *     - -EINVAL if input parameters are invalid.
1534  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1535  *        provided session.
1536  */
1537 __rte_experimental
1538 int
1539 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1540 	struct rte_crypto_raw_dp_ctx *ctx,
1541 	enum rte_crypto_op_sess_type sess_type,
1542 	union rte_cryptodev_session_ctx session_ctx,
1543 	uint8_t is_update);
1544 
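/*
 * Example (illustrative sketch): sizing and configuring a raw data-path
 * context for queue pair 0 with an existing symmetric session. dev_id,
 * the session and the use of rte_zmalloc() are assumptions made here.
 *
 *	int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	if (size < 0) {
 *		// raw data-path API not supported by this device
 *	}
 *	ctx = rte_zmalloc(NULL, size, 0);
 *	if (ctx == NULL ||
 *	    rte_cryptodev_configure_raw_dp_ctx(dev_id, 0, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
 *		// handle the error
 *	}
 */
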
1545 /**
1546  * Enqueue a vectorized operation descriptor into the device queue but the
1547  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1548  * is called.
1549  *
1550  * @param	ctx		The initialized raw data-path context data.
1551  * @param	vec		Vectorized operation descriptor.
1552  * @param	ofs		Start and stop offsets for auth and cipher
1553  *				operations.
1554  * @param	user_data	The array of user data for dequeue later.
1555  * @param	enqueue_status	Driver written value to specify the
1556  *				enqueue status. Possible values:
1557  *				- 1: The number of operations returned are
1558  *				     enqueued successfully.
1559  *				- 0: The number of operations returned are
1560  *				     cached into the queue but are not processed
1561  *				     until rte_cryptodev_raw_enqueue_done() is
1562  *				     called.
1563  *				- negative integer: Error occurred.
1564  * @return
1565  *   - The number of operations in the descriptor successfully enqueued or
1566  *     cached into the queue but not enqueued yet, depends on the
1567  *     "enqueue_status" value.
1568  */
1569 __rte_experimental
1570 uint32_t
1571 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1572 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1573 	void **user_data, int *enqueue_status);
1574 
1575 /**
1576  * Enqueue single raw data vector into the device queue but the driver may or
1577  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1578  *
1579  * @param	ctx		The initialized raw data-path context data.
1580  * @param	data_vec	The buffer data vector.
1581  * @param	n_data_vecs	Number of buffer data vectors.
1582  * @param	ofs		Start and stop offsets for auth and cipher
1583  *				operations.
1584  * @param	iv		IV virtual and IOVA addresses
1585  * @param	digest		digest virtual and IOVA addresses
1586  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1587  *				depends on the algorithm used.
1588  * @param	user_data	The user data.
1589  * @return
1590  *   - 1: The data vector is enqueued successfully.
1591  *   - 0: The data vector is cached into the queue but is not processed
1592  *        until rte_cryptodev_raw_enqueue_done() is called.
1593  *   - negative integer: failure.
1594  */
1595 __rte_experimental
1596 static __rte_always_inline int
1597 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1598 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1599 	union rte_crypto_sym_ofs ofs,
1600 	struct rte_crypto_va_iova_ptr *iv,
1601 	struct rte_crypto_va_iova_ptr *digest,
1602 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1603 	void *user_data)
1604 {
1605 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1606 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1607 }
1608 
1609 /**
1610  * Start processing all enqueued operations from the last
1611  * rte_cryptodev_configure_raw_dp_ctx() call.
1612  *
1613  * @param	ctx	The initialized raw data-path context data.
1614  * @param	n	The number of operations cached.
1615  * @return
1616  *   - On success return 0.
1617  *   - On failure return negative integer.
1618  */
1619 __rte_experimental
1620 int
1621 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1622 		uint32_t n);
1623 
1624 /**
1625  * Dequeue a burst of processed symmetric crypto operations.
1626  *
1627  * @param	ctx			The initialized raw data-path context
1628  *					data.
1629  * @param	get_dequeue_count	User provided callback function to
1630  *					obtain dequeue operation count.
1631  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1632  *					value is used to pass the maximum
1633  *					number of operations to be dequeued.
1634  * @param	post_dequeue		User provided callback function to
1635  *					post-process a dequeued operation.
1636  * @param	out_user_data		User data pointer array to be retrieved
1637  *					from the device queue. If
1638  *					*is_user_data_array* is set, there
1639  *					should be enough room to store all
1640  *					user data.
1641  * @param	is_user_data_array	Set to 1 if the user data of every
1642  *					dequeued operation should be written
1643  *					into the out_user_data array, or 0 if
1644  *					only the first user data should be written.
1645  * @param	n_success		Driver written value to specify the
1646  *					total count of successful operations.
1647  * @param	dequeue_status		Driver written value to specify the
1648  *					dequeue status. Possible values:
1649  *					- 1: Successfully dequeued the number
1650  *					     of operations returned. The user
1651  *					     data previously set during enqueue
1652  *					     is stored in the "out_user_data".
1653  *					- 0: The number of operations returned
1654  *					     are completed and the user data is
1655  *					     stored in the "out_user_data", but
1656  *					     they are not freed from the queue
1657  *					     until
1658  *					     rte_cryptodev_raw_dequeue_done()
1659  *					     is called.
1660  *					- negative integer: Error occurred.
1661  * @return
1662  *   - The number of operations dequeued or completed but not freed from the
1663  *     queue, depends on "dequeue_status" value.
1664  */
1665 __rte_experimental
1666 uint32_t
1667 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1668 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1669 	uint32_t max_nb_to_dequeue,
1670 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1671 	void **out_user_data, uint8_t is_user_data_array,
1672 	uint32_t *n_success, int *dequeue_status);
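
/*
 * Illustrative sketch (not a normative part of this API): dequeueing up to
 * BURST_SIZE results without a get_dequeue_count callback. BURST_SIZE and
 * the out_user_data array are application-provided placeholders; the
 * post_deq() prototype follows rte_cryptodev_raw_post_dequeue_t.
 *
 *	static void
 *	post_deq(void *user_data, uint32_t index, uint8_t is_op_success)
 *	{
 *		// where the application would record per-op completion status
 *	}
 *
 *	...
 *	uint32_t n, n_success;
 *	int dequeue_status;
 *
 *	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, BURST_SIZE, post_deq,
 *			out_user_data, 1, &n_success, &dequeue_status);
 *	if (dequeue_status < 0)
 *		return -1;
 *	if (dequeue_status == 0 && n != 0) {
 *		// results were read but not yet released from the queue
 *		if (rte_cryptodev_raw_dequeue_done(ctx, n) < 0)
 *			return -1;
 *	}
 */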
1673 
1674 /**
1675  * Dequeue a single processed symmetric crypto operation.
1676  *
1677  * @param	ctx			The initialized raw data-path context
1678  *					data.
1679  * @param	dequeue_status		Driver written value to specify the
1680  *					dequeue status. Possible values:
1681  *					- 1: Successfully dequeued an operation.
1682  *					     The user data is returned.
1683  *					- 0: The first operation in the queue
1684  *					     is completed and the user data
1685  *					     previously set during enqueue is
1686  *					     returned, but it is not freed from
1687  *					     the queue until
1688  *					     rte_cryptodev_raw_dequeue_done() is
1689  *					     called.
1690  *					- negative integer: Error occurred.
1691  * @param	op_status		Driver written value to specify
1692  *					operation status.
1693  * @return
1694  *   - The user data pointer retrieved from device queue or NULL if no
1695  *     operation is ready for dequeue.
1696  */
1697 __rte_experimental
1698 static __rte_always_inline void *
1699 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1700 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1701 {
1702 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1703 			op_status);
1704 }
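
/*
 * Illustrative sketch (not a normative part of this API): polling a single
 * result from the raw data-path queue configured in ctx.
 *
 *	enum rte_crypto_op_status op_status;
 *	int dequeue_status;
 *	void *user_data;
 *
 *	user_data = rte_cryptodev_raw_dequeue(ctx, &dequeue_status, &op_status);
 *	if (user_data == NULL)
 *		return 0;	// nothing ready to dequeue yet
 *	if (op_status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
 *		// the operation behind user_data failed; handle it here
 *	}
 *	if (dequeue_status == 0 && rte_cryptodev_raw_dequeue_done(ctx, 1) < 0)
 *		return -1;
 */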
1705 
1706 /**
1707  * Inform the queue pair that the dequeue operations are finished.
1708  *
1709  * @param	ctx	The initialized raw data-path context data.
1710  * @param	n	The number of operations.
1711  * @return
1712  *   - On success return 0.
1713  *   - On failure return negative integer.
1714  */
1715 __rte_experimental
1716 int
1717 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1718 		uint32_t n);
1719 
1720 /**
1721  * Add a user callback for a given crypto device and queue pair which will be
1722  * called on crypto ops enqueue.
1723  *
1724  * This API configures a function to be called for each burst of crypto ops
1725  * enqueued on a given crypto device queue pair. The return value is a pointer
1726  * that can be used later to remove the callback using
1727  * rte_cryptodev_remove_enq_callback().
1728  *
1729  * Callbacks registered by the application do not survive
1730  * rte_cryptodev_configure(), as it reinitializes the callback list.
1731  * It is the application's responsibility to remove all installed callbacks
1732  * before calling rte_cryptodev_configure() to avoid possible memory leakage.
1733  * The application is expected to call this API after rte_cryptodev_configure().
1734  *
1735  * Multiple functions can be registered per queue pair and they are called
1736  * in the order they were added. The API does not restrict the maximum number
1737  * of callbacks.
1738  *
1739  * @param	dev_id		The identifier of the device.
1740  * @param	qp_id		The index of the queue pair on which ops are
1741  *				to be enqueued for processing. The value
1742  *				must be in the range [0, nb_queue_pairs - 1]
1743  *				previously supplied to
1744  *				*rte_cryptodev_configure*.
1745  * @param	cb_fn		The callback function
1746  * @param	cb_arg		A generic pointer parameter which will be passed
1747  *				to each invocation of the callback function on
1748  *				this crypto device and queue pair.
1749  *
1750  * @return
1751  *  - NULL on error & rte_errno will contain the error code.
1752  *  - On success, a pointer value which can later be used to remove the
1753  *    callback.
1754  */
1755 
1756 __rte_experimental
1757 struct rte_cryptodev_cb *
1758 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1759 			       uint16_t qp_id,
1760 			       rte_cryptodev_callback_fn cb_fn,
1761 			       void *cb_arg);
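
/*
 * Illustrative sketch (not a normative part of this API): counting enqueued
 * ops with a pass-through callback. The callback prototype follows
 * rte_cryptodev_callback_fn; callbacks only run when the library is built
 * with RTE_CRYPTO_CALLBACKS, and rte_errno requires <rte_errno.h>.
 *
 *	static uint64_t enq_count;
 *
 *	static uint16_t
 *	count_enq_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *		     uint16_t nb_ops, void *arg)
 *	{
 *		uint64_t *counter = arg;
 *
 *		*counter += nb_ops;
 *		return nb_ops;	// forward all ops unchanged
 *	}
 *
 *	...
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, qp_id, count_enq_cb,
 *			&enq_count);
 *	if (cb == NULL)
 *		return -rte_errno;
 *	...
 *	// remove before rte_cryptodev_configure() is called again
 *	rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 */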
1762 
1763 /**
1764  * Remove a user callback function for given crypto device and queue pair.
1765  * Remove a user callback function for a given crypto device and queue pair.
1766  *
1767  * This function is used to remove enqueue callbacks that were added to a
1768  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1769  *
1772  * @param	qp_id		The index of the queue pair on which ops are
1773  *				to be enqueued. The value must be in the
1774  *				range [0, nb_queue_pairs - 1] previously
1775  *				supplied to *rte_cryptodev_configure*.
1776  * @param	cb		Pointer to user supplied callback created via
1777  *				rte_cryptodev_add_enq_callback().
1778  *
1779  * @return
1780  *   -  0: Success. Callback was removed.
1781  *   - <0: The dev_id or the qp_id is out of range, or the callback
1782  *         is NULL or not found for the crypto device queue pair.
1783  */
1784 
1785 __rte_experimental
1786 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1787 				      uint16_t qp_id,
1788 				      struct rte_cryptodev_cb *cb);
1789 
1790 /**
1791  * Add a user callback for a given crypto device and queue pair which will be
1792  * called on crypto ops dequeue.
1793  *
1794  * This API configures a function to be called for each burst of crypto ops
1795  * dequeued from a given crypto device queue pair. The return value is a pointer
1796  * that can be used later to remove the callback using
1797  * rte_cryptodev_remove_deq_callback().
1798  *
1799  * Callbacks registered by the application do not survive
1800  * rte_cryptodev_configure(), as it reinitializes the callback list.
1801  * It is the application's responsibility to remove all installed callbacks
1802  * before calling rte_cryptodev_configure() to avoid possible memory leakage.
1803  * The application is expected to call this API after rte_cryptodev_configure().
1804  *
1805  * Multiple functions can be registered per queue pair and they are called
1806  * in the order they were added. The API does not restrict the maximum number
1807  * of callbacks.
1808  *
1809  * @param	dev_id		The identifier of the device.
1810  * @param	qp_id		The index of the queue pair on which ops are
1811  *				to be dequeued. The value must be in the
1812  *				range [0, nb_queue_pairs - 1] previously
1813  *				supplied to *rte_cryptodev_configure*.
1814  * @param	cb_fn		The callback function
1815  * @param	cb_arg		A generic pointer parameter which will be passed
1816  *				to each invocation of the callback function on
1817  *				this crypto device and queue pair.
1818  *
1819  * @return
1820  *   - NULL on error & rte_errno will contain the error code.
1821  *   - On success, a pointer value which can later be used to remove the
1822  *     callback.
1823  */
1824 
1825 __rte_experimental
1826 struct rte_cryptodev_cb *
1827 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1828 			       uint16_t qp_id,
1829 			       rte_cryptodev_callback_fn cb_fn,
1830 			       void *cb_arg);
1831 
1832 /**
1833  * Remove a user callback function for a given crypto device and queue pair.
1834  *
1835  * This function is used to remove dequeue callbacks that were added to a
1836  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1837  *
1840  * @param	dev_id		The identifier of the device.
1841  * @param	qp_id		The index of the queue pair on which ops are
1842  *				to be dequeued. The value must be in the
1843  *				range [0, nb_queue_pairs - 1] previously
1844  *				supplied to *rte_cryptodev_configure*.
1845  * @param	cb		Pointer to user supplied callback created via
1846  *				rte_cryptodev_add_deq_callback().
1847  *
1848  * @return
1849  *   -  0: Success. Callback was removed.
1850  *   - <0: The dev_id or the qp_id is out of range, or the callback
1851  *         is NULL or not found for the crypto device queue pair.
1852  */
1853 __rte_experimental
1854 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1855 				      uint16_t qp_id,
1856 				      struct rte_cryptodev_cb *cb);
1857 
1858 #include <rte_cryptodev_core.h>
1859 /**
1860  *
1861  * Dequeue a burst of processed crypto operations from a queue on the crypto
1862  * device. The dequeued operations are stored in *rte_crypto_op* structures
1863  * whose pointers are supplied in the *ops* array.
1864  *
1865  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1866  * actually dequeued, which is the number of *rte_crypto_op* data structures
1867  * effectively supplied into the *ops* array.
1868  *
1869  * A return value equal to *nb_ops* indicates that the queue contained
1870  * at least *nb_ops* operations, and this is likely to signify that other
1871  * processed operations remain in the device's output queue. Applications
1872  * implementing a "retrieve as many processed operations as possible" policy
1873  * can check this specific case and keep invoking the
1874  * rte_cryptodev_dequeue_burst() function until a value less than
1875  * *nb_ops* is returned.
1876  *
1877  * The rte_cryptodev_dequeue_burst() function does not provide any error
1878  * notification to avoid the corresponding overhead.
1879  *
1880  * @param	dev_id		The symmetric crypto device identifier
1881  * @param	qp_id		The index of the queue pair from which to
1882  *				retrieve processed operations. The value must be
1883  *				in the range [0, nb_queue_pair - 1] previously
1884  *				supplied to rte_cryptodev_configure().
1885  * @param	ops		The address of an array of pointers to
1886  *				*rte_crypto_op* structures that must be
1887  *				large enough to store *nb_ops* pointers in it.
1888  * @param	nb_ops		The maximum number of operations to dequeue.
1889  *
1890  * @return
1891  *   - The number of operations actually dequeued, which is the number
1892  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1893  *   *ops* array.
1894  */
1895 static inline uint16_t
1896 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1897 		struct rte_crypto_op **ops, uint16_t nb_ops)
1898 {
1899 	const struct rte_crypto_fp_ops *fp_ops;
1900 	void *qp;
1901 
1902 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1903 
1904 	fp_ops = &rte_crypto_fp_ops[dev_id];
1905 	qp = fp_ops->qp.data[qp_id];
1906 
1907 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1908 
1909 #ifdef RTE_CRYPTO_CALLBACKS
1910 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1911 		struct rte_cryptodev_cb_rcu *list;
1912 		struct rte_cryptodev_cb *cb;
1913 
1914 		/* __ATOMIC_RELEASE memory order was used when the
1915 		 * callback was inserted into the list.
1916 		 * Since there is a clear dependency between loading
1917 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1918 		 * not required.
1919 		 */
1920 		list = &fp_ops->qp.deq_cb[qp_id];
1921 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1922 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1923 
1924 		while (cb != NULL) {
1925 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1926 					cb->arg);
1927 			cb = cb->next;
1928 		}
1929 
1930 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1931 	}
1932 #endif
1933 	return nb_ops;
1934 }
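
/*
 * Illustrative sketch (not a normative part of this API): the
 * "retrieve as many processed operations as possible" policy described
 * above. BURST_SIZE is an application-chosen placeholder.
 *
 *	struct rte_crypto_op *ops[BURST_SIZE];
 *	uint16_t n;
 *
 *	do {
 *		n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, BURST_SIZE);
 *		// process (and eventually free) the n dequeued ops here
 *	} while (n == BURST_SIZE);
 */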
1935 
1936 /**
1937  * Enqueue a burst of operations for processing on a crypto device.
1938  *
1939  * The rte_cryptodev_enqueue_burst() function is invoked to place
1940  * crypto operations on the queue *qp_id* of the device designated by
1941  * its *dev_id*.
1942  *
1943  * The *nb_ops* parameter is the number of operations to process which are
1944  * supplied in the *ops* array of *rte_crypto_op* structures.
1945  *
1946  * The rte_cryptodev_enqueue_burst() function returns the number of
1947  * operations it actually enqueued for processing. A return value equal to
1948  * *nb_ops* means that all operations have been enqueued.
1949  *
1950  * @param	dev_id		The identifier of the device.
1951  * @param	qp_id		The index of the queue pair on which operations
1952  *				are to be enqueued for processing. The value
1953  *				must be in the range [0, nb_queue_pairs - 1]
1954  *				previously supplied to
1955  *				 *rte_cryptodev_configure*.
1956  * @param	ops		The address of an array of *nb_ops* pointers
1957  *				to *rte_crypto_op* structures which contain
1958  *				the crypto operations to be processed.
1959  * @param	nb_ops		The number of operations to process.
1960  *
1961  * @return
1962  * The number of operations actually enqueued on the crypto device. The return
1963  * value can be less than the value of the *nb_ops* parameter when the
1964  * crypto device's queue is full or if invalid parameters are specified in
1965  * a *rte_crypto_op*.
1966  */
1967 static inline uint16_t
1968 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1969 		struct rte_crypto_op **ops, uint16_t nb_ops)
1970 {
1971 	const struct rte_crypto_fp_ops *fp_ops;
1972 	void *qp;
1973 
1974 	fp_ops = &rte_crypto_fp_ops[dev_id];
1975 	qp = fp_ops->qp.data[qp_id];
1976 #ifdef RTE_CRYPTO_CALLBACKS
1977 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1978 		struct rte_cryptodev_cb_rcu *list;
1979 		struct rte_cryptodev_cb *cb;
1980 
1981 		/* __ATOMIC_RELEASE memory order was used when the
1982 		 * callback was inserted into the list.
1983 		 * Since there is a clear dependency between loading
1984 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1985 		 * not required.
1986 		 */
1987 		list = &fp_ops->qp.enq_cb[qp_id];
1988 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1989 		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1990 
1991 		while (cb != NULL) {
1992 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1993 					cb->arg);
1994 			cb = cb->next;
1995 		}
1996 
1997 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1998 	}
1999 #endif
2000 
2001 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2002 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2003 }
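
/*
 * Illustrative sketch (not a normative part of this API): retrying until a
 * full burst has been accepted. A real application should bound the retry
 * loop, since ops carrying invalid parameters are never accepted.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_ops)
 *		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *				&ops[sent], nb_ops - sent);
 */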
2004
2007 #ifdef __cplusplus
2008 }
2009 #endif
2010 
2011 #endif /* _RTE_CRYPTODEV_H_ */
2012