xref: /dpdk/lib/cryptodev/rte_cryptodev.h (revision da7e701151ea8b742d4c38ace3e4fefd1b4507fc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include <rte_compat.h>
22 #include "rte_kvargs.h"
23 #include "rte_crypto.h"
24 #include <rte_common.h>
25 #include <rte_rcu_qsbr.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
/* NOTE(review): "cyptodev" (missing 'r') looks like a historical typo kept
 * for API/ABI compatibility — confirm before considering a rename.
 */
29 extern const char **rte_cyptodev_names;
30 
31 /* Logging Macros */
32 

/** Log an ERR-level message, prefixed with the calling function and line. */
33 #define CDEV_LOG_ERR(...) \
34 	RTE_LOG(ERR, CRYPTODEV, \
35 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
36 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
37 

/** Log an INFO-level message (no function/line prefix). */
38 #define CDEV_LOG_INFO(...) \
39 	RTE_LOG(INFO, CRYPTODEV, \
40 		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
41 			RTE_FMT_TAIL(__VA_ARGS__,)))
42 

/** Log a DEBUG-level message, prefixed with the calling function and line. */
43 #define CDEV_LOG_DEBUG(...) \
44 	RTE_LOG(DEBUG, CRYPTODEV, \
45 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
46 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
47 

/** PMD trace log. NOTE: the expansion references an identifier named "dev",
 * which must exist in the caller's scope.
 */
48 #define CDEV_PMD_TRACE(...) \
49 	RTE_LOG(DEBUG, CRYPTODEV, \
50 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
51 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
52 
53 /**
54  * A macro that points to an offset from the start
55  * of the crypto operation structure (rte_crypto_op)
56  *
57  * The returned pointer is cast to type t.
58  *
59  * @param c
60  *   The crypto operation.
61  * @param t
62  *   The type to cast the result into.
63  * @param o
64  *   The offset from the start of the crypto operation.
65  */
66 #define rte_crypto_op_ctod_offset(c, t, o)	\
67 	((t)((char *)(c) + (o)))
68 
69 /**
70  * A macro that returns the physical address that points
71  * to an offset from the start of the crypto operation
72  * (rte_crypto_op)
73  *
74  * @param c
75  *   The crypto operation.
76  * @param o
77  *   The offset from the start of the crypto operation
78  *   to calculate address from.
 *
 * @return
 *   The IOVA (rte_iova_t) address at the given offset.
79  */
80 #define rte_crypto_op_ctophys_offset(c, o)	\
81 	(rte_iova_t)((c)->phys_addr + (o))
82 
83 /**
84  * Crypto parameters range description
85  */
86 struct rte_crypto_param_range {
87 	uint16_t min;	/**< minimum size */
88 	uint16_t max;	/**< maximum size */
89 	uint16_t increment;
90 	/**< if a range of sizes are supported,
91 	 * this parameter is used to indicate
92 	 * increments in byte size that are supported
93 	 * between the minimum and maximum
	 * NOTE(review): an increment of 0 presumably means a single
	 * supported size (min == max) — confirm against PMD usage.
94 	 */
95 };
96 
97 /**
98  * Data-unit supported lengths of cipher algorithms.
99  * A bit can represent any set of data-unit sizes
100  * (single size, multiple size, range, etc).
 * These bits are reported via the dataunit_set field of the cipher
 * capability in struct rte_cryptodev_symmetric_capability.
101  */
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
105 
106 /**
107  * Symmetric Crypto Capability
108  */
109 struct rte_cryptodev_symmetric_capability {
110 	enum rte_crypto_sym_xform_type xform_type;
111 	/**< Transform type : Authentication / Cipher / AEAD */
	/* The active union member (auth/cipher/aead) is selected by
	 * xform_type above.
	 */
112 	union {
113 		struct {
114 			enum rte_crypto_auth_algorithm algo;
115 			/**< authentication algorithm */
116 			uint16_t block_size;
117 			/**< algorithm block size */
118 			struct rte_crypto_param_range key_size;
119 			/**< auth key size range */
120 			struct rte_crypto_param_range digest_size;
121 			/**< digest size range */
122 			struct rte_crypto_param_range aad_size;
123 			/**< Additional authentication data size range */
124 			struct rte_crypto_param_range iv_size;
125 			/**< Initialisation vector data size range */
126 		} auth;
127 		/**< Symmetric Authentication transform capabilities */
128 		struct {
129 			enum rte_crypto_cipher_algorithm algo;
130 			/**< cipher algorithm */
131 			uint16_t block_size;
132 			/**< algorithm block size */
133 			struct rte_crypto_param_range key_size;
134 			/**< cipher key size range */
135 			struct rte_crypto_param_range iv_size;
136 			/**< Initialisation vector data size range */
137 			uint32_t dataunit_set;
138 			/**<
139 			 * Supported data-unit lengths:
140 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
141 			 * or 0 for lengths defined in the algorithm standard.
142 			 */
143 		} cipher;
144 		/**< Symmetric Cipher transform capabilities */
145 		struct {
146 			enum rte_crypto_aead_algorithm algo;
147 			/**< AEAD algorithm */
148 			uint16_t block_size;
149 			/**< algorithm block size */
150 			struct rte_crypto_param_range key_size;
151 			/**< AEAD key size range */
152 			struct rte_crypto_param_range digest_size;
153 			/**< digest size range */
154 			struct rte_crypto_param_range aad_size;
155 			/**< Additional authentication data size range */
156 			struct rte_crypto_param_range iv_size;
157 			/**< Initialisation vector data size range */
158 		} aead;
159 	};
160 };
161 
162 /**
163  * Asymmetric Xform Crypto Capability
164  */
165 struct rte_cryptodev_asymmetric_xform_capability {
166 	enum rte_crypto_asym_xform_type xform_type;
167 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
168 
169 	uint32_t op_types;
170 	/**<
171 	 * Bitmask for supported rte_crypto_asym_op_type or
172 	 * rte_crypto_asym_ke_type. Which enum is used is determined
173 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
174 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
175 	 * it is rte_crypto_asym_op_type.
176 	 */
177 
	/* The active union member depends on xform_type: modlen for
	 * modulus-based xforms, internal_rng for elliptic-curve xforms.
	 */
178 	__extension__
179 	union {
180 		struct rte_crypto_param_range modlen;
181 		/**< Range of modulus length supported by modulus based xform.
182 		 * Value 0 means implementation default
183 		 */
184 
185 		uint8_t internal_rng;
186 		/**< Availability of random number generator for Elliptic curve based xform.
187 		 * Value 0 means unavailable, and application should pass the required
188 		 * random value. Otherwise, PMD would internally compute the random number.
189 		 */
190 	};
191 
192 	uint64_t hash_algos;
193 	/**< Bitmask of hash algorithms supported for op_type. */
194 };
195 
196 /**
197  * Asymmetric Crypto Capability
198  */
199 struct rte_cryptodev_asymmetric_capability {
200 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
	/**< Capability descriptor for one asymmetric xform (algorithm) */
201 };
202 
203 
204 /** Structure used to capture a capability of a crypto device */
205 struct rte_cryptodev_capabilities {
206 	enum rte_crypto_op_type op;
207 	/**< Operation type */
208 
	/* The active union member (sym/asym) is selected by op above. */
209 	union {
210 		struct rte_cryptodev_symmetric_capability sym;
211 		/**< Symmetric operation capability parameters */
212 		struct rte_cryptodev_asymmetric_capability asym;
213 		/**< Asymmetric operation capability parameters */
214 	};
215 };
216 
217 /** Structure used to describe crypto algorithms */
/* Lookup key for rte_cryptodev_sym_capability_get(). */
218 struct rte_cryptodev_sym_capability_idx {
219 	enum rte_crypto_sym_xform_type type;
	/**< Transform type; selects which algo member below is valid */
220 	union {
221 		enum rte_crypto_cipher_algorithm cipher;
222 		enum rte_crypto_auth_algorithm auth;
223 		enum rte_crypto_aead_algorithm aead;
224 	} algo;
	/**< Algorithm of the selected transform type */
225 };
226 
227 /**
228  * Structure used to describe asymmetric crypto xforms
229  * Each xform maps to one asym algorithm.
 * Lookup key for rte_cryptodev_asym_capability_get().
230  */
231 struct rte_cryptodev_asym_capability_idx {
232 	enum rte_crypto_asym_xform_type type;
233 	/**< Asymmetric xform (algo) type */
234 };
235 
236 /**
237  * Provide capabilities available for defined device and algorithm
238  *
239  * @param	dev_id		The identifier of the device.
240  * @param	idx		Description of crypto algorithms.
241  *
242  * @return
243  *   - Return description of the symmetric crypto capability if it exists.
244  *   - Return NULL if the capability does not exist.
245  */
246 const struct rte_cryptodev_symmetric_capability *
247 rte_cryptodev_sym_capability_get(uint8_t dev_id,
248 		const struct rte_cryptodev_sym_capability_idx *idx);
249 
250 /**
251  *  Provide capabilities available for defined device and xform
252  *
253  * @param	dev_id		The identifier of the device.
254  * @param	idx		Description of asym crypto xform.
255  *
256  * @return
257  *   - Return description of the asymmetric crypto capability if it exists.
258  *   - Return NULL if the capability does not exist.
259  */
260 __rte_experimental
261 const struct rte_cryptodev_asymmetric_xform_capability *
262 rte_cryptodev_asym_capability_get(uint8_t dev_id,
263 		const struct rte_cryptodev_asym_capability_idx *idx);
264 
265 /**
266  * Check if key size and initial vector are supported
267  * in crypto cipher capability
268  *
269  * @param	capability	Description of the symmetric crypto capability.
270  * @param	key_size	Cipher key size.
271  * @param	iv_size		Cipher initial vector size.
272  *
273  * @return
274  *   - Return 0 if the parameters are in range of the capability.
275  *   - Return -1 if the parameters are out of range of the capability.
276  */
277 int
278 rte_cryptodev_sym_capability_check_cipher(
279 		const struct rte_cryptodev_symmetric_capability *capability,
280 		uint16_t key_size, uint16_t iv_size);
281 
282 /**
283  * Check if key size and initial vector are supported
284  * in crypto auth capability
285  *
286  * @param	capability	Description of the symmetric crypto capability.
287  * @param	key_size	Auth key size.
288  * @param	digest_size	Auth digest size.
289  * @param	iv_size		Auth initial vector size.
290  *
291  * @return
292  *   - Return 0 if the parameters are in range of the capability.
293  *   - Return -1 if the parameters are out of range of the capability.
294  */
295 int
296 rte_cryptodev_sym_capability_check_auth(
297 		const struct rte_cryptodev_symmetric_capability *capability,
298 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
299 
300 /**
301  * Check if key, digest, AAD and initial vector sizes are supported
302  * in crypto AEAD capability
303  *
304  * @param	capability	Description of the symmetric crypto capability.
305  * @param	key_size	AEAD key size.
306  * @param	digest_size	AEAD digest size.
307  * @param	aad_size	AEAD AAD size.
308  * @param	iv_size		AEAD IV size.
309  *
310  * @return
311  *   - Return 0 if the parameters are in range of the capability.
312  *   - Return -1 if the parameters are out of range of the capability.
313  */
314 int
315 rte_cryptodev_sym_capability_check_aead(
316 		const struct rte_cryptodev_symmetric_capability *capability,
317 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
318 		uint16_t iv_size);
319 
320 /**
321  * Check if op type is supported
322  *
323  * @param	capability	Description of the asymmetric crypto capability.
324  * @param	op_type		op type
325  *
326  * @return
327  *   - Return 1 if the op type is supported
328  *   - Return 0 if unsupported
329  */
330 __rte_experimental
331 int
332 rte_cryptodev_asym_xform_capability_check_optype(
333 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
334 		enum rte_crypto_asym_op_type op_type);
335 
336 /**
337  * Check if modulus length is in supported range
338  *
339  * @param	capability	Description of the asymmetric crypto capability.
340  * @param	modlen		modulus length.
341  *
342  * @return
343  *   - Return 0 if the parameters are in range of the capability.
344  *   - Return -1 if the parameters are out of range of the capability.
345  */
346 __rte_experimental
347 int
348 rte_cryptodev_asym_xform_capability_check_modlen(
349 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
350 		uint16_t modlen);
351 
352 /**
353  * Check if hash algorithm is supported.
354  *
355  * @param	capability	Asymmetric crypto capability.
356  * @param	hash		Hash algorithm.
357  *
358  * @return
359  *   - Return true if the hash algorithm is supported.
360  *   - Return false if the hash algorithm is not supported.
361  */
362 __rte_experimental
363 bool
364 rte_cryptodev_asym_xform_capability_check_hash(
365 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
366 	enum rte_crypto_auth_algorithm hash);
367 
368 /**
369  * Provide the cipher algorithm enum, given an algorithm string
370  *
371  * @param	algo_enum	A pointer to the cipher algorithm
372  *				enum to be filled
373  * @param	algo_string	Cipher algo string
374  *
375  * @return
376  * - Return -1 if string is not valid
377  * - Return 0 if the string is valid
378  */
379 int
380 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
381 		const char *algo_string);
382 
383 /**
384  * Provide the authentication algorithm enum, given an algorithm string
385  *
386  * @param	algo_enum	A pointer to the authentication algorithm
387  *				enum to be filled
388  * @param	algo_string	Authentication algo string
389  *
390  * @return
391  * - Return -1 if string is not valid
392  * - Return 0 if the string is valid
393  */
394 int
395 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
396 		const char *algo_string);
397 
398 /**
399  * Provide the AEAD algorithm enum, given an algorithm string
400  *
401  * @param	algo_enum	A pointer to the AEAD algorithm
402  *				enum to be filled
403  * @param	algo_string	AEAD algorithm string
404  *
405  * @return
406  * - Return -1 if string is not valid
407  * - Return 0 if the string is valid
408  */
409 int
410 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
411 		const char *algo_string);
412 
413 /**
414  * Provide the Asymmetric xform enum, given an xform string
415  *
416  * @param	xform_enum	A pointer to the xform type
417  *				enum to be filled
418  * @param	xform_string	xform string
419  *
420  * @return
421  * - Return -1 if string is not valid
422  * - Return 0 if the string is valid
423  */
424 __rte_experimental
425 int
426 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
427 		const char *xform_string);
428 
429 /**
430  * Provide the cipher algorithm string, given an algorithm enum.
431  *
432  * @param	algo_enum	cipher algorithm enum
433  *
434  * @return
435  * - Return NULL if enum is not valid
436  * - Return algo_string corresponding to enum
437  */
438 __rte_experimental
439 const char *
440 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
441 
442 /**
443  * Provide the authentication algorithm string, given an algorithm enum.
444  *
445  * @param	algo_enum	auth algorithm enum
446  *
447  * @return
448  * - Return NULL if enum is not valid
449  * - Return algo_string corresponding to enum
450  */
451 __rte_experimental
452 const char *
453 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
454 
455 /**
456  * Provide the AEAD algorithm string, given an algorithm enum.
457  *
458  * @param	algo_enum	AEAD algorithm enum
459  *
460  * @return
461  * - Return NULL if enum is not valid
462  * - Return algo_string corresponding to enum
463  */
464 __rte_experimental
465 const char *
466 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
467 
468 /**
469  * Provide the Asymmetric xform string, given an xform enum.
470  *
471  * @param	xform_enum	xform type enum
472  *
473  * @return
474  * - Return NULL, if enum is not valid.
475  * - Return xform string, for valid enum.
476  */
477 __rte_experimental
478 const char *
479 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
480 
481 
482 /** Macro used at end of crypto PMD list: a sentinel capability entry whose
 * op field is RTE_CRYPTO_OP_TYPE_UNDEFINED terminates a capabilities array
 * (see the note on rte_cryptodev_info_get()).
 */
483 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
484 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
485 
486 
487 /**
488  * Crypto device supported feature flags
489  *
490  * Note:
491  * New features flags should be added to the end of the list
492  *
493  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
494  */
495 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
496 /**< Symmetric crypto operations are supported */
497 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
498 /**< Asymmetric crypto operations are supported */
499 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
500 /**< Chaining symmetric crypto operations are supported */
501 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
502 /**< Utilises CPU SIMD SSE instructions */
503 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
504 /**< Utilises CPU SIMD AVX instructions */
505 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
506 /**< Utilises CPU SIMD AVX2 instructions */
507 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
508 /**< Utilises CPU AES-NI instructions */
509 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
510 /**< Operations are off-loaded to an
511  * external hardware accelerator
512  */
513 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
514 /**< Utilises CPU SIMD AVX512 instructions */
515 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
516 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
517  * are supported
518  */
519 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
520 /**< Out-of-place Scatter-gather (SGL) buffers are
521  * supported in input and output
522  */
523 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
524 /**< Out-of-place Scatter-gather (SGL) buffers are supported
525  * in input, combined with linear buffers (LB), with a
526  * single segment in output
527  */
528 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
529 /**< Out-of-place Scatter-gather (SGL) buffers are supported
530  * in output, combined with linear buffers (LB) in input
531  */
532 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
533 /**< Out-of-place linear buffers (LB) are supported in input and output */
534 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
535 /**< Utilises CPU NEON instructions */
536 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
537 /**< Utilises ARM CPU Cryptographic Extensions */
538 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
539 /**< Support Security Protocol Processing */
540 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
541 /**< Support RSA Private Key OP with exponent */
542 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
543 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
544 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
545 /**< Support encrypted-digest operations where digest is appended to data */
546 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
547 /**< Support asymmetric session-less operations */
548 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
549 /**< Support symmetric cpu-crypto processing */
550 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
551 /**< Support symmetric session-less operations */
552 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
553 /**< Support operations on data which is not byte aligned */
554 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
555 /**< Support accelerator specific symmetric raw data-path APIs */
556 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
557 /**< Support operations on multiple data-units message */
558 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
559 /**< Support wrapped key in cipher xform */
560 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
561 /**< Support inner checksum computation/verification */
562 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
563 /**< Support Rx injection after security processing */
564 
565 /**
566  * Get the name of a crypto device feature flag
567  *
568  * @param	flag	The mask describing the flag.
569  *
570  * @return
571  *   The name of this flag, or NULL if it's not a valid feature flag.
572  */
573 const char *
574 rte_cryptodev_get_feature_name(uint64_t flag);
575 
576 /**  Crypto device information */
577 /* Structure rte_cryptodev_info 8< */
578 struct rte_cryptodev_info {
579 	const char *driver_name;	/**< Driver name. */
580 	uint8_t driver_id;		/**< Driver identifier */
581 	struct rte_device *device;	/**< Generic device information. */
582 
583 	uint64_t feature_flags;
584 	/**< Feature flags exposes HW/SW features for the given device */
585 
586 	const struct rte_cryptodev_capabilities *capabilities;
587 	/**< Array of devices supported capabilities */
	/* Terminated by an entry whose op field is
	 * RTE_CRYPTO_OP_TYPE_UNDEFINED; see rte_cryptodev_info_get().
	 */
588 
589 	unsigned max_nb_queue_pairs;
590 	/**< Maximum number of queues pairs supported by device. */
591 
592 	uint16_t min_mbuf_headroom_req;
593 	/**< Minimum mbuf headroom required by device */
594 
595 	uint16_t min_mbuf_tailroom_req;
596 	/**< Minimum mbuf tailroom required by device */
597 
598 	struct {
599 		unsigned max_nb_sessions;
600 		/**< Maximum number of sessions supported by device.
601 		 * If 0, the device does not have any limitation in
602 		 * number of sessions that can be used.
603 		 */
604 	} sym;
605 };
606 /* >8 End of structure rte_cryptodev_info. */
607 
/** Device attachment states (as reported by the library). */
608 #define RTE_CRYPTODEV_DETACHED  (0)
609 #define RTE_CRYPTODEV_ATTACHED  (1)
610 
611 /** Definitions of Crypto device event types */
612 enum rte_cryptodev_event_type {
613 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
614 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
615 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
616 };
617 
618 /** Crypto device queue pair configuration structure. */
619 /* Structure rte_cryptodev_qp_conf 8<*/
620 struct rte_cryptodev_qp_conf {
621 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
622 	struct rte_mempool *mp_session;
623 	/**< The mempool used to create sessions in sessionless mode */
624 };
625 /* >8 End of structure rte_cryptodev_qp_conf. */
626 
627 /**
628  * Function type used for processing crypto ops when enqueue/dequeue burst is
629  * called.
630  *
631  * The callback function is called on enqueue/dequeue burst immediately.
632  *
633  * @param	dev_id		The identifier of the device.
634  * @param	qp_id		The index of the queue pair on which ops are
635  *				enqueued/dequeued. The value must be in the
636  *				range [0, nb_queue_pairs - 1] previously
637  *				supplied to *rte_cryptodev_configure*.
638  * @param	ops		The address of an array of *nb_ops* pointers
639  *				to *rte_crypto_op* structures which contain
640  *				the crypto operations to be processed.
641  * @param	nb_ops		The number of operations to process.
642  * @param	user_param	The arbitrary user parameter passed in by the
643  *				application when the callback was originally
644  *				registered.
645  * @return			The number of ops to be enqueued to the
646  *				crypto device.
 *
 * NOTE(review): for the dequeue path the return value presumably bounds the
 * number of ops handed back to the application — confirm in the library
 * implementation before relying on it.
647  */
648 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
649 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
650 
651 /**
652  * Typedef for application callback function to be registered by application
653  * software for notification of device events
654  *
655  * @param	dev_id	Crypto device identifier
656  * @param	event	Crypto device event to register for notification of.
657  * @param	cb_arg	User specified parameter to be passed to the
658  *			user's callback function.
659  */
660 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
661 		enum rte_cryptodev_event_type event, void *cb_arg);
662 
663 
664 /** Crypto Device statistics */
665 struct rte_cryptodev_stats {
666 	uint64_t enqueued_count;
667 	/**< Count of all operations enqueued */
668 	uint64_t dequeued_count;
669 	/**< Count of all operations dequeued */
670 
671 	uint64_t enqueue_err_count;
672 	/**< Total error count on operations enqueued */
673 	uint64_t dequeue_err_count;
674 	/**< Total error count on operations dequeued */
675 };
676 
677 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
678 /**< Max length of name of crypto PMD */
/* NOTE(review): whether the 64 bytes include the terminating NUL is not
 * visible here — confirm before sizing name buffers.
 */
679 
680 /**
681  * Get the device identifier for the named crypto device.
682  *
683  * @param	name	device name to select the device structure.
684  *
685  * @return
686  *   - Returns crypto device identifier on success.
687  *   - Return -1 on failure to find named crypto device.
688  */
689 int
690 rte_cryptodev_get_dev_id(const char *name);
691 
692 /**
693  * Get the crypto device name given a device identifier.
694  *
695  * @param dev_id
696  *   The identifier of the device
697  *
698  * @return
699  *   - Returns crypto device name.
700  *   - Returns NULL if crypto device is not present.
701  */
702 const char *
703 rte_cryptodev_name_get(uint8_t dev_id);
704 
705 /**
706  * Get the total number of crypto devices that have been successfully
707  * initialised.
708  *
709  * @return
710  *   - The total number of usable crypto devices.
711  */
712 uint8_t
713 rte_cryptodev_count(void);
714 
715 /**
716  * Get number of crypto device defined type.
717  *
718  * @param	driver_id	driver identifier.
719  *
720  * @return
721  *   Returns number of crypto device.
722  */
723 uint8_t
724 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
725 
726 /**
727  * Get number and identifiers of attached crypto devices that
728  * use the same crypto driver.
729  *
730  * @param	driver_name	driver name.
731  * @param	devices		output devices identifiers.
732  * @param	nb_devices	maximal number of devices.
733  *
734  * @return
735  *   Returns number of attached crypto device.
736  */
737 uint8_t
738 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
739 		uint8_t nb_devices);
740 /**
741  * Return the NUMA socket to which a device is connected
742  *
743  * @param dev_id
744  *   The identifier of the device
745  * @return
746  *   The NUMA socket id to which the device is connected or
747  *   a default of zero if the socket could not be determined.
748  *   - -1 if the dev_id value is out of range.
749  */
750 int
751 rte_cryptodev_socket_id(uint8_t dev_id);
752 
753 /** Crypto device configuration structure */
754 /* Structure rte_cryptodev_config 8< */
755 struct rte_cryptodev_config {
756 	int socket_id;			/**< Socket to allocate resources on */
757 	uint16_t nb_queue_pairs;
758 	/**< Number of queue pairs to configure on device */
759 	uint64_t ff_disable;
760 	/**< Feature flags to be disabled. Only the following features are
761 	 * allowed to be disabled,
762 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
763 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
764 	 *  - RTE_CRYPTODEV_FF_SECURITY
765 	 */
766 };
767 /* >8 End of structure rte_cryptodev_config. */
768 
769 /**
770  * Configure a device.
771  *
772  * This function must be invoked first before any other function in the
773  * API. This function can also be re-invoked when a device is in the
774  * stopped state.
775  *
776  * @param	dev_id		The identifier of the device to configure.
777  * @param	config		The crypto device configuration structure.
778  *
779  * @return
780  *   - 0: Success, device configured.
781  *   - <0: Error code returned by the driver configuration function.
782  */
783 int
784 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
785 
786 /**
787  * Start a device.
788  *
789  * The device start step is the last one and consists of setting the configured
790  * offload features and in starting the transmit and the receive units of the
791  * device.
792  * On success, all basic functions exported by the API (link status,
793  * receive/transmit, and so on) can be invoked.
794  *
795  * @param dev_id
796  *   The identifier of the device.
797  * @return
798  *   - 0: Success, device started.
799  *   - <0: Error code of the driver device start function.
800  */
801 int
802 rte_cryptodev_start(uint8_t dev_id);
803 
804 /**
805  * Stop a device. The device can be restarted with a call to
806  * rte_cryptodev_start()
807  *
808  * @param	dev_id		The identifier of the device.
809  */
810 void
811 rte_cryptodev_stop(uint8_t dev_id);
812 
813 /**
814  * Close a device. The device cannot be restarted!
815  *
816  * @param	dev_id		The identifier of the device.
817  *
818  * @return
819  *  - 0 on successfully closing device
820  *  - <0 on failure to close device
821  */
822 int
823 rte_cryptodev_close(uint8_t dev_id);
824 
825 /**
826  * Allocate and set up a receive queue pair for a device.
827  *
828  *
829  * @param	dev_id		The identifier of the device.
830  * @param	queue_pair_id	The index of the queue pairs to set up. The
831  *				value must be in the range [0, nb_queue_pair
832  *				- 1] previously supplied to
833  *				rte_cryptodev_configure().
834  * @param	qp_conf		The pointer to the configuration data to be
835  *				used for the queue pair.
836  * @param	socket_id	The *socket_id* argument is the socket
837  *				identifier in case of NUMA. The value can be
838  *				*SOCKET_ID_ANY* if there is no NUMA constraint
839  *				for the DMA memory allocated for the receive
840  *				queue pair.
841  *
842  * @return
843  *   - 0: Success, queue pair correctly set up.
844  *   - <0: Queue pair configuration failed
845  */
846 int
847 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
848 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
849 
850 /**
851  * Get the status of queue pairs setup on a specific crypto device
852  *
853  * @param	dev_id		Crypto device identifier.
854  * @param	queue_pair_id	The index of the queue pairs to set up. The
855  *				value must be in the range [0, nb_queue_pair
856  *				- 1] previously supplied to
857  *				rte_cryptodev_configure().
858  * @return
859  *   - 0: qp was not configured
860  *	 - 1: qp was configured
861  *	 - -EINVAL: device was not configured
862  */
863 __rte_experimental
864 int
865 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
866 
867 /**
868  * Get the number of queue pairs on a specific crypto device
869  *
870  * @param	dev_id		Crypto device identifier.
871  * @return
872  *   - The number of configured queue pairs.
873  */
874 uint16_t
875 rte_cryptodev_queue_pair_count(uint8_t dev_id);
876 
877 
878 /**
879  * Retrieve the general I/O statistics of a device.
880  *
881  * @param	dev_id		The identifier of the device.
882  * @param	stats		A pointer to a structure of type
883  *				*rte_cryptodev_stats* to be filled with the
884  *				values of device counters.
885  * @return
886  *   - Zero if successful.
887  *   - Non-zero otherwise.
888  */
889 int
890 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
891 
892 /**
893  * Reset the general I/O statistics of a device.
894  *
895  * @param	dev_id		The identifier of the device.
896  */
897 void
898 rte_cryptodev_stats_reset(uint8_t dev_id);
899 
900 /**
901  * Retrieve the contextual information of a device.
902  *
903  * @param	dev_id		The identifier of the device.
904  * @param	dev_info	A pointer to a structure of type
905  *				*rte_cryptodev_info* to be filled with the
906  *				contextual information of the device.
907  *
908  * @note The capabilities field of dev_info is set to point to the first
909  * element of an array of struct rte_cryptodev_capabilities. The element after
910  * the last valid element has its op field set to
911  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
912  */
913 void
914 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
915 
916 
917 /**
918  * Register a callback function for specific device id.
919  *
920  * @param	dev_id		Device id.
921  * @param	event		Event interested.
922  * @param	cb_fn		User supplied callback function to be called.
923  * @param	cb_arg		Pointer to the parameters for the registered
924  *				callback.
925  *
926  * @return
927  *  - On success, zero.
928  *  - On failure, a negative value.
929  */
930 int
931 rte_cryptodev_callback_register(uint8_t dev_id,
932 		enum rte_cryptodev_event_type event,
933 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
934 
935 /**
936  * Unregister a callback function for specific device id.
937  *
938  * @param	dev_id		The device identifier.
939  * @param	event		Event interested.
940  * @param	cb_fn		User supplied callback function to be called.
941  * @param	cb_arg		Pointer to the parameters for the registered
942  *				callback.
943  *
944  * @return
945  *  - On success, zero.
946  *  - On failure, a negative value.
947  */
948 int
949 rte_cryptodev_callback_unregister(uint8_t dev_id,
950 		enum rte_cryptodev_event_type event,
951 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
952 
953 /**
954  * @warning
955  * @b EXPERIMENTAL: this API may change without prior notice.
956  *
957  * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR
958  * events.
959  *
960  * @param          dev_id	The device identifier.
961  * @param          qp_id	Queue pair index to be queried.
962  *
963  * @return
964  *   - 1 if requested queue has a pending event.
965  *   - 0 if no pending event is found.
966  *   - a negative value on failure
967  */
968 __rte_experimental
969 int
970 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
971 
972 struct rte_cryptodev_callback;
973 
974 /** Structure to keep track of registered callbacks */
975 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
976 
977 /**
978  * Structure used to hold information about the callbacks to be called for a
979  * queue pair on enqueue/dequeue.
980  */
981 struct rte_cryptodev_cb {
982 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
983 	/**< Pointer to next callback */
984 	rte_cryptodev_callback_fn fn;
985 	/**< Pointer to callback function */
986 	void *arg;
987 	/**< Pointer to argument */
988 };
989 
990 /**
991  * @internal
992  * Structure used to hold information about the RCU for a queue pair.
993  */
994 struct rte_cryptodev_cb_rcu {
995 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
996 	/**< Pointer to next callback */
997 	struct rte_rcu_qsbr *qsbr;
998 	/**< RCU QSBR variable per queue pair */
999 };
1000 
1001 /**
1002  * Get the security context for the cryptodev.
1003  *
1004  * @param dev_id
1005  *   The device identifier.
1006  * @return
1007  *   - NULL on error.
1008  *   - Pointer to security context on success.
1009  */
1010 void *
1011 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1012 
1013 /**
1014  * Create a symmetric session mempool.
1015  *
1016  * @param name
1017  *   The unique mempool name.
1018  * @param nb_elts
1019  *   The number of elements in the mempool.
1020  * @param elt_size
1021  *   The size of the element. This should be the size of the cryptodev PMD
1022  *   session private data obtained through
1023  *   rte_cryptodev_sym_get_private_session_size() function call.
1024  *   For the user who wants to use the same mempool for heterogeneous PMDs
1025  *   this value should be the maximum value of their private session sizes.
1026  *   Please note the created mempool will have bigger elt size than this
1027  *   value as necessary session header and the possible padding are filled
1028  *   into each elt.
1029  * @param cache_size
1030  *   The number of per-lcore cache elements
1031  * @param priv_size
1032  *   The private data size of each session.
1033  * @param socket_id
1034  *   The *socket_id* argument is the socket identifier in the case of
1035  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1036  *   constraint for the reserved zone.
1037  *
1038  * @return
1039  *  - On success returns the created session mempool pointer
1040  *  - On failure returns NULL
1041  */
1042 __rte_experimental
1043 struct rte_mempool *
1044 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1045 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1046 	int socket_id);
1047 
1048 
1049 /**
1050  * Create an asymmetric session mempool.
1051  *
1052  * @param name
1053  *   The unique mempool name.
1054  * @param nb_elts
1055  *   The number of elements in the mempool.
1056  * @param cache_size
1057  *   The number of per-lcore cache elements
1058  * @param user_data_size
1059  *   The size of user data to be placed after session private data.
1060  * @param socket_id
1061  *   The *socket_id* argument is the socket identifier in the case of
1062  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1063  *   constraint for the reserved zone.
1064  *
1065  * @return
1066  *  - On success return mempool
1067  *  - On failure returns NULL
1068  */
1069 __rte_experimental
1070 struct rte_mempool *
1071 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1072 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1073 
1074 /**
1075  * Create symmetric crypto session and fill out private data for the device id,
1076  * based on its device type.
1077  *
1078  * @param   dev_id   ID of device that we want the session to be used on
1079  * @param   xforms   Symmetric crypto transform operations to apply on flow
1080  *                   processed with this session
1081  * @param   mp       Mempool to allocate symmetric session objects from
1082  *
1083  * @return
1084  *  - On success return pointer to sym-session.
1085  *  - On failure returns NULL and rte_errno is set to the error code:
1086  *    - EINVAL on invalid arguments.
1087  *    - ENOMEM on memory error for session allocation.
1088  *    - ENOTSUP if device doesn't support session configuration.
1089  */
1090 void *
1091 rte_cryptodev_sym_session_create(uint8_t dev_id,
1092 		struct rte_crypto_sym_xform *xforms,
1093 		struct rte_mempool *mp);
1094 /**
1095  * Create and initialise an asymmetric crypto session structure.
1096  * Calls the PMD to configure the private session data.
1097  *
1098  * @param   dev_id   ID of device that we want the session to be used on
1099  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1100  *                   processed with this session
1101  * @param   mp       mempool to allocate asymmetric session
1102  *                   objects from
1103  * @param   session  void ** for session to be used
1104  *
1105  * @return
1106  *  - 0 on success.
1107  *  - -EINVAL on invalid arguments.
1108  *  - -ENOMEM on memory error for session allocation.
1109  *  - -ENOTSUP if device doesn't support session configuration.
1110  */
1111 __rte_experimental
1112 int
1113 rte_cryptodev_asym_session_create(uint8_t dev_id,
1114 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1115 		void **session);
1116 
1117 /**
 * Free a session for the device id, returning it to its mempool.
 * It is the application's responsibility to ensure that there are
 * no in-flight operations still using the session.
1121  *
1122  * @param   dev_id   ID of device that uses the session.
1123  * @param   sess     Session header to be freed.
1124  *
1125  * @return
1126  *  - 0 if successful.
 *  - -EINVAL if session is NULL or the device ids do not match.
1128  */
1129 int
1130 rte_cryptodev_sym_session_free(uint8_t dev_id,
1131 	void *sess);
1132 
1133 /**
1134  * Clears and frees asymmetric crypto session header and private data,
1135  * returning it to its original mempool.
1136  *
1137  * @param   dev_id   ID of device that uses the asymmetric session.
1138  * @param   sess     Session header to be freed.
1139  *
1140  * @return
1141  *  - 0 if successful.
1142  *  - -EINVAL if device is invalid or session is NULL.
1143  */
1144 __rte_experimental
1145 int
1146 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1147 
1148 /**
1149  * Get the size of the asymmetric session header.
1150  *
1151  * @return
 *   Size of the asymmetric session header.
1153  */
1154 __rte_experimental
1155 unsigned int
1156 rte_cryptodev_asym_get_header_session_size(void);
1157 
1158 /**
1159  * Get the size of the private symmetric session data
1160  * for a device.
1161  *
1162  * @param	dev_id		The device identifier.
1163  *
1164  * @return
1165  *   - Size of the private data, if successful
1166  *   - 0 if device is invalid or does not have private
1167  *   symmetric session
1168  */
1169 unsigned int
1170 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1171 
1172 /**
1173  * Get the size of the private data for asymmetric session
1174  * on device
1175  *
1176  * @param	dev_id		The device identifier.
1177  *
1178  * @return
1179  *   - Size of the asymmetric private data, if successful
1180  *   - 0 if device is invalid or does not have private session
1181  */
1182 __rte_experimental
1183 unsigned int
1184 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1185 
1186 /**
1187  * Validate if the crypto device index is valid attached crypto device.
1188  *
1189  * @param	dev_id	Crypto device index.
1190  *
1191  * @return
1192  *   - If the device index is valid (1) or not (0).
1193  */
1194 unsigned int
1195 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1196 
1197 /**
1198  * Provide driver identifier.
1199  *
1200  * @param name
1201  *   The pointer to a driver name.
1202  * @return
1203  *  The driver type identifier or -1 if no driver found
1204  */
1205 int rte_cryptodev_driver_id_get(const char *name);
1206 
1207 /**
1208  * Provide driver name.
1209  *
1210  * @param driver_id
1211  *   The driver identifier.
1212  * @return
1213  *  The driver name or null if no driver found
1214  */
1215 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1216 
1217 /**
1218  * Store user data in a session.
1219  *
1220  * @param	sess		Session pointer allocated by
1221  *				*rte_cryptodev_sym_session_create*.
1222  * @param	data		Pointer to the user data.
1223  * @param	size		Size of the user data.
1224  *
1225  * @return
1226  *  - On success, zero.
1227  *  - On failure, a negative value.
1228  */
1229 __rte_experimental
1230 int
1231 rte_cryptodev_sym_session_set_user_data(void *sess,
1232 					void *data,
1233 					uint16_t size);
1234 
#define CRYPTO_SESS_OPAQUE_DATA_OFF 0
/**
 * Get opaque data from session handle
 *
 * Reads the 64-bit opaque word stored at offset
 * CRYPTO_SESS_OPAQUE_DATA_OFF from the start of the session handle.
 */
static inline uint64_t
rte_cryptodev_sym_session_opaque_data_get(void *sess)
{
	const uint64_t *slot;

	slot = (const uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF;
	return *slot;
}

/**
 * Set opaque data in session handle
 *
 * Writes the 64-bit opaque word at offset CRYPTO_SESS_OPAQUE_DATA_OFF
 * from the start of the session handle.
 */
static inline void
rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
{
	((uint64_t *)sess)[CRYPTO_SESS_OPAQUE_DATA_OFF] = opaque;
}
1255 
1256 /**
1257  * Get user data stored in a session.
1258  *
1259  * @param	sess		Session pointer allocated by
1260  *				*rte_cryptodev_sym_session_create*.
1261  *
1262  * @return
1263  *  - On success return pointer to user data.
1264  *  - On failure returns NULL.
1265  */
1266 __rte_experimental
1267 void *
1268 rte_cryptodev_sym_session_get_user_data(void *sess);
1269 
1270 /**
1271  * Store user data in an asymmetric session.
1272  *
1273  * @param	sess		Session pointer allocated by
1274  *				*rte_cryptodev_asym_session_create*.
1275  * @param	data		Pointer to the user data.
1276  * @param	size		Size of the user data.
1277  *
1278  * @return
1279  *  - On success, zero.
1280  *  - -EINVAL if the session pointer is invalid.
1281  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1282  */
1283 __rte_experimental
1284 int
1285 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1286 
1287 /**
1288  * Get user data stored in an asymmetric session.
1289  *
1290  * @param	sess		Session pointer allocated by
1291  *				*rte_cryptodev_asym_session_create*.
1292  *
1293  * @return
1294  *  - On success return pointer to user data.
1295  *  - On failure returns NULL.
1296  */
1297 __rte_experimental
1298 void *
1299 rte_cryptodev_asym_session_get_user_data(void *sess);
1300 
1301 /**
1302  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1303  * on user provided data.
1304  *
1305  * @param	dev_id	The device identifier.
1306  * @param	sess	Cryptodev session structure
1307  * @param	ofs	Start and stop offsets for auth and cipher operations
1308  * @param	vec	Vectorized operation descriptor
1309  *
1310  * @return
1311  *  - Returns number of successfully processed packets.
1312  */
1313 __rte_experimental
1314 uint32_t
1315 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1316 	void *sess, union rte_crypto_sym_ofs ofs,
1317 	struct rte_crypto_sym_vec *vec);
1318 
1319 /**
1320  * Get the size of the raw data-path context buffer.
1321  *
1322  * @param	dev_id		The device identifier.
1323  *
1324  * @return
1325  *   - If the device supports raw data-path APIs, return the context size.
1326  *   - If the device does not support the APIs, return -1.
1327  */
1328 __rte_experimental
1329 int
1330 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1331 
1332 /**
1333  * Set session event meta data
1334  *
1335  * @param	dev_id		The device identifier.
1336  * @param	sess            Crypto or security session.
1337  * @param	op_type         Operation type.
1338  * @param	sess_type       Session type.
1339  * @param	ev_mdata	Pointer to the event crypto meta data
1340  *				(aka *union rte_event_crypto_metadata*)
1341  * @param	size            Size of ev_mdata.
1342  *
1343  * @return
1344  *  - On success, zero.
1345  *  - On failure, a negative value.
1346  */
1347 __rte_experimental
1348 int
1349 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1350 	enum rte_crypto_op_type op_type,
1351 	enum rte_crypto_op_sess_type sess_type,
1352 	void *ev_mdata, uint16_t size);
1353 
1354 /**
1355  * Union of different crypto session types, including session-less xform
1356  * pointer.
1357  */
union rte_cryptodev_session_ctx {
	void *crypto_sess;
	/**< Opaque crypto session handle */
	struct rte_crypto_sym_xform *xform;
	/**< Session-less symmetric transform chain */
	struct rte_security_session *sec_sess;
	/**< Security session handle */
};
1362 
1363 /**
1364  * Enqueue a vectorized operation descriptor into the device queue but the
1365  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1366  * is called.
1367  *
1368  * @param	qp		Driver specific queue pair data.
1369  * @param	drv_ctx		Driver specific context data.
1370  * @param	vec		Vectorized operation descriptor.
1371  * @param	ofs		Start and stop offsets for auth and cipher
1372  *				operations.
1373  * @param	user_data	The array of user data for dequeue later.
1374  * @param	enqueue_status	Driver written value to specify the
1375  *				enqueue status. Possible values:
1376  *				- 1: The number of operations returned are
1377  *				     enqueued successfully.
1378  *				- 0: The number of operations returned are
1379  *				     cached into the queue but are not processed
1380  *				     until rte_cryptodev_raw_enqueue_done() is
1381  *				     called.
1382  *				- negative integer: Error occurred.
1383  * @return
1384  *   - The number of operations in the descriptor successfully enqueued or
1385  *     cached into the queue but not enqueued yet, depends on the
1386  *     "enqueue_status" value.
1387  */
1388 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1389 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1390 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1391 
1392 /**
1393  * Enqueue single raw data vector into the device queue but the driver may or
1394  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1395  *
1396  * @param	qp		Driver specific queue pair data.
1397  * @param	drv_ctx		Driver specific context data.
1398  * @param	data_vec	The buffer data vector.
1399  * @param	n_data_vecs	Number of buffer data vectors.
1400  * @param	ofs		Start and stop offsets for auth and cipher
1401  *				operations.
1402  * @param	iv		IV virtual and IOVA addresses
1403  * @param	digest		digest virtual and IOVA addresses
1404  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1405  *				depends on the algorithm used.
1406  * @param	user_data	The user data.
1407  * @return
1408  *   - 1: The data vector is enqueued successfully.
1409  *   - 0: The data vector is cached into the queue but is not processed
1410  *        until rte_cryptodev_raw_enqueue_done() is called.
1411  *   - negative integer: failure.
1412  */
1413 typedef int (*cryptodev_sym_raw_enqueue_t)(
1414 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1415 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1416 	struct rte_crypto_va_iova_ptr *iv,
1417 	struct rte_crypto_va_iova_ptr *digest,
1418 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1419 	void *user_data);
1420 
1421 /**
1422  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1423  * enqueued/dequeued operations.
1424  *
1425  * @param	qp		Driver specific queue pair data.
1426  * @param	drv_ctx		Driver specific context data.
1427  * @param	n		The total number of processed operations.
1428  * @return
1429  *   - On success return 0.
1430  *   - On failure return negative integer.
1431  */
1432 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1433 	uint32_t n);
1434 
1435 /**
1436  * Typedef that the user provided for the driver to get the dequeue count.
1437  * The function may return a fixed number or the number parsed from the user
1438  * data stored in the first processed operation.
1439  *
1440  * @param	user_data	Dequeued user data.
1441  * @return
1442  *  - The number of operations to be dequeued.
1443  */
1444 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1445 
1446 /**
1447  * Typedef that the user provided to deal with post dequeue operation, such
1448  * as filling status.
1449  *
1450  * @param	user_data	Dequeued user data.
1451  * @param	index		Index number of the processed descriptor.
1452  * @param	is_op_success	Operation status provided by the driver.
1453  */
1454 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1455 	uint32_t index, uint8_t is_op_success);
1456 
1457 /**
1458  * Dequeue a burst of symmetric crypto processing.
1459  *
1460  * @param	qp			Driver specific queue pair data.
1461  * @param	drv_ctx			Driver specific context data.
1462  * @param	get_dequeue_count	User provided callback function to
1463  *					obtain dequeue operation count.
1464  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1465  *					value is used to pass the maximum
1466  *					number of operations to be dequeued.
1467  * @param	post_dequeue		User provided callback function to
1468  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from device queue. In case
 *					*is_user_data_array* is set there
 *					should be enough room to store all
 *					user data.
1474  * @param	is_user_data_array	Set 1 if every dequeued user data will
1475  *					be written into out_user_data array.
1476  *					Set 0 if only the first user data will
1477  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1479  *					total successful operations count.
1480  * @param	dequeue_status		Driver written value to specify the
1481  *					dequeue status. Possible values:
1482  *					- 1: Successfully dequeued the number
1483  *					     of operations returned. The user
1484  *					     data previously set during enqueue
1485  *					     is stored in the "out_user_data".
1486  *					- 0: The number of operations returned
1487  *					     are completed and the user data is
1488  *					     stored in the "out_user_data", but
1489  *					     they are not freed from the queue
1490  *					     until
1491  *					     rte_cryptodev_raw_dequeue_done()
1492  *					     is called.
1493  *					- negative integer: Error occurred.
1494  * @return
1495  *   - The number of operations dequeued or completed but not freed from the
1496  *     queue, depends on "dequeue_status" value.
1497  */
1498 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1499 	uint8_t *drv_ctx,
1500 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1501 	uint32_t max_nb_to_dequeue,
1502 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1503 	void **out_user_data, uint8_t is_user_data_array,
1504 	uint32_t *n_success, int *dequeue_status);
1505 
1506 /**
1507  * Dequeue a symmetric crypto processing.
1508  *
1509  * @param	qp			Driver specific queue pair data.
1510  * @param	drv_ctx			Driver specific context data.
1511  * @param	dequeue_status		Driver written value to specify the
1512  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1514  *					     The user data is returned.
1515  *					- 0: The first operation in the queue
1516  *					     is completed and the user data
1517  *					     previously set during enqueue is
1518  *					     returned, but it is not freed from
1519  *					     the queue until
1520  *					     rte_cryptodev_raw_dequeue_done() is
1521  *					     called.
1522  *					- negative integer: Error occurred.
1523  * @param	op_status		Driver written value to specify
1524  *					operation status.
1525  * @return
1526  *   - The user data pointer retrieved from device queue or NULL if no
1527  *     operation is ready for dequeue.
1528  */
1529 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1530 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1531 		enum rte_crypto_op_status *op_status);
1532 
1533 /**
1534  * Context data for raw data-path API crypto process. The buffer of this
1535  * structure is to be allocated by the user application with the size equal
1536  * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value.
1537  */
1538 struct rte_crypto_raw_dp_ctx {
1539 	void *qp_data;
1540 
1541 	cryptodev_sym_raw_enqueue_t enqueue;
1542 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1543 	cryptodev_sym_raw_operation_done_t enqueue_done;
1544 	cryptodev_sym_raw_dequeue_t dequeue;
1545 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1546 	cryptodev_sym_raw_operation_done_t dequeue_done;
1547 
1548 	/* Driver specific context data */
1549 	__extension__ uint8_t drv_ctx_data[];
1550 };
1551 
1552 /**
1553  * Configure raw data-path context data.
1554  *
1555  * @param	dev_id		The device identifier.
1556  * @param	qp_id		The index of the queue pair from which to
1557  *				retrieve processed packets. The value must be
1558  *				in the range [0, nb_queue_pair - 1] previously
1559  *				supplied to rte_cryptodev_configure().
1560  * @param	ctx		The raw data-path context data.
1561  * @param	sess_type	Session type.
1562  * @param	session_ctx	Session context data.
1563  * @param	is_update	Set 0 if it is to initialize the ctx.
1564  *				Set 1 if ctx is initialized and only to update
1565  *				session context data.
1566  * @return
1567  *   - On success return 0.
1568  *   - On failure return negative integer.
1569  *     - -EINVAL if input parameters are invalid.
1570  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1571  *        provided session.
1572  */
1573 __rte_experimental
1574 int
1575 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1576 	struct rte_crypto_raw_dp_ctx *ctx,
1577 	enum rte_crypto_op_sess_type sess_type,
1578 	union rte_cryptodev_session_ctx session_ctx,
1579 	uint8_t is_update);
1580 
1581 /**
1582  * Enqueue a vectorized operation descriptor into the device queue but the
1583  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1584  * is called.
1585  *
1586  * @param	ctx		The initialized raw data-path context data.
1587  * @param	vec		Vectorized operation descriptor.
1588  * @param	ofs		Start and stop offsets for auth and cipher
1589  *				operations.
1590  * @param	user_data	The array of user data for dequeue later.
1591  * @param	enqueue_status	Driver written value to specify the
1592  *				enqueue status. Possible values:
1593  *				- 1: The number of operations returned are
1594  *				     enqueued successfully.
1595  *				- 0: The number of operations returned are
1596  *				     cached into the queue but are not processed
1597  *				     until rte_cryptodev_raw_enqueue_done() is
1598  *				     called.
1599  *				- negative integer: Error occurred.
1600  * @return
1601  *   - The number of operations in the descriptor successfully enqueued or
1602  *     cached into the queue but not enqueued yet, depends on the
1603  *     "enqueue_status" value.
1604  */
1605 __rte_experimental
1606 uint32_t
1607 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1608 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1609 	void **user_data, int *enqueue_status);
1610 
1611 /**
1612  * Enqueue single raw data vector into the device queue but the driver may or
1613  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1614  *
1615  * @param	ctx		The initialized raw data-path context data.
1616  * @param	data_vec	The buffer data vector.
1617  * @param	n_data_vecs	Number of buffer data vectors.
1618  * @param	ofs		Start and stop offsets for auth and cipher
1619  *				operations.
1620  * @param	iv		IV virtual and IOVA addresses
1621  * @param	digest		digest virtual and IOVA addresses
1622  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1623  *				depends on the algorithm used.
1624  * @param	user_data	The user data.
1625  * @return
1626  *   - 1: The data vector is enqueued successfully.
1627  *   - 0: The data vector is cached into the queue but is not processed
1628  *        until rte_cryptodev_raw_enqueue_done() is called.
1629  *   - negative integer: failure.
1630  */
1631 __rte_experimental
1632 static __rte_always_inline int
1633 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1634 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1635 	union rte_crypto_sym_ofs ofs,
1636 	struct rte_crypto_va_iova_ptr *iv,
1637 	struct rte_crypto_va_iova_ptr *digest,
1638 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1639 	void *user_data)
1640 {
1641 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1642 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1643 }
1644 
1645 /**
1646  * Start processing all enqueued operations from last
1647  * rte_cryptodev_configure_raw_dp_ctx() call.
1648  *
1649  * @param	ctx	The initialized raw data-path context data.
1650  * @param	n	The number of operations cached.
1651  * @return
1652  *   - On success return 0.
1653  *   - On failure return negative integer.
1654  */
1655 __rte_experimental
1656 int
1657 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1658 		uint32_t n);
1659 
1660 /**
1661  * Dequeue a burst of symmetric crypto processing.
1662  *
1663  * @param	ctx			The initialized raw data-path context
1664  *					data.
1665  * @param	get_dequeue_count	User provided callback function to
1666  *					obtain dequeue operation count.
1667  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1668  *					value is used to pass the maximum
1669  *					number of operations to be dequeued.
1670  * @param	post_dequeue		User provided callback function to
1671  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
 *					from device queue. In case
 *					*is_user_data_array* is set there
 *					should be enough room to store all
 *					user data.
1677  * @param	is_user_data_array	Set 1 if every dequeued user data will
1678  *					be written into out_user_data array.
1679  *					Set 0 if only the first user data will
1680  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1682  *					total successful operations count.
1683  * @param	dequeue_status		Driver written value to specify the
1684  *					dequeue status. Possible values:
1685  *					- 1: Successfully dequeued the number
1686  *					     of operations returned. The user
1687  *					     data previously set during enqueue
1688  *					     is stored in the "out_user_data".
1689  *					- 0: The number of operations returned
1690  *					     are completed and the user data is
1691  *					     stored in the "out_user_data", but
1692  *					     they are not freed from the queue
1693  *					     until
1694  *					     rte_cryptodev_raw_dequeue_done()
1695  *					     is called.
1696  *					- negative integer: Error occurred.
1697  * @return
1698  *   - The number of operations dequeued or completed but not freed from the
1699  *     queue, depends on "dequeue_status" value.
1700  */
1701 __rte_experimental
1702 uint32_t
1703 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1704 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1705 	uint32_t max_nb_to_dequeue,
1706 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1707 	void **out_user_data, uint8_t is_user_data_array,
1708 	uint32_t *n_success, int *dequeue_status);
1709 
1710 /**
1711  * Dequeue a symmetric crypto processing.
1712  *
1713  * @param	ctx			The initialized raw data-path context
1714  *					data.
1715  * @param	dequeue_status		Driver written value to specify the
1716  *					dequeue status. Possible values:
1717  *					- 1: Successfully dequeued a operation.
1718  *					     The user data is returned.
1719  *					- 0: The first operation in the queue
1720  *					     is completed and the user data
1721  *					     previously set during enqueue is
1722  *					     returned, but it is not freed from
1723  *					     the queue until
1724  *					     rte_cryptodev_raw_dequeue_done() is
1725  *					     called.
1726  *					- negative integer: Error occurred.
1727  * @param	op_status		Driver written value to specify
1728  *					operation status.
1729  * @return
1730  *   - The user data pointer retrieved from device queue or NULL if no
1731  *     operation is ready for dequeue.
1732  */
1733 __rte_experimental
1734 static __rte_always_inline void *
1735 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1736 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1737 {
1738 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1739 			op_status);
1740 }
1741 
1742 /**
 * Inform the queue pair that dequeue operations are finished.
1744  *
1745  * @param	ctx	The initialized raw data-path context data.
1746  * @param	n	The number of operations.
1747  * @return
1748  *   - On success return 0.
1749  *   - On failure return negative integer.
1750  */
1751 __rte_experimental
1752 int
1753 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1754 		uint32_t n);
1755 
1756 /**
1757  * Add a user callback for a given crypto device and queue pair which will be
1758  * called on crypto ops enqueue.
1759  *
1760  * This API configures a function to be called for each burst of crypto ops
1761  * received on a given crypto device queue pair. The return value is a pointer
1762  * that can be used later to remove the callback using
1763  * rte_cryptodev_remove_enq_callback().
1764  *
1765  * Callbacks registered by application would not survive
1766  * rte_cryptodev_configure() as it reinitializes the callback list.
1767  * It is user responsibility to remove all installed callbacks before
1768  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1769  * Application is expected to call add API after rte_cryptodev_configure().
1770  *
1771  * Multiple functions can be registered per queue pair & they are called
1772  * in the order they were added. The API does not restrict on maximum number
1773  * of callbacks.
1774  *
1775  * @param	dev_id		The identifier of the device.
1776  * @param	qp_id		The index of the queue pair on which ops are
1777  *				to be enqueued for processing. The value
1778  *				must be in the range [0, nb_queue_pairs - 1]
1779  *				previously supplied to
1780  *				*rte_cryptodev_configure*.
1781  * @param	cb_fn		The callback function
1782  * @param	cb_arg		A generic pointer parameter which will be passed
1783  *				to each invocation of the callback function on
1784  *				this crypto device and queue pair.
1785  *
1786  * @return
1787  *  - NULL on error & rte_errno will contain the error code.
1788  *  - On success, a pointer value which can later be used to remove the
1789  *    callback.
1790  */
1791 
1792 __rte_experimental
1793 struct rte_cryptodev_cb *
1794 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1795 			       uint16_t qp_id,
1796 			       rte_cryptodev_callback_fn cb_fn,
1797 			       void *cb_arg);
1798 
1799 /**
1800  * Remove a user callback function for given crypto device and queue pair.
1801  *
1802  * This function is used to remove enqueue callbacks that were added to a
1803  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1804  *
1805  *
1806  *
1807  * @param	dev_id		The identifier of the device.
1808  * @param	qp_id		The index of the queue pair on which ops are
1809  *				to be enqueued. The value must be in the
1810  *				range [0, nb_queue_pairs - 1] previously
1811  *				supplied to *rte_cryptodev_configure*.
1812  * @param	cb		Pointer to user supplied callback created via
1813  *				rte_cryptodev_add_enq_callback().
1814  *
1815  * @return
1816  *   -  0: Success. Callback was removed.
1817  *   - <0: The dev_id or the qp_id is out of range, or the callback
1818  *         is NULL or not found for the crypto device queue pair.
1819  */
1820 
1821 __rte_experimental
1822 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1823 				      uint16_t qp_id,
1824 				      struct rte_cryptodev_cb *cb);
1825 
1826 /**
1827  * Add a user callback for a given crypto device and queue pair which will be
1828  * called on crypto ops dequeue.
1829  *
1830  * This API configures a function to be called for each burst of crypto ops
1831  * received on a given crypto device queue pair. The return value is a pointer
1832  * that can be used later to remove the callback using
1833  * rte_cryptodev_remove_deq_callback().
1834  *
 * Callbacks registered by the application will not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leaks.
1839  * Application is expected to call add API after rte_cryptodev_configure().
1840  *
1841  * Multiple functions can be registered per queue pair & they are called
1842  * in the order they were added. The API does not restrict on maximum number
1843  * of callbacks.
1844  *
1845  * @param	dev_id		The identifier of the device.
1846  * @param	qp_id		The index of the queue pair on which ops are
1847  *				to be dequeued. The value must be in the
1848  *				range [0, nb_queue_pairs - 1] previously
1849  *				supplied to *rte_cryptodev_configure*.
1850  * @param	cb_fn		The callback function
1851  * @param	cb_arg		A generic pointer parameter which will be passed
1852  *				to each invocation of the callback function on
1853  *				this crypto device and queue pair.
1854  *
1855  * @return
1856  *   - NULL on error & rte_errno will contain the error code.
1857  *   - On success, a pointer value which can later be used to remove the
1858  *     callback.
1859  */
1860 
1861 __rte_experimental
1862 struct rte_cryptodev_cb *
1863 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1864 			       uint16_t qp_id,
1865 			       rte_cryptodev_callback_fn cb_fn,
1866 			       void *cb_arg);
1867 
1868 /**
1869  * Remove a user callback function for given crypto device and queue pair.
1870  *
1871  * This function is used to remove dequeue callbacks that were added to a
1872  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1873  *
1874  *
1875  *
1876  * @param	dev_id		The identifier of the device.
1877  * @param	qp_id		The index of the queue pair on which ops are
1878  *				to be dequeued. The value must be in the
1879  *				range [0, nb_queue_pairs - 1] previously
1880  *				supplied to *rte_cryptodev_configure*.
1881  * @param	cb		Pointer to user supplied callback created via
1882  *				rte_cryptodev_add_deq_callback().
1883  *
1884  * @return
1885  *   -  0: Success. Callback was removed.
1886  *   - <0: The dev_id or the qp_id is out of range, or the callback
1887  *         is NULL or not found for the crypto device queue pair.
1888  */
1889 __rte_experimental
1890 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1891 				      uint16_t qp_id,
1892 				      struct rte_cryptodev_cb *cb);
1893 
1894 #include <rte_cryptodev_core.h>
1895 /**
1896  *
1897  * Dequeue a burst of processed crypto operations from a queue on the crypto
1898  * device. The dequeued operation are stored in *rte_crypto_op* structures
1899  * whose pointers are supplied in the *ops* array.
1900  *
1901  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1902  * actually dequeued, which is the number of *rte_crypto_op* data structures
1903  * effectively supplied into the *ops* array.
1904  *
1905  * A return value equal to *nb_ops* indicates that the queue contained
1906  * at least *nb_ops* operations, and this is likely to signify that other
1907  * processed operations remain in the devices output queue. Applications
1908  * implementing a "retrieve as many processed operations as possible" policy
1909  * can check this specific case and keep invoking the
1910  * rte_cryptodev_dequeue_burst() function until a value less than
1911  * *nb_ops* is returned.
1912  *
1913  * The rte_cryptodev_dequeue_burst() function does not provide any error
1914  * notification to avoid the corresponding overhead.
1915  *
1916  * @param	dev_id		The symmetric crypto device identifier
1917  * @param	qp_id		The index of the queue pair from which to
1918  *				retrieve processed packets. The value must be
1919  *				in the range [0, nb_queue_pair - 1] previously
1920  *				supplied to rte_cryptodev_configure().
1921  * @param	ops		The address of an array of pointers to
1922  *				*rte_crypto_op* structures that must be
1923  *				large enough to store *nb_ops* pointers in it.
1924  * @param	nb_ops		The maximum number of operations to dequeue.
1925  *
1926  * @return
1927  *   - The number of operations actually dequeued, which is the number
1928  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1929  *   *ops* array.
1930  */
1931 static inline uint16_t
1932 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1933 		struct rte_crypto_op **ops, uint16_t nb_ops)
1934 {
1935 	const struct rte_crypto_fp_ops *fp_ops;
1936 	void *qp;
1937 
1938 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1939 
1940 	fp_ops = &rte_crypto_fp_ops[dev_id];
1941 	qp = fp_ops->qp.data[qp_id];
1942 
1943 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1944 
1945 #ifdef RTE_CRYPTO_CALLBACKS
1946 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1947 		struct rte_cryptodev_cb_rcu *list;
1948 		struct rte_cryptodev_cb *cb;
1949 
1950 		/* rte_memory_order_release memory order was used when the
1951 		 * call back was inserted into the list.
1952 		 * Since there is a clear dependency between loading
1953 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1954 		 * not required.
1955 		 */
1956 		list = &fp_ops->qp.deq_cb[qp_id];
1957 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1958 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1959 
1960 		while (cb != NULL) {
1961 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1962 					cb->arg);
1963 			cb = cb->next;
1964 		};
1965 
1966 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1967 	}
1968 #endif
1969 	return nb_ops;
1970 }
1971 
1972 /**
1973  * Enqueue a burst of operations for processing on a crypto device.
1974  *
1975  * The rte_cryptodev_enqueue_burst() function is invoked to place
1976  * crypto operations on the queue *qp_id* of the device designated by
1977  * its *dev_id*.
1978  *
1979  * The *nb_ops* parameter is the number of operations to process which are
1980  * supplied in the *ops* array of *rte_crypto_op* structures.
1981  *
1982  * The rte_cryptodev_enqueue_burst() function returns the number of
1983  * operations it actually enqueued for processing. A return value equal to
1984  * *nb_ops* means that all packets have been enqueued.
1985  *
1986  * @param	dev_id		The identifier of the device.
1987  * @param	qp_id		The index of the queue pair which packets are
1988  *				to be enqueued for processing. The value
1989  *				must be in the range [0, nb_queue_pairs - 1]
1990  *				previously supplied to
1991  *				 *rte_cryptodev_configure*.
1992  * @param	ops		The address of an array of *nb_ops* pointers
1993  *				to *rte_crypto_op* structures which contain
1994  *				the crypto operations to be processed.
1995  * @param	nb_ops		The number of operations to process.
1996  *
1997  * @return
1998  * The number of operations actually enqueued on the crypto device. The return
1999  * value can be less than the value of the *nb_ops* parameter when the
2000  * crypto devices queue is full or if invalid parameters are specified in
2001  * a *rte_crypto_op*.
2002  */
2003 static inline uint16_t
2004 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
2005 		struct rte_crypto_op **ops, uint16_t nb_ops)
2006 {
2007 	const struct rte_crypto_fp_ops *fp_ops;
2008 	void *qp;
2009 
2010 	fp_ops = &rte_crypto_fp_ops[dev_id];
2011 	qp = fp_ops->qp.data[qp_id];
2012 #ifdef RTE_CRYPTO_CALLBACKS
2013 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
2014 		struct rte_cryptodev_cb_rcu *list;
2015 		struct rte_cryptodev_cb *cb;
2016 
2017 		/* rte_memory_order_release memory order was used when the
2018 		 * call back was inserted into the list.
2019 		 * Since there is a clear dependency between loading
2020 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
2021 		 * not required.
2022 		 */
2023 		list = &fp_ops->qp.enq_cb[qp_id];
2024 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
2025 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
2026 
2027 		while (cb != NULL) {
2028 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
2029 					cb->arg);
2030 			cb = cb->next;
2031 		};
2032 
2033 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2034 	}
2035 #endif
2036 
2037 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2038 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2039 }
2040 
2041 
2042 
2043 #ifdef __cplusplus
2044 }
2045 #endif
2046 
2047 #endif /* _RTE_CRYPTODEV_H_ */
2048