xref: /dpdk/lib/cryptodev/rte_cryptodev.h (revision 6ef8e70ecfbd0963a35a301bc9d6d0745891f6e3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #include <rte_compat.h>
18 #include "rte_kvargs.h"
19 #include "rte_crypto.h"
20 #include <rte_common.h>
21 #include <rte_rcu_qsbr.h>
22 
23 #include "rte_cryptodev_trace_fp.h"
24 
25 /**
26  * @internal Logtype used for cryptodev related messages.
27  */
28 extern int rte_cryptodev_logtype;
29 #define RTE_LOGTYPE_CRYPTODEV rte_cryptodev_logtype
30 
31 /* Logging Macros */
32 #define CDEV_LOG_ERR(...) \
33 	RTE_LOG_LINE_PREFIX(ERR, CRYPTODEV, \
34 		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
35 
36 #define CDEV_LOG_INFO(...) \
37 	RTE_LOG_LINE(INFO, CRYPTODEV, "" __VA_ARGS__)
38 
39 #define CDEV_LOG_DEBUG(...) \
40 	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
41 		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
42 
43 #define CDEV_PMD_TRACE(...) \
44 	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
45 		"[%s] %s: ", dev RTE_LOG_COMMA __func__, __VA_ARGS__)
46 
47 /**
48  * A macro that points to an offset from the start
49  * of the crypto operation structure (rte_crypto_op)
50  *
51  * The returned pointer is cast to type t.
52  *
53  * @param c
54  *   The crypto operation.
55  * @param o
56  *   The offset from the start of the crypto operation.
57  * @param t
58  *   The type to cast the result into.
59  */
60 #define rte_crypto_op_ctod_offset(c, t, o)	\
61 	((t)((char *)(c) + (o)))
62 
63 /**
64  * A macro that returns the physical address that points
65  * to an offset from the start of the crypto operation
66  * (rte_crypto_op)
67  *
68  * @param c
69  *   The crypto operation.
70  * @param o
71  *   The offset from the start of the crypto operation
72  *   to calculate address from.
73  */
74 #define rte_crypto_op_ctophys_offset(c, o)	\
75 	(rte_iova_t)((c)->phys_addr + (o))
76 
77 /**
78  * Crypto parameters range description
79  */
80 struct rte_crypto_param_range {
81 	uint16_t min;	/**< minimum size */
82 	uint16_t max;	/**< maximum size */
83 	uint16_t increment;
84 	/**< if a range of sizes are supported,
85 	 * this parameter is used to indicate
86 	 * increments in byte size that are supported
87 	 * between the minimum and maximum (an increment of 0 presumably denotes a single fixed size, min == max — confirm against PMD capability tables)
88 	 */
89 };
90 
91 /**
92  * Data-unit supported lengths of cipher algorithms.
93  * A bit can represent any set of data-unit sizes
94  * (single size, multiple size, range, etc).
95  */
96 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
97 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
98 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
99 
100 /**
101  * Symmetric Crypto Capability
102  */
103 struct rte_cryptodev_symmetric_capability {
104 	enum rte_crypto_sym_xform_type xform_type;
105 	/**< Transform type : Authentication / Cipher / AEAD (selects which member of the anonymous union below is valid) */
106 	union {
107 		struct {
108 			enum rte_crypto_auth_algorithm algo;
109 			/**< authentication algorithm */
110 			uint16_t block_size;
111 			/**< algorithm block size */
112 			struct rte_crypto_param_range key_size;
113 			/**< auth key size range */
114 			struct rte_crypto_param_range digest_size;
115 			/**< digest size range */
116 			struct rte_crypto_param_range aad_size;
117 			/**< Additional authentication data size range */
118 			struct rte_crypto_param_range iv_size;
119 			/**< Initialisation vector data size range */
120 		} auth;
121 		/**< Symmetric Authentication transform capabilities */
122 		struct {
123 			enum rte_crypto_cipher_algorithm algo;
124 			/**< cipher algorithm */
125 			uint16_t block_size;
126 			/**< algorithm block size */
127 			struct rte_crypto_param_range key_size;
128 			/**< cipher key size range */
129 			struct rte_crypto_param_range iv_size;
130 			/**< Initialisation vector data size range */
131 			uint32_t dataunit_set;
132 			/**<
133 			 * Supported data-unit lengths:
134 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
135 			 * or 0 for lengths defined in the algorithm standard.
136 			 */
137 		} cipher;
138 		/**< Symmetric Cipher transform capabilities */
139 		struct {
140 			enum rte_crypto_aead_algorithm algo;
141 			/**< AEAD algorithm */
142 			uint16_t block_size;
143 			/**< algorithm block size */
144 			struct rte_crypto_param_range key_size;
145 			/**< AEAD key size range */
146 			struct rte_crypto_param_range digest_size;
147 			/**< digest size range */
148 			struct rte_crypto_param_range aad_size;
149 			/**< Additional authentication data size range */
150 			struct rte_crypto_param_range iv_size;
151 			/**< Initialisation vector data size range */
152 		} aead;
153 	};
154 };
155 
156 /**
157  * Asymmetric Xform Crypto Capability
158  */
159 struct rte_cryptodev_asymmetric_xform_capability {
160 	enum rte_crypto_asym_xform_type xform_type;
161 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
162 
163 	uint32_t op_types;
164 	/**<
165 	 * Bitmask for supported rte_crypto_asym_op_type or
166 	 * rte_crypto_asym_ke_type. Which enum is used is determined
167 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
168 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
169 	 * it is rte_crypto_asym_op_type.
170 	 */
171 
172 	__extension__
173 	union {
174 		struct rte_crypto_param_range modlen;
175 		/**< Range of modulus length supported by modulus based xform.
176 		 * Value 0 mean implementation default
177 		 */
178 
179 		uint8_t internal_rng;
180 		/**< Availability of random number generator for Elliptic curve based xform.
181 		 * Value 0 means unavailable, and application should pass the required
182 		 * random value. Otherwise, PMD would internally compute the random number.
183 		 */
184 	};
185 
186 	uint64_t hash_algos;
187 	/**< Bitmask of hash algorithms supported for op_type. */
188 };
189 
190 /**
191  * Asymmetric Crypto Capability
192  */
193 struct rte_cryptodev_asymmetric_capability {
194 	struct rte_cryptodev_asymmetric_xform_capability xform_capa; /**< Capability of the asymmetric xform (algorithm) */
195 };
196 
197 
198 /** Structure used to capture a capability of a crypto device */
199 struct rte_cryptodev_capabilities {
200 	enum rte_crypto_op_type op;
201 	/**< Operation type (selects which union member below is valid) */
202 
203 	union {
204 		struct rte_cryptodev_symmetric_capability sym;
205 		/**< Symmetric operation capability parameters */
206 		struct rte_cryptodev_asymmetric_capability asym;
207 		/**< Asymmetric operation capability parameters */
208 	};
209 };
210 
211 /** Structure used to describe crypto algorithms */
212 struct rte_cryptodev_sym_capability_idx {
213 	enum rte_crypto_sym_xform_type type; /**< Transform type; selects which algo member is valid */
214 	union {
215 		enum rte_crypto_cipher_algorithm cipher; /**< Cipher algorithm (when type is cipher) */
216 		enum rte_crypto_auth_algorithm auth; /**< Authentication algorithm (when type is auth) */
217 		enum rte_crypto_aead_algorithm aead; /**< AEAD algorithm (when type is AEAD) */
218 	} algo; /**< Algorithm of the given transform type */
219 };
220 
221 /**
222  * Structure used to describe asymmetric crypto xforms
223  * Each xform maps to one asym algorithm.
224  */
225 struct rte_cryptodev_asym_capability_idx {
226 	enum rte_crypto_asym_xform_type type;
227 	/**< Asymmetric xform (algo) type */
228 };
229 
230 /**
231  * Provide capabilities available for defined device and algorithm
232  *
233  * @param	dev_id		The identifier of the device.
234  * @param	idx		Description of crypto algorithms.
235  *
236  * @return
237  *   - Return description of the symmetric crypto capability if it exists.
238  *   - Return NULL if the capability does not exist.
239  */
240 const struct rte_cryptodev_symmetric_capability *
241 rte_cryptodev_sym_capability_get(uint8_t dev_id,
242 		const struct rte_cryptodev_sym_capability_idx *idx);
243 
244 /**
245  *  Provide capabilities available for defined device and xform
246  *
247  * @param	dev_id		The identifier of the device.
248  * @param	idx		Description of asym crypto xform.
249  *
250  * @return
251  *   - Return description of the asymmetric crypto capability if it exists.
252  *   - Return NULL if the capability does not exist.
253  */
254 const struct rte_cryptodev_asymmetric_xform_capability *
255 rte_cryptodev_asym_capability_get(uint8_t dev_id,
256 		const struct rte_cryptodev_asym_capability_idx *idx);
257 
258 /**
259  * Check if key size and initial vector are supported
260  * in crypto cipher capability
261  *
262  * @param	capability	Description of the symmetric crypto capability.
263  * @param	key_size	Cipher key size.
264  * @param	iv_size		Cipher initial vector size.
265  *
266  * @return
267  *   - Return 0 if the parameters are in range of the capability.
268  *   - Return -1 if the parameters are out of range of the capability.
269  */
270 int
271 rte_cryptodev_sym_capability_check_cipher(
272 		const struct rte_cryptodev_symmetric_capability *capability,
273 		uint16_t key_size, uint16_t iv_size);
274 
275 /**
276  * Check if key size and initial vector are supported
277  * in crypto auth capability
278  *
279  * @param	capability	Description of the symmetric crypto capability.
280  * @param	key_size	Auth key size.
281  * @param	digest_size	Auth digest size.
282  * @param	iv_size		Auth initial vector size.
283  *
284  * @return
285  *   - Return 0 if the parameters are in range of the capability.
286  *   - Return -1 if the parameters are out of range of the capability.
287  */
288 int
289 rte_cryptodev_sym_capability_check_auth(
290 		const struct rte_cryptodev_symmetric_capability *capability,
291 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
292 
293 /**
294  * Check if key, digest, AAD and initial vector sizes are supported
295  * in crypto AEAD capability
296  *
297  * @param	capability	Description of the symmetric crypto capability.
298  * @param	key_size	AEAD key size.
299  * @param	digest_size	AEAD digest size.
300  * @param	aad_size	AEAD AAD size.
301  * @param	iv_size		AEAD IV size.
302  *
303  * @return
304  *   - Return 0 if the parameters are in range of the capability.
305  *   - Return -1 if the parameters are out of range of the capability.
306  */
307 int
308 rte_cryptodev_sym_capability_check_aead(
309 		const struct rte_cryptodev_symmetric_capability *capability,
310 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
311 		uint16_t iv_size);
312 
313 /**
314  * Check if op type is supported
315  *
316  * @param	capability	Description of the asymmetric crypto capability.
317  * @param	op_type		op type
318  *
319  * @return
320  *   - Return 1 if the op type is supported
321  *   - Return 0 if unsupported
322  */
323 int
324 rte_cryptodev_asym_xform_capability_check_optype(
325 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
326 		enum rte_crypto_asym_op_type op_type);
327 
328 /**
329  * Check if modulus length is in supported range
330  *
331  * @param	capability	Description of the asymmetric crypto capability.
332  * @param	modlen		modulus length.
333  *
334  * @return
335  *   - Return 0 if the parameters are in range of the capability.
336  *   - Return -1 if the parameters are out of range of the capability.
337  */
338 int
339 rte_cryptodev_asym_xform_capability_check_modlen(
340 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
341 		uint16_t modlen);
342 
343 /**
344  * Check if hash algorithm is supported.
345  *
346  * @param	capability	Asymmetric crypto capability.
347  * @param	hash		Hash algorithm.
348  *
349  * @return
350  *   - Return true if the hash algorithm is supported.
351  *   - Return false if the hash algorithm is not supported.
352  */
353 bool
354 rte_cryptodev_asym_xform_capability_check_hash(
355 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
356 	enum rte_crypto_auth_algorithm hash);
357 
358 /**
359  * Provide the cipher algorithm enum, given an algorithm string
360  *
361  * @param	algo_enum	A pointer to the cipher algorithm
362  *				enum to be filled
363  * @param	algo_string	Authentication algo string
364  *
365  * @return
366  * - Return -1 if string is not valid
367  * - Return 0 if the string is valid
368  */
369 int
370 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
371 		const char *algo_string);
372 
373 /**
374  * Provide the authentication algorithm enum, given an algorithm string
375  *
376  * @param	algo_enum	A pointer to the authentication algorithm
377  *				enum to be filled
378  * @param	algo_string	Authentication algo string
379  *
380  * @return
381  * - Return -1 if string is not valid
382  * - Return 0 if the string is valid
383  */
384 int
385 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
386 		const char *algo_string);
387 
388 /**
389  * Provide the AEAD algorithm enum, given an algorithm string
390  *
391  * @param	algo_enum	A pointer to the AEAD algorithm
392  *				enum to be filled
393  * @param	algo_string	AEAD algorithm string
394  *
395  * @return
396  * - Return -1 if string is not valid
397  * - Return 0 if the string is valid
398  */
399 int
400 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
401 		const char *algo_string);
402 
403 /**
404  * Provide the Asymmetric xform enum, given an xform string
405  *
406  * @param	xform_enum	A pointer to the xform type
407  *				enum to be filled
408  * @param	xform_string	xform string
409  *
410  * @return
411  * - Return -1 if string is not valid
412  * - Return 0 if the string is valid
413  */
414 int
415 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
416 		const char *xform_string);
417 
418 /**
419  * Provide the cipher algorithm string, given an algorithm enum.
420  *
421  * @param	algo_enum	cipher algorithm enum
422  *
423  * @return
424  * - Return NULL if enum is not valid
425  * - Return algo_string corresponding to enum
426  */
427 __rte_experimental
428 const char *
429 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
430 
431 /**
432  * Provide the authentication algorithm string, given an algorithm enum.
433  *
434  * @param	algo_enum	auth algorithm enum
435  *
436  * @return
437  * - Return NULL if enum is not valid
438  * - Return algo_string corresponding to enum
439  */
440 __rte_experimental
441 const char *
442 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
443 
444 /**
445  * Provide the AEAD algorithm string, given an algorithm enum.
446  *
447  * @param	algo_enum	AEAD algorithm enum
448  *
449  * @return
450  * - Return NULL if enum is not valid
451  * - Return algo_string corresponding to enum
452  */
453 __rte_experimental
454 const char *
455 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
456 
457 /**
458  * Provide the Asymmetric xform string, given an xform enum.
459  *
460  * @param	xform_enum	xform type enum
461  *
462  * @return
463  * - Return NULL, if enum is not valid.
464  * - Return xform string, for valid enum.
465  */
466 __rte_experimental
467 const char *
468 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
469 
470 
471 /** Macro used at end of crypto PMD list */
472 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
473 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
474 
475 
476 /**
477  * Crypto device supported feature flags
478  *
479  * Note:
480  * New features flags should be added to the end of the list
481  *
482  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
483  */
484 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
485 /**< Symmetric crypto operations are supported */
486 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
487 /**< Asymmetric crypto operations are supported */
488 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
489 /**< Chaining symmetric crypto operations are supported */
490 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
491 /**< Utilises CPU SIMD SSE instructions */
492 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
493 /**< Utilises CPU SIMD AVX instructions */
494 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
495 /**< Utilises CPU SIMD AVX2 instructions */
496 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
497 /**< Utilises CPU AES-NI instructions */
498 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
499 /**< Operations are off-loaded to an
500  * external hardware accelerator
501  */
502 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
503 /**< Utilises CPU SIMD AVX512 instructions */
504 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
505 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
506  * are supported
507  */
508 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
509 /**< Out-of-place Scatter-gather (SGL) buffers are
510  * supported in input and output
511  */
512 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
513 /**< Out-of-place Scatter-gather (SGL) buffers are supported
514  * in input, combined with linear buffers (LB), with a
515  * single segment in output
516  */
517 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
518 /**< Out-of-place Scatter-gather (SGL) buffers are supported
519  * in output, combined with linear buffers (LB) in input
520  */
521 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
522 /**< Out-of-place linear buffers (LB) are supported in input and output */
523 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
524 /**< Utilises CPU NEON instructions */
525 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
526 /**< Utilises ARM CPU Cryptographic Extensions */
527 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
528 /**< Support Security Protocol Processing */
529 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
530 /**< Support RSA Private Key OP with exponent */
531 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
532 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
533 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
534 /**< Support encrypted-digest operations where digest is appended to data */
535 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
536 /**< Support asymmetric session-less operations */
537 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
538 /**< Support symmetric cpu-crypto processing */
539 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
540 /**< Support symmetric session-less operations */
541 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
542 /**< Support operations on data which is not byte aligned */
543 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
544 /**< Support accelerator specific symmetric raw data-path APIs */
545 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
546 /**< Support operations on multiple data-units message */
547 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
548 /**< Support wrapped key in cipher xform  */
549 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
550 /**< Support inner checksum computation/verification */
551 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
552 /**< Support Rx injection after security processing */
553 
554 /**
555  * Get the name of a crypto device feature flag
556  *
557  * @param	flag	The mask describing the flag.
558  *
559  * @return
560  *   The name of this flag, or NULL if it's not a valid feature flag.
561  */
562 const char *
563 rte_cryptodev_get_feature_name(uint64_t flag);
564 
565 /**  Crypto device information */
566 /* Structure rte_cryptodev_info 8< */
567 struct rte_cryptodev_info {
568 	const char *driver_name;	/**< Driver name. */
569 	uint8_t driver_id;		/**< Driver identifier */
570 	struct rte_device *device;	/**< Generic device information. */
571 
572 	uint64_t feature_flags;
573 	/**< Feature flags exposes HW/SW features for the given device */
574 
575 	const struct rte_cryptodev_capabilities *capabilities;
576 	/**< Array of devices supported capabilities */
577 
578 	unsigned max_nb_queue_pairs;
579 	/**< Maximum number of queues pairs supported by device. */
580 
581 	uint16_t min_mbuf_headroom_req;
582 	/**< Minimum mbuf headroom required by device */
583 
584 	uint16_t min_mbuf_tailroom_req;
585 	/**< Minimum mbuf tailroom required by device */
586 
587 	struct {
588 		unsigned max_nb_sessions;
589 		/**< Maximum number of sessions supported by device.
590 		 * If 0, the device does not have any limitation in
591 		 * number of sessions that can be used.
592 		 */
593 	} sym;
594 };
595 /* >8 End of structure rte_cryptodev_info. */
596 
597 #define RTE_CRYPTODEV_DETACHED  (0)
598 #define RTE_CRYPTODEV_ATTACHED  (1)
599 
600 /** Definitions of Crypto device event types */
601 enum rte_cryptodev_event_type {
602 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
603 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
604 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
605 };
606 
607 /* Crypto queue pair priority levels */
608 #define RTE_CRYPTODEV_QP_PRIORITY_HIGHEST   0
609 /**< Highest priority of a cryptodev queue pair
610  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
611  */
612 #define RTE_CRYPTODEV_QP_PRIORITY_NORMAL    128
613 /**< Normal priority of a cryptodev queue pair
614  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
615  */
616 #define RTE_CRYPTODEV_QP_PRIORITY_LOWEST    255
617 /**< Lowest priority of a cryptodev queue pair
618  * @see rte_cryptodev_queue_pair_setup(), rte_cryptodev_enqueue_burst()
619  */
620 
621 /** Crypto device queue pair configuration structure. */
622 /* Structure rte_cryptodev_qp_conf 8<*/
623 struct rte_cryptodev_qp_conf {
624 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
625 	struct rte_mempool *mp_session;
626 	/**< The mempool for creating session in sessionless mode */
627 	uint8_t priority;
628 	/**< Priority for this queue pair relative to other queue pairs.
629 	 *
630 	 * The requested priority should in the range of
631 	 * [@ref RTE_CRYPTODEV_QP_PRIORITY_HIGHEST, @ref RTE_CRYPTODEV_QP_PRIORITY_LOWEST].
632 	 * The implementation may normalize the requested priority to
633 	 * device supported priority value.
634 	 */
635 };
636 /* >8 End of structure rte_cryptodev_qp_conf. */
637 
638 /**
639  * Function type used for processing crypto ops when enqueue/dequeue burst is
640  * called.
641  *
642  * The callback function is called on enqueue/dequeue burst immediately.
643  *
644  * @param	dev_id		The identifier of the device.
645  * @param	qp_id		The index of the queue pair on which ops are
646  *				enqueued/dequeued. The value must be in the
647  *				range [0, nb_queue_pairs - 1] previously
648  *				supplied to *rte_cryptodev_configure*.
649  * @param	ops		The address of an array of *nb_ops* pointers
650  *				to *rte_crypto_op* structures which contain
651  *				the crypto operations to be processed.
652  * @param	nb_ops		The number of operations to process.
653  * @param	user_param	The arbitrary user parameter passed in by the
654  *				application when the callback was originally
655  *				registered.
656  * @return			The number of ops to be enqueued to the
657  *				crypto device.
658  */
659 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
660 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
661 
662 /**
663  * Typedef for application callback function to be registered by application
664  * software for notification of device events
665  *
666  * @param	dev_id	Crypto device identifier
667  * @param	event	Crypto device event to register for notification of.
668  * @param	cb_arg	User specified parameter to be passed to the
669  *			user's callback function.
670  */
671 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
672 		enum rte_cryptodev_event_type event, void *cb_arg);
673 
674 
675 /** Crypto Device statistics (cumulative; cleared by rte_cryptodev_stats_reset()) */
676 struct rte_cryptodev_stats {
677 	uint64_t enqueued_count;
678 	/**< Count of all operations enqueued */
679 	uint64_t dequeued_count;
680 	/**< Count of all operations dequeued */
681 
682 	uint64_t enqueue_err_count;
683 	/**< Total error count on operations enqueued */
684 	uint64_t dequeue_err_count;
685 	/**< Total error count on operations dequeued */
686 };
687 
688 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
689 /**< Max length of name of crypto PMD */
690 
691 /**
692  * Get the device identifier for the named crypto device.
693  *
694  * @param	name	device name to select the device structure.
695  *
696  * @return
697  *   - Returns crypto device identifier on success.
698  *   - Return -1 on failure to find named crypto device.
699  */
700 int
701 rte_cryptodev_get_dev_id(const char *name);
702 
703 /**
704  * Get the crypto device name given a device identifier.
705  *
706  * @param dev_id
707  *   The identifier of the device
708  *
709  * @return
710  *   - Returns crypto device name.
711  *   - Returns NULL if crypto device is not present.
712  */
713 const char *
714 rte_cryptodev_name_get(uint8_t dev_id);
715 
716 /**
717  * Get the total number of crypto devices that have been successfully
718  * initialised.
719  *
720  * @return
721  *   - The total number of usable crypto devices.
722  */
723 uint8_t
724 rte_cryptodev_count(void);
725 
726 /**
727  * Get number of crypto device defined type.
728  *
729  * @param	driver_id	driver identifier.
730  *
731  * @return
732  *   Returns number of crypto device.
733  */
734 uint8_t
735 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
736 
737 /**
738  * Get number and identifiers of attached crypto devices that
739  * use the same crypto driver.
740  *
741  * @param	driver_name	driver name.
742  * @param	devices		output devices identifiers.
743  * @param	nb_devices	maximal number of devices.
744  *
745  * @return
746  *   Returns number of attached crypto device.
747  */
748 uint8_t
749 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
750 		uint8_t nb_devices);
751 /**
752  * Return the NUMA socket to which a device is connected
753  *
754  * @param dev_id
755  *   The identifier of the device
756  * @return
757  *   The NUMA socket id to which the device is connected or
758  *   a default of zero if the socket could not be determined.
759  *   -1 is returned if the dev_id value is out of range.
760  */
761 int
762 rte_cryptodev_socket_id(uint8_t dev_id);
763 
764 /** Crypto device configuration structure */
765 /* Structure rte_cryptodev_config 8< */
766 struct rte_cryptodev_config {
767 	int socket_id;			/**< Socket to allocate resources on */
768 	uint16_t nb_queue_pairs;
769 	/**< Number of queue pairs to configure on device */
770 	uint64_t ff_disable;
771 	/**< Feature flags to be disabled. Only the following features are
772 	 * allowed to be disabled,
773 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
774 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
775 	 *  - RTE_CRYPTODEV_FF_SECURITY
776 	 */
777 };
778 /* >8 End of structure rte_cryptodev_config. */
779 
780 /**
781  * Configure a device.
782  *
783  * This function must be invoked first before any other function in the
784  * API. This function can also be re-invoked when a device is in the
785  * stopped state.
786  *
787  * @param	dev_id		The identifier of the device to configure.
788  * @param	config		The crypto device configuration structure.
789  *
790  * @return
791  *   - 0: Success, device configured.
792  *   - <0: Error code returned by the driver configuration function.
793  */
794 int
795 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
796 
797 /**
798  * Start a device.
799  *
800  * The device start step is the last one and consists of setting the configured
801  * offload features and in starting the transmit and the receive units of the
802  * device.
803  * On success, all basic functions exported by the API (link status,
804  * receive/transmit, and so on) can be invoked.
805  *
806  * @param dev_id
807  *   The identifier of the device.
808  * @return
809  *   - 0: Success, device started.
810  *   - <0: Error code of the driver device start function.
811  */
812 int
813 rte_cryptodev_start(uint8_t dev_id);
814 
815 /**
816  * Stop a device. The device can be restarted with a call to
817  * rte_cryptodev_start()
818  *
819  * @param	dev_id		The identifier of the device.
820  */
821 void
822 rte_cryptodev_stop(uint8_t dev_id);
823 
824 /**
825  * Close a device. The device cannot be restarted!
826  *
827  * @param	dev_id		The identifier of the device.
828  *
829  * @return
830  *  - 0 on successfully closing device
831  *  - <0 on failure to close device
832  */
833 int
834 rte_cryptodev_close(uint8_t dev_id);
835 
836 /**
837  * Allocate and set up a queue pair for a device.
838  *
839  *
840  * @param	dev_id		The identifier of the device.
841  * @param	queue_pair_id	The index of the queue pairs to set up. The
842  *				value must be in the range [0, nb_queue_pair
843  *				- 1] previously supplied to
844  *				rte_cryptodev_configure().
845  * @param	qp_conf		The pointer to the configuration data to be
846  *				used for the queue pair.
847  * @param	socket_id	The *socket_id* argument is the socket
848  *				identifier in case of NUMA. The value can be
849  *				*SOCKET_ID_ANY* if there is no NUMA constraint
850  *				for the DMA memory allocated for the receive
851  *				queue pair.
852  *
853  * @return
854  *   - 0: Success, queue pair correctly set up.
855  *   - <0: Queue pair configuration failed
856  */
857 int
858 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
859 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
860 
861 /**
862  * Get the status of queue pairs setup on a specific crypto device
863  *
864  * @param	dev_id		Crypto device identifier.
865  * @param	queue_pair_id	The index of the queue pair to query. The
866  *				value must be in the range [0, nb_queue_pair
867  *				- 1] previously supplied to
868  *				rte_cryptodev_configure().
869  * @return
870  *   - 0: qp was not configured
871  *	 - 1: qp was configured
872  *	 - -EINVAL: device was not configured
873  */
874 int
875 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
876 
877 /**
878  * Get the number of queue pairs on a specific crypto device
879  *
880  * @param	dev_id		Crypto device identifier.
881  * @return
882  *   - The number of configured queue pairs.
883  */
884 uint16_t
885 rte_cryptodev_queue_pair_count(uint8_t dev_id);
886 
887 
888 /**
889  * Retrieve the general I/O statistics of a device.
890  *
891  * @param	dev_id		The identifier of the device.
892  * @param	stats		A pointer to a structure of type
893  *				*rte_cryptodev_stats* to be filled with the
894  *				values of device counters.
895  * @return
896  *   - Zero if successful.
897  *   - Non-zero otherwise.
898  */
899 int
900 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
901 
902 /**
903  * Reset the general I/O statistics of a device.
904  *
905  * @param	dev_id		The identifier of the device.
906  */
907 void
908 rte_cryptodev_stats_reset(uint8_t dev_id);
909 
910 /**
911  * Retrieve the contextual information of a device.
912  *
913  * @param	dev_id		The identifier of the device.
914  * @param	dev_info	A pointer to a structure of type
915  *				*rte_cryptodev_info* to be filled with the
916  *				contextual information of the device.
917  *
918  * @note The capabilities field of dev_info is set to point to the first
919  * element of an array of struct rte_cryptodev_capabilities. The element after
920  * the last valid element has its op field set to
921  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
922  */
923 void
924 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
925 
926 
927 /**
928  * Register a callback function for specific device id.
929  *
930  * @param	dev_id		Device id.
931  * @param	event		Event interested.
932  * @param	cb_fn		User supplied callback function to be called.
933  * @param	cb_arg		Pointer to the parameters for the registered
934  *				callback.
935  *
936  * @return
937  *  - On success, zero.
938  *  - On failure, a negative value.
939  */
940 int
941 rte_cryptodev_callback_register(uint8_t dev_id,
942 		enum rte_cryptodev_event_type event,
943 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
944 
945 /**
946  * Unregister a callback function for specific device id.
947  *
948  * @param	dev_id		The device identifier.
949  * @param	event		Event interested.
950  * @param	cb_fn		User supplied callback function to be called.
951  * @param	cb_arg		Pointer to the parameters for the registered
952  *				callback.
953  *
954  * @return
955  *  - On success, zero.
956  *  - On failure, a negative value.
957  */
958 int
959 rte_cryptodev_callback_unregister(uint8_t dev_id,
960 		enum rte_cryptodev_event_type event,
961 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
962 
963 /**
964  * @warning
965  * @b EXPERIMENTAL: this API may change without prior notice.
966  *
967  * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR
968  * events.
969  *
970  * @param          dev_id	The device identifier.
971  * @param          qp_id	Queue pair index to be queried.
972  *
973  * @return
974  *   - 1 if requested queue has a pending event.
975  *   - 0 if no pending event is found.
976  *   - a negative value on failure
977  */
978 __rte_experimental
979 int
980 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
981 
982 struct rte_cryptodev_callback;
983 
984 /** Structure to keep track of registered callbacks */
985 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
986 
/**
 * Structure used to hold information about the callbacks to be called for a
 * queue pair on enqueue/dequeue.
 *
 * Callbacks are chained through the *next* field into a singly-linked list.
 */
struct rte_cryptodev_cb {
	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
	/**< Pointer to next callback in the list; atomic, as the list may be
	 * modified while data-path threads traverse it
	 */
	rte_cryptodev_callback_fn fn;
	/**< Callback function invoked on enqueue/dequeue of a burst */
	void *arg;
	/**< Application-supplied argument passed back to *fn* */
};
999 
/**
 * @internal
 * Structure used to hold information about the RCU for a queue pair.
 *
 * Pairs the queue pair's callback list with the RCU QSBR variable that
 * guards safe reclamation of removed callbacks.
 */
struct rte_cryptodev_cb_rcu {
	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
	/**< Pointer to next callback (head of the queue pair's callback list) */
	struct rte_rcu_qsbr *qsbr;
	/**< RCU QSBR variable per queue pair */
};
1010 
1011 /**
1012  * Get the security context for the cryptodev.
1013  *
1014  * @param dev_id
1015  *   The device identifier.
1016  * @return
1017  *   - NULL on error.
1018  *   - Pointer to security context on success.
1019  */
1020 void *
1021 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1022 
1023 /**
1024  * Create a symmetric session mempool.
1025  *
1026  * @param name
1027  *   The unique mempool name.
1028  * @param nb_elts
1029  *   The number of elements in the mempool.
1030  * @param elt_size
1031  *   The size of the element. This should be the size of the cryptodev PMD
1032  *   session private data obtained through
1033  *   rte_cryptodev_sym_get_private_session_size() function call.
1034  *   For the user who wants to use the same mempool for heterogeneous PMDs
1035  *   this value should be the maximum value of their private session sizes.
1036  *   Please note the created mempool will have bigger elt size than this
1037  *   value as necessary session header and the possible padding are filled
1038  *   into each elt.
1039  * @param cache_size
1040  *   The number of per-lcore cache elements
1041  * @param priv_size
1042  *   The private data size of each session.
1043  * @param socket_id
1044  *   The *socket_id* argument is the socket identifier in the case of
1045  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1046  *   constraint for the reserved zone.
1047  *
1048  * @return
1049  *  - On success returns the created session mempool pointer
1050  *  - On failure returns NULL
1051  */
1052 struct rte_mempool *
1053 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1054 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1055 	int socket_id);
1056 
1057 
1058 /**
1059  * Create an asymmetric session mempool.
1060  *
1061  * @param name
1062  *   The unique mempool name.
1063  * @param nb_elts
1064  *   The number of elements in the mempool.
1065  * @param cache_size
1066  *   The number of per-lcore cache elements
1067  * @param user_data_size
1068  *   The size of user data to be placed after session private data.
1069  * @param socket_id
1070  *   The *socket_id* argument is the socket identifier in the case of
1071  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1072  *   constraint for the reserved zone.
1073  *
1074  * @return
1075  *  - On success return mempool
1076  *  - On failure returns NULL
1077  */
1078 struct rte_mempool *
1079 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1080 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1081 
1082 /**
1083  * Create symmetric crypto session and fill out private data for the device id,
1084  * based on its device type.
1085  *
1086  * @param   dev_id   ID of device that we want the session to be used on
1087  * @param   xforms   Symmetric crypto transform operations to apply on flow
1088  *                   processed with this session
1089  * @param   mp       Mempool to allocate symmetric session objects from
1090  *
1091  * @return
1092  *  - On success return pointer to sym-session.
1093  *  - On failure returns NULL and rte_errno is set to the error code:
1094  *    - EINVAL on invalid arguments.
1095  *    - ENOMEM on memory error for session allocation.
1096  *    - ENOTSUP if device doesn't support session configuration.
1097  */
1098 void *
1099 rte_cryptodev_sym_session_create(uint8_t dev_id,
1100 		struct rte_crypto_sym_xform *xforms,
1101 		struct rte_mempool *mp);
1102 /**
1103  * Create and initialise an asymmetric crypto session structure.
1104  * Calls the PMD to configure the private session data.
1105  *
1106  * @param   dev_id   ID of device that we want the session to be used on
1107  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1108  *                   processed with this session
1109  * @param   mp       mempool to allocate asymmetric session
1110  *                   objects from
1111  * @param   session  void ** for session to be used
1112  *
1113  * @return
1114  *  - 0 on success.
1115  *  - -EINVAL on invalid arguments.
1116  *  - -ENOMEM on memory error for session allocation.
1117  *  - -ENOTSUP if device doesn't support session configuration.
1118  */
1119 int
1120 rte_cryptodev_asym_session_create(uint8_t dev_id,
1121 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1122 		void **session);
1123 
1124 /**
 * Frees session for the device id and returns it to its mempool.
 * It is the application's responsibility to ensure that there are
 * no in-flight operations still using the session.
1128  *
1129  * @param   dev_id   ID of device that uses the session.
1130  * @param   sess     Session header to be freed.
1131  *
1132  * @return
1133  *  - 0 if successful.
1134  *  - -EINVAL if session is NULL or the mismatched device ids.
1135  */
1136 int
1137 rte_cryptodev_sym_session_free(uint8_t dev_id,
1138 	void *sess);
1139 
1140 /**
1141  * Clears and frees asymmetric crypto session header and private data,
1142  * returning it to its original mempool.
1143  *
1144  * @param   dev_id   ID of device that uses the asymmetric session.
1145  * @param   sess     Session header to be freed.
1146  *
1147  * @return
1148  *  - 0 if successful.
1149  *  - -EINVAL if device is invalid or session is NULL.
1150  */
1151 int
1152 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1153 
1154 /**
1155  * Get the size of the asymmetric session header.
1156  *
1157  * @return
1158  *   Size of the asymmetric header session.
1159  */
1160 unsigned int
1161 rte_cryptodev_asym_get_header_session_size(void);
1162 
1163 /**
1164  * Get the size of the private symmetric session data
1165  * for a device.
1166  *
1167  * @param	dev_id		The device identifier.
1168  *
1169  * @return
1170  *   - Size of the private data, if successful
1171  *   - 0 if device is invalid or does not have private
1172  *   symmetric session
1173  */
1174 unsigned int
1175 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1176 
1177 /**
1178  * Get the size of the private data for asymmetric session
1179  * on device
1180  *
1181  * @param	dev_id		The device identifier.
1182  *
1183  * @return
1184  *   - Size of the asymmetric private data, if successful
1185  *   - 0 if device is invalid or does not have private session
1186  */
1187 unsigned int
1188 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1189 
1190 /**
 * Validate that the crypto device index refers to a valid attached crypto device.
1192  *
1193  * @param	dev_id	Crypto device index.
1194  *
1195  * @return
1196  *   - If the device index is valid (1) or not (0).
1197  */
1198 unsigned int
1199 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1200 
1201 /**
1202  * Provide driver identifier.
1203  *
1204  * @param name
1205  *   The pointer to a driver name.
1206  * @return
1207  *  The driver type identifier or -1 if no driver found
1208  */
1209 int rte_cryptodev_driver_id_get(const char *name);
1210 
1211 /**
1212  * Provide driver name.
1213  *
1214  * @param driver_id
1215  *   The driver identifier.
1216  * @return
1217  *  The driver name or null if no driver found
1218  */
1219 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1220 
1221 /**
1222  * Store user data in a session.
1223  *
1224  * @param	sess		Session pointer allocated by
1225  *				*rte_cryptodev_sym_session_create*.
1226  * @param	data		Pointer to the user data.
1227  * @param	size		Size of the user data.
1228  *
1229  * @return
1230  *  - On success, zero.
1231  *  - On failure, a negative value.
1232  */
1233 int
1234 rte_cryptodev_sym_session_set_user_data(void *sess,
1235 					void *data,
1236 					uint16_t size);
1237 
#define CRYPTO_SESS_OPAQUE_DATA_OFF 0
/**
 * Read the opaque data word stored in a session handle.
 *
 * The handle is viewed as an array of 64-bit words and the opaque value
 * is read from word index CRYPTO_SESS_OPAQUE_DATA_OFF.
 */
static inline uint64_t
rte_cryptodev_sym_session_opaque_data_get(void *sess)
{
	const uint64_t *slot =
		(const uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF;

	return *slot;
}
1247 
1248 /**
1249  * Set opaque data in session handle
1250  */
1251 static inline void
1252 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1253 {
1254 	uint64_t *data;
1255 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1256 	*data = opaque;
1257 }
1258 
1259 /**
1260  * Get user data stored in a session.
1261  *
1262  * @param	sess		Session pointer allocated by
1263  *				*rte_cryptodev_sym_session_create*.
1264  *
1265  * @return
1266  *  - On success return pointer to user data.
1267  *  - On failure returns NULL.
1268  */
1269 void *
1270 rte_cryptodev_sym_session_get_user_data(void *sess);
1271 
1272 /**
1273  * Store user data in an asymmetric session.
1274  *
1275  * @param	sess		Session pointer allocated by
1276  *				*rte_cryptodev_asym_session_create*.
1277  * @param	data		Pointer to the user data.
1278  * @param	size		Size of the user data.
1279  *
1280  * @return
1281  *  - On success, zero.
1282  *  - -EINVAL if the session pointer is invalid.
1283  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1284  */
1285 int
1286 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1287 
1288 /**
1289  * Get user data stored in an asymmetric session.
1290  *
1291  * @param	sess		Session pointer allocated by
1292  *				*rte_cryptodev_asym_session_create*.
1293  *
1294  * @return
1295  *  - On success return pointer to user data.
1296  *  - On failure returns NULL.
1297  */
1298 void *
1299 rte_cryptodev_asym_session_get_user_data(void *sess);
1300 
1301 /**
1302  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1303  * on user provided data.
1304  *
1305  * @param	dev_id	The device identifier.
1306  * @param	sess	Cryptodev session structure
1307  * @param	ofs	Start and stop offsets for auth and cipher operations
1308  * @param	vec	Vectorized operation descriptor
1309  *
1310  * @return
1311  *  - Returns number of successfully processed packets.
1312  */
1313 uint32_t
1314 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1315 	void *sess, union rte_crypto_sym_ofs ofs,
1316 	struct rte_crypto_sym_vec *vec);
1317 
1318 /**
1319  * Get the size of the raw data-path context buffer.
1320  *
1321  * @param	dev_id		The device identifier.
1322  *
1323  * @return
1324  *   - If the device supports raw data-path APIs, return the context size.
1325  *   - If the device does not support the APIs, return -1.
1326  */
1327 int
1328 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1329 
1330 /**
1331  * Set session event meta data
1332  *
1333  * @param	dev_id		The device identifier.
1334  * @param	sess            Crypto or security session.
1335  * @param	op_type         Operation type.
1336  * @param	sess_type       Session type.
1337  * @param	ev_mdata	Pointer to the event crypto meta data
1338  *				(aka *union rte_event_crypto_metadata*)
1339  * @param	size            Size of ev_mdata.
1340  *
1341  * @return
1342  *  - On success, zero.
1343  *  - On failure, a negative value.
1344  */
1345 int
1346 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1347 	enum rte_crypto_op_type op_type,
1348 	enum rte_crypto_op_sess_type sess_type,
1349 	void *ev_mdata, uint16_t size);
1350 
/**
 * Union of the different crypto session types, including a session-less
 * xform pointer, that a raw data-path context may be configured with.
 */
union rte_cryptodev_session_ctx {
	void *crypto_sess;
	struct rte_crypto_sym_xform *xform;
	struct rte_security_session *sec_sess;
};
1359 
1360 /**
1361  * Enqueue a vectorized operation descriptor into the device queue but the
1362  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1363  * is called.
1364  *
1365  * @param	qp		Driver specific queue pair data.
1366  * @param	drv_ctx		Driver specific context data.
1367  * @param	vec		Vectorized operation descriptor.
1368  * @param	ofs		Start and stop offsets for auth and cipher
1369  *				operations.
1370  * @param	user_data	The array of user data for dequeue later.
1371  * @param	enqueue_status	Driver written value to specify the
1372  *				enqueue status. Possible values:
1373  *				- 1: The number of operations returned are
1374  *				     enqueued successfully.
1375  *				- 0: The number of operations returned are
1376  *				     cached into the queue but are not processed
1377  *				     until rte_cryptodev_raw_enqueue_done() is
1378  *				     called.
1379  *				- negative integer: Error occurred.
1380  * @return
1381  *   - The number of operations in the descriptor successfully enqueued or
1382  *     cached into the queue but not enqueued yet, depends on the
1383  *     "enqueue_status" value.
1384  */
1385 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1386 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1387 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1388 
1389 /**
1390  * Enqueue single raw data vector into the device queue but the driver may or
1391  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1392  *
1393  * @param	qp		Driver specific queue pair data.
1394  * @param	drv_ctx		Driver specific context data.
1395  * @param	data_vec	The buffer data vector.
1396  * @param	n_data_vecs	Number of buffer data vectors.
1397  * @param	ofs		Start and stop offsets for auth and cipher
1398  *				operations.
1399  * @param	iv		IV virtual and IOVA addresses
1400  * @param	digest		digest virtual and IOVA addresses
1401  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1402  *				depends on the algorithm used.
1403  * @param	user_data	The user data.
1404  * @return
1405  *   - 1: The data vector is enqueued successfully.
1406  *   - 0: The data vector is cached into the queue but is not processed
1407  *        until rte_cryptodev_raw_enqueue_done() is called.
1408  *   - negative integer: failure.
1409  */
1410 typedef int (*cryptodev_sym_raw_enqueue_t)(
1411 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1412 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1413 	struct rte_crypto_va_iova_ptr *iv,
1414 	struct rte_crypto_va_iova_ptr *digest,
1415 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1416 	void *user_data);
1417 
1418 /**
1419  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1420  * enqueued/dequeued operations.
1421  *
1422  * @param	qp		Driver specific queue pair data.
1423  * @param	drv_ctx		Driver specific context data.
1424  * @param	n		The total number of processed operations.
1425  * @return
1426  *   - On success return 0.
1427  *   - On failure return negative integer.
1428  */
1429 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1430 	uint32_t n);
1431 
1432 /**
1433  * Typedef that the user provided for the driver to get the dequeue count.
1434  * The function may return a fixed number or the number parsed from the user
1435  * data stored in the first processed operation.
1436  *
1437  * @param	user_data	Dequeued user data.
1438  * @return
1439  *  - The number of operations to be dequeued.
1440  */
1441 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1442 
1443 /**
1444  * Typedef that the user provided to deal with post dequeue operation, such
1445  * as filling status.
1446  *
1447  * @param	user_data	Dequeued user data.
1448  * @param	index		Index number of the processed descriptor.
1449  * @param	is_op_success	Operation status provided by the driver.
1450  */
1451 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1452 	uint32_t index, uint8_t is_op_success);
1453 
1454 /**
1455  * Dequeue a burst of symmetric crypto processing.
1456  *
1457  * @param	qp			Driver specific queue pair data.
1458  * @param	drv_ctx			Driver specific context data.
1459  * @param	get_dequeue_count	User provided callback function to
1460  *					obtain dequeue operation count.
1461  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1462  *					value is used to pass the maximum
1463  *					number of operations to be dequeued.
1464  * @param	post_dequeue		User provided callback function to
1465  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
1467  *					from device queue. In case of
1468  *					*is_user_data_array* is set there
1469  *					should be enough room to store all
1470  *					user data.
1471  * @param	is_user_data_array	Set 1 if every dequeued user data will
1472  *					be written into out_user_data array.
1473  *					Set 0 if only the first user data will
1474  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1476  *					total successful operations count.
1477  * @param	dequeue_status		Driver written value to specify the
1478  *					dequeue status. Possible values:
1479  *					- 1: Successfully dequeued the number
1480  *					     of operations returned. The user
1481  *					     data previously set during enqueue
1482  *					     is stored in the "out_user_data".
1483  *					- 0: The number of operations returned
1484  *					     are completed and the user data is
1485  *					     stored in the "out_user_data", but
1486  *					     they are not freed from the queue
1487  *					     until
1488  *					     rte_cryptodev_raw_dequeue_done()
1489  *					     is called.
1490  *					- negative integer: Error occurred.
1491  * @return
1492  *   - The number of operations dequeued or completed but not freed from the
1493  *     queue, depends on "dequeue_status" value.
1494  */
1495 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1496 	uint8_t *drv_ctx,
1497 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1498 	uint32_t max_nb_to_dequeue,
1499 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1500 	void **out_user_data, uint8_t is_user_data_array,
1501 	uint32_t *n_success, int *dequeue_status);
1502 
1503 /**
1504  * Dequeue a symmetric crypto processing.
1505  *
1506  * @param	qp			Driver specific queue pair data.
1507  * @param	drv_ctx			Driver specific context data.
1508  * @param	dequeue_status		Driver written value to specify the
1509  *					dequeue status. Possible values:
 *					- 1: Successfully dequeued an operation.
1511  *					     The user data is returned.
1512  *					- 0: The first operation in the queue
1513  *					     is completed and the user data
1514  *					     previously set during enqueue is
1515  *					     returned, but it is not freed from
1516  *					     the queue until
1517  *					     rte_cryptodev_raw_dequeue_done() is
1518  *					     called.
1519  *					- negative integer: Error occurred.
1520  * @param	op_status		Driver written value to specify
1521  *					operation status.
1522  * @return
1523  *   - The user data pointer retrieved from device queue or NULL if no
1524  *     operation is ready for dequeue.
1525  */
1526 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1527 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1528 		enum rte_crypto_op_status *op_status);
1529 
/**
 * Context data for raw data-path API crypto process. The buffer of this
 * structure is to be allocated by the user application with the size equal
 * or bigger than rte_cryptodev_get_raw_dp_ctx_size() returned value.
 */
struct rte_crypto_raw_dp_ctx {
	void *qp_data;
	/**< Driver specific queue pair data, passed to each handler below */

	cryptodev_sym_raw_enqueue_t enqueue;
	/**< Enqueue one raw data vector */
	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
	/**< Enqueue a vectorized operation descriptor */
	cryptodev_sym_raw_operation_done_t enqueue_done;
	/**< Tell the queue pair to start processing cached enqueues */
	cryptodev_sym_raw_dequeue_t dequeue;
	/**< Dequeue one processed operation */
	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
	/**< Dequeue a burst of processed operations */
	cryptodev_sym_raw_operation_done_t dequeue_done;
	/**< Tell the queue pair that dequeued operations may be freed */

	/* Driver specific context data */
	uint8_t drv_ctx_data[];
};
1548 
1549 /**
1550  * Configure raw data-path context data.
1551  *
1552  * @param	dev_id		The device identifier.
1553  * @param	qp_id		The index of the queue pair from which to
1554  *				retrieve processed packets. The value must be
1555  *				in the range [0, nb_queue_pair - 1] previously
1556  *				supplied to rte_cryptodev_configure().
1557  * @param	ctx		The raw data-path context data.
1558  * @param	sess_type	Session type.
1559  * @param	session_ctx	Session context data.
1560  * @param	is_update	Set 0 if it is to initialize the ctx.
1561  *				Set 1 if ctx is initialized and only to update
1562  *				session context data.
1563  * @return
1564  *   - On success return 0.
1565  *   - On failure return negative integer.
1566  *     - -EINVAL if input parameters are invalid.
1567  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1568  *        provided session.
1569  */
1570 int
1571 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1572 	struct rte_crypto_raw_dp_ctx *ctx,
1573 	enum rte_crypto_op_sess_type sess_type,
1574 	union rte_cryptodev_session_ctx session_ctx,
1575 	uint8_t is_update);
1576 
1577 /**
1578  * Enqueue a vectorized operation descriptor into the device queue but the
1579  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1580  * is called.
1581  *
1582  * @param	ctx		The initialized raw data-path context data.
1583  * @param	vec		Vectorized operation descriptor.
1584  * @param	ofs		Start and stop offsets for auth and cipher
1585  *				operations.
1586  * @param	user_data	The array of user data for dequeue later.
1587  * @param	enqueue_status	Driver written value to specify the
1588  *				enqueue status. Possible values:
1589  *				- 1: The number of operations returned are
1590  *				     enqueued successfully.
1591  *				- 0: The number of operations returned are
1592  *				     cached into the queue but are not processed
1593  *				     until rte_cryptodev_raw_enqueue_done() is
1594  *				     called.
1595  *				- negative integer: Error occurred.
1596  * @return
1597  *   - The number of operations in the descriptor successfully enqueued or
1598  *     cached into the queue but not enqueued yet, depends on the
1599  *     "enqueue_status" value.
1600  */
1601 uint32_t
1602 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1603 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1604 	void **user_data, int *enqueue_status);
1605 
1606 /**
1607  * Enqueue single raw data vector into the device queue but the driver may or
1608  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1609  *
1610  * @param	ctx		The initialized raw data-path context data.
1611  * @param	data_vec	The buffer data vector.
1612  * @param	n_data_vecs	Number of buffer data vectors.
1613  * @param	ofs		Start and stop offsets for auth and cipher
1614  *				operations.
1615  * @param	iv		IV virtual and IOVA addresses
1616  * @param	digest		digest virtual and IOVA addresses
1617  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1618  *				depends on the algorithm used.
1619  * @param	user_data	The user data.
1620  * @return
1621  *   - 1: The data vector is enqueued successfully.
1622  *   - 0: The data vector is cached into the queue but is not processed
1623  *        until rte_cryptodev_raw_enqueue_done() is called.
1624  *   - negative integer: failure.
1625  */
1626 __rte_experimental
1627 static __rte_always_inline int
1628 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1629 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1630 	union rte_crypto_sym_ofs ofs,
1631 	struct rte_crypto_va_iova_ptr *iv,
1632 	struct rte_crypto_va_iova_ptr *digest,
1633 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1634 	void *user_data)
1635 {
1636 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1637 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1638 }
1639 
1640 /**
1641  * Start processing all enqueued operations from last
1642  * rte_cryptodev_configure_raw_dp_ctx() call.
1643  *
1644  * @param	ctx	The initialized raw data-path context data.
1645  * @param	n	The number of operations cached.
1646  * @return
1647  *   - On success return 0.
1648  *   - On failure return negative integer.
1649  */
1650 int
1651 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1652 		uint32_t n);
1653 
1654 /**
1655  * Dequeue a burst of symmetric crypto processing.
1656  *
1657  * @param	ctx			The initialized raw data-path context
1658  *					data.
1659  * @param	get_dequeue_count	User provided callback function to
1660  *					obtain dequeue operation count.
1661  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1662  *					value is used to pass the maximum
1663  *					number of operations to be dequeued.
1664  * @param	post_dequeue		User provided callback function to
1665  *					post-process a dequeued operation.
 * @param	out_user_data		User data pointer array to be retrieved
1667  *					from device queue. In case of
1668  *					*is_user_data_array* is set there
1669  *					should be enough room to store all
1670  *					user data.
1671  * @param	is_user_data_array	Set 1 if every dequeued user data will
1672  *					be written into out_user_data array.
1673  *					Set 0 if only the first user data will
1674  *					be written into out_user_data array.
 * @param	n_success		Driver written value to specify the
1676  *					total successful operations count.
1677  * @param	dequeue_status		Driver written value to specify the
1678  *					dequeue status. Possible values:
1679  *					- 1: Successfully dequeued the number
1680  *					     of operations returned. The user
1681  *					     data previously set during enqueue
1682  *					     is stored in the "out_user_data".
1683  *					- 0: The number of operations returned
1684  *					     are completed and the user data is
1685  *					     stored in the "out_user_data", but
1686  *					     they are not freed from the queue
1687  *					     until
1688  *					     rte_cryptodev_raw_dequeue_done()
1689  *					     is called.
1690  *					- negative integer: Error occurred.
1691  * @return
1692  *   - The number of operations dequeued or completed but not freed from the
1693  *     queue, depends on "dequeue_status" value.
1694  */
1695 uint32_t
1696 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1697 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1698 	uint32_t max_nb_to_dequeue,
1699 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1700 	void **out_user_data, uint8_t is_user_data_array,
1701 	uint32_t *n_success, int *dequeue_status);
1702 
1703 /**
1704  * Dequeue a symmetric crypto processing.
1705  *
1706  * @param	ctx			The initialized raw data-path context
1707  *					data.
1708  * @param	dequeue_status		Driver written value to specify the
1709  *					dequeue status. Possible values:
1710  *					- 1: Successfully dequeued a operation.
1711  *					     The user data is returned.
1712  *					- 0: The first operation in the queue
1713  *					     is completed and the user data
1714  *					     previously set during enqueue is
1715  *					     returned, but it is not freed from
1716  *					     the queue until
1717  *					     rte_cryptodev_raw_dequeue_done() is
1718  *					     called.
1719  *					- negative integer: Error occurred.
1720  * @param	op_status		Driver written value to specify
1721  *					operation status.
1722  * @return
1723  *   - The user data pointer retrieved from device queue or NULL if no
1724  *     operation is ready for dequeue.
1725  */
1726 __rte_experimental
1727 static __rte_always_inline void *
1728 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1729 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1730 {
1731 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1732 			op_status);
1733 }
1734 
1735 /**
 * Inform the queue pair that dequeue operations are finished.
1737  *
1738  * @param	ctx	The initialized raw data-path context data.
1739  * @param	n	The number of operations.
1740  * @return
1741  *   - On success return 0.
1742  *   - On failure return negative integer.
1743  */
1744 int
1745 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1746 		uint32_t n);
1747 
/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops enqueue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * enqueued on a given crypto device queue pair. The return value is a pointer
 * that can be used later to remove the callback using
 * rte_cryptodev_remove_enq_callback().
 *
 * Callbacks registered by the application will not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leakage.
 * The application is expected to call this API after rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair & they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be enqueued for processing. The value
 *				must be in the range [0, nb_queue_pairs - 1]
 *				previously supplied to
 *				*rte_cryptodev_configure*.
 * @param	cb_fn		The callback function
 * @param	cb_arg		A generic pointer parameter which will be passed
 *				to each invocation of the callback function on
 *				this crypto device and queue pair.
 *
 * @return
 *  - NULL on error & rte_errno will contain the error code.
 *  - On success, a pointer value which can later be used to remove the
 *    callback.
 */
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg);
1788 
/**
 * Remove a user callback function for given crypto device and queue pair.
 *
 * This function is used to remove enqueue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_enq_callback().
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be enqueued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb		Pointer to user supplied callback created via
 *				rte_cryptodev_add_enq_callback().
 *
 * @return
 *   -  0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				      uint16_t qp_id,
				      struct rte_cryptodev_cb *cb);
1813 
/**
 * Add a user callback for a given crypto device and queue pair which will be
 * called on crypto ops dequeue.
 *
 * This API configures a function to be called for each burst of crypto ops
 * dequeued from a given crypto device queue pair. The return value is a
 * pointer that can be used later to remove the callback using
 * rte_cryptodev_remove_deq_callback().
 *
 * Callbacks registered by the application will not survive
 * rte_cryptodev_configure() as it reinitializes the callback list.
 * It is the user's responsibility to remove all installed callbacks before
 * calling rte_cryptodev_configure() to avoid possible memory leakage.
 * The application is expected to call this API after rte_cryptodev_configure().
 *
 * Multiple functions can be registered per queue pair & they are called
 * in the order they were added. The API does not restrict the maximum number
 * of callbacks.
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be dequeued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb_fn		The callback function
 * @param	cb_arg		A generic pointer parameter which will be passed
 *				to each invocation of the callback function on
 *				this crypto device and queue pair.
 *
 * @return
 *   - NULL on error & rte_errno will contain the error code.
 *   - On success, a pointer value which can later be used to remove the
 *     callback.
 */
struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg);
1853 
/**
 * Remove a user callback function for given crypto device and queue pair.
 *
 * This function is used to remove dequeue callbacks that were added to a
 * crypto device queue pair using rte_cryptodev_add_deq_callback().
 *
 * @param	dev_id		The identifier of the device.
 * @param	qp_id		The index of the queue pair on which ops are
 *				to be dequeued. The value must be in the
 *				range [0, nb_queue_pairs - 1] previously
 *				supplied to *rte_cryptodev_configure*.
 * @param	cb		Pointer to user supplied callback created via
 *				rte_cryptodev_add_deq_callback().
 *
 * @return
 *   -  0: Success. Callback was removed.
 *   - <0: The dev_id or the qp_id is out of range, or the callback
 *         is NULL or not found for the crypto device queue pair.
 */
int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				      uint16_t qp_id,
				      struct rte_cryptodev_cb *cb);
1878 
1879 #include <rte_cryptodev_core.h>
1880 
1881 #ifdef __cplusplus
1882 extern "C" {
1883 #endif
1884 /**
1885  *
1886  * Dequeue a burst of processed crypto operations from a queue on the crypto
1887  * device. The dequeued operation are stored in *rte_crypto_op* structures
1888  * whose pointers are supplied in the *ops* array.
1889  *
1890  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1891  * actually dequeued, which is the number of *rte_crypto_op* data structures
1892  * effectively supplied into the *ops* array.
1893  *
1894  * A return value equal to *nb_ops* indicates that the queue contained
1895  * at least *nb_ops* operations, and this is likely to signify that other
1896  * processed operations remain in the devices output queue. Applications
1897  * implementing a "retrieve as many processed operations as possible" policy
1898  * can check this specific case and keep invoking the
1899  * rte_cryptodev_dequeue_burst() function until a value less than
1900  * *nb_ops* is returned.
1901  *
1902  * The rte_cryptodev_dequeue_burst() function does not provide any error
1903  * notification to avoid the corresponding overhead.
1904  *
1905  * @param	dev_id		The symmetric crypto device identifier
1906  * @param	qp_id		The index of the queue pair from which to
1907  *				retrieve processed packets. The value must be
1908  *				in the range [0, nb_queue_pair - 1] previously
1909  *				supplied to rte_cryptodev_configure().
1910  * @param	ops		The address of an array of pointers to
1911  *				*rte_crypto_op* structures that must be
1912  *				large enough to store *nb_ops* pointers in it.
1913  * @param	nb_ops		The maximum number of operations to dequeue.
1914  *
1915  * @return
1916  *   - The number of operations actually dequeued, which is the number
1917  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1918  *   *ops* array.
1919  */
1920 static inline uint16_t
1921 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1922 		struct rte_crypto_op **ops, uint16_t nb_ops)
1923 {
1924 	const struct rte_crypto_fp_ops *fp_ops;
1925 	void *qp;
1926 
1927 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1928 
1929 	fp_ops = &rte_crypto_fp_ops[dev_id];
1930 	qp = fp_ops->qp.data[qp_id];
1931 
1932 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1933 
1934 #ifdef RTE_CRYPTO_CALLBACKS
1935 	if (unlikely(fp_ops->qp.deq_cb[qp_id].next != NULL)) {
1936 		struct rte_cryptodev_cb_rcu *list;
1937 		struct rte_cryptodev_cb *cb;
1938 
1939 		/* rte_memory_order_release memory order was used when the
1940 		 * call back was inserted into the list.
1941 		 * Since there is a clear dependency between loading
1942 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1943 		 * not required.
1944 		 */
1945 		list = &fp_ops->qp.deq_cb[qp_id];
1946 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1947 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1948 
1949 		while (cb != NULL) {
1950 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1951 					cb->arg);
1952 			cb = cb->next;
1953 		};
1954 
1955 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1956 	}
1957 #endif
1958 	return nb_ops;
1959 }
1960 
1961 /**
1962  * Enqueue a burst of operations for processing on a crypto device.
1963  *
1964  * The rte_cryptodev_enqueue_burst() function is invoked to place
1965  * crypto operations on the queue *qp_id* of the device designated by
1966  * its *dev_id*.
1967  *
1968  * The *nb_ops* parameter is the number of operations to process which are
1969  * supplied in the *ops* array of *rte_crypto_op* structures.
1970  *
1971  * The rte_cryptodev_enqueue_burst() function returns the number of
1972  * operations it actually enqueued for processing. A return value equal to
1973  * *nb_ops* means that all packets have been enqueued.
1974  *
1975  * @param	dev_id		The identifier of the device.
1976  * @param	qp_id		The index of the queue pair which packets are
1977  *				to be enqueued for processing. The value
1978  *				must be in the range [0, nb_queue_pairs - 1]
1979  *				previously supplied to
1980  *				 *rte_cryptodev_configure*.
1981  * @param	ops		The address of an array of *nb_ops* pointers
1982  *				to *rte_crypto_op* structures which contain
1983  *				the crypto operations to be processed.
1984  * @param	nb_ops		The number of operations to process.
1985  *
1986  * @return
1987  * The number of operations actually enqueued on the crypto device. The return
1988  * value can be less than the value of the *nb_ops* parameter when the
1989  * crypto devices queue is full or if invalid parameters are specified in
1990  * a *rte_crypto_op*.
1991  */
1992 static inline uint16_t
1993 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1994 		struct rte_crypto_op **ops, uint16_t nb_ops)
1995 {
1996 	const struct rte_crypto_fp_ops *fp_ops;
1997 	void *qp;
1998 
1999 	fp_ops = &rte_crypto_fp_ops[dev_id];
2000 	qp = fp_ops->qp.data[qp_id];
2001 #ifdef RTE_CRYPTO_CALLBACKS
2002 	if (unlikely(fp_ops->qp.enq_cb[qp_id].next != NULL)) {
2003 		struct rte_cryptodev_cb_rcu *list;
2004 		struct rte_cryptodev_cb *cb;
2005 
2006 		/* rte_memory_order_release memory order was used when the
2007 		 * call back was inserted into the list.
2008 		 * Since there is a clear dependency between loading
2009 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
2010 		 * not required.
2011 		 */
2012 		list = &fp_ops->qp.enq_cb[qp_id];
2013 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
2014 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
2015 
2016 		while (cb != NULL) {
2017 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
2018 					cb->arg);
2019 			cb = cb->next;
2020 		};
2021 
2022 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2023 	}
2024 #endif
2025 
2026 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2027 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2028 }
2029 
2030 /**
2031  * @warning
2032  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2033  *
2034  * Get the number of used descriptors or depth of a cryptodev queue pair.
2035  *
2036  * This function retrieves the number of used descriptors in a crypto queue.
2037  * Applications can use this API in the fast path to inspect QP occupancy and
2038  * take appropriate action.
2039  *
2040  * Since it is a fast-path function, no check is performed on dev_id and qp_id.
2041  * Caller must therefore ensure that the device is enabled and queue pair is setup.
2042  *
2043  * @param	dev_id		The identifier of the device.
2044  * @param	qp_id		The index of the queue pair for which used descriptor
2045  *				count is to be retrieved. The value
2046  *				must be in the range [0, nb_queue_pairs - 1]
2047  *				previously supplied to *rte_cryptodev_configure*.
2048  *
2049  * @return
2050  *  The number of used descriptors on the specified queue pair, or:
2051  *   - (-ENOTSUP) if the device does not support this function.
2052  */
2053 
2054 __rte_experimental
2055 static inline int
2056 rte_cryptodev_qp_depth_used(uint8_t dev_id, uint16_t qp_id)
2057 {
2058 	const struct rte_crypto_fp_ops *fp_ops;
2059 	void *qp;
2060 	int rc;
2061 
2062 	fp_ops = &rte_crypto_fp_ops[dev_id];
2063 	qp = fp_ops->qp.data[qp_id];
2064 
2065 	if (fp_ops->qp_depth_used == NULL) {
2066 		rc = -ENOTSUP;
2067 		goto out;
2068 	}
2069 
2070 	rc = fp_ops->qp_depth_used(qp);
2071 out:
2072 	rte_cryptodev_trace_qp_depth_used(dev_id, qp_id);
2073 	return rc;
2074 }
2075 
2076 
2077 #ifdef __cplusplus
2078 }
2079 #endif
2080 
2081 #endif /* _RTE_CRYPTODEV_H_ */
2082