1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
8 /**
9  * @file rte_cryptodev.h
10  *
11  * RTE Cryptographic Device APIs
12  *
13  * Defines RTE Crypto Device APIs for the provisioning of cipher and
14  * authentication operations.
15  */
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include <rte_compat.h>
22 #include "rte_kvargs.h"
23 #include "rte_crypto.h"
24 #include <rte_common.h>
25 #include <rte_rcu_qsbr.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
29 /**
30  * @internal Logtype used for cryptodev related messages.
31  */
32 extern int rte_cryptodev_logtype;
33 #define RTE_LOGTYPE_CRYPTODEV rte_cryptodev_logtype
34 
35 /* Logging Macros */
36 #define CDEV_LOG_ERR(...) \
37 	RTE_LOG_LINE(ERR, CRYPTODEV, \
38 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__ ,), \
39 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__ ,)))
40 
41 #define CDEV_LOG_INFO(...) \
42 	RTE_LOG_LINE(INFO, CRYPTODEV, "" __VA_ARGS__)
43 
44 #define CDEV_LOG_DEBUG(...) \
45 	RTE_LOG_LINE(DEBUG, CRYPTODEV, \
46 		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__ ,), \
47 			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__ ,)))
48 
49 #define CDEV_PMD_TRACE(...) \
50 	RTE_LOG_LINE(DEBUG, CRYPTODEV, \
51 		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__ ,), \
52 			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__ ,)))
53 
54 /**
55  * A macro that points to an offset from the start
56  * of the crypto operation structure (rte_crypto_op)
57  *
58  * The returned pointer is cast to type t.
59  *
60  * @param c
61  *   The crypto operation.
62  * @param o
63  *   The offset from the start of the crypto operation.
64  * @param t
65  *   The type to cast the result into.
66  */
67 #define rte_crypto_op_ctod_offset(c, t, o)	\
68 	((t)((char *)(c) + (o)))
69 
70 /**
71  * A macro that returns the physical address that points
72  * to an offset from the start of the crypto operation
73  * (rte_crypto_op)
74  *
75  * @param c
76  *   The crypto operation.
77  * @param o
78  *   The offset from the start of the crypto operation
79  *   to calculate address from.
80  */
81 #define rte_crypto_op_ctophys_offset(c, o)	\
82 	(rte_iova_t)((c)->phys_addr + (o))
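
/*
 * Usage sketch for the two offset macros above (illustrative only; the
 * convention of placing the IV right after the symmetric operation inside
 * the crypto op private area, and hence the offset value used here, is a
 * common application-side assumption, not something mandated by this API):
 *
 *	static void
 *	example_iv_pointers(struct rte_crypto_op *op)
 *	{
 *		uint16_t iv_offset = sizeof(struct rte_crypto_op) +
 *				sizeof(struct rte_crypto_sym_op);
 *		uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);
 *		rte_iova_t iv_iova = rte_crypto_op_ctophys_offset(op, iv_offset);
 *
 *		RTE_SET_USED(iv);
 *		RTE_SET_USED(iv_iova);
 *	}
 */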
83 
84 /**
85  * Crypto parameters range description
86  */
87 struct rte_crypto_param_range {
88 	uint16_t min;	/**< minimum size */
89 	uint16_t max;	/**< maximum size */
90 	uint16_t increment;
91 	/**< If a range of sizes is supported,
92 	 * this parameter is used to indicate
93 	 * the supported increments in byte size
94 	 * between the minimum and maximum.
95 	 */
96 };
97 
98 /**
99  * Data-unit supported lengths of cipher algorithms.
100  * A bit can represent any set of data-unit sizes
101  * (single size, multiple size, range, etc).
102  */
103 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES             RTE_BIT32(0)
104 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES            RTE_BIT32(1)
105 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES           RTE_BIT32(2)
106 
107 /**
108  * Symmetric Crypto Capability
109  */
110 struct rte_cryptodev_symmetric_capability {
111 	enum rte_crypto_sym_xform_type xform_type;
112 	/**< Transform type : Authentication / Cipher / AEAD */
113 	union {
114 		struct {
115 			enum rte_crypto_auth_algorithm algo;
116 			/**< authentication algorithm */
117 			uint16_t block_size;
118 			/**< algorithm block size */
119 			struct rte_crypto_param_range key_size;
120 			/**< auth key size range */
121 			struct rte_crypto_param_range digest_size;
122 			/**< digest size range */
123 			struct rte_crypto_param_range aad_size;
124 			/**< Additional authentication data size range */
125 			struct rte_crypto_param_range iv_size;
126 			/**< Initialisation vector data size range */
127 		} auth;
128 		/**< Symmetric Authentication transform capabilities */
129 		struct {
130 			enum rte_crypto_cipher_algorithm algo;
131 			/**< cipher algorithm */
132 			uint16_t block_size;
133 			/**< algorithm block size */
134 			struct rte_crypto_param_range key_size;
135 			/**< cipher key size range */
136 			struct rte_crypto_param_range iv_size;
137 			/**< Initialisation vector data size range */
138 			uint32_t dataunit_set;
139 			/**<
140 			 * Supported data-unit lengths:
141 			 * RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* bits
142 			 * or 0 for lengths defined in the algorithm standard.
143 			 */
144 		} cipher;
145 		/**< Symmetric Cipher transform capabilities */
146 		struct {
147 			enum rte_crypto_aead_algorithm algo;
148 			/**< AEAD algorithm */
149 			uint16_t block_size;
150 			/**< algorithm block size */
151 			struct rte_crypto_param_range key_size;
152 			/**< AEAD key size range */
153 			struct rte_crypto_param_range digest_size;
154 			/**< digest size range */
155 			struct rte_crypto_param_range aad_size;
156 			/**< Additional authentication data size range */
157 			struct rte_crypto_param_range iv_size;
158 			/**< Initialisation vector data size range */
159 		} aead;
160 	};
161 };
162 
163 /**
164  * Asymmetric Xform Crypto Capability
165  */
166 struct rte_cryptodev_asymmetric_xform_capability {
167 	enum rte_crypto_asym_xform_type xform_type;
168 	/**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
169 
170 	uint32_t op_types;
171 	/**<
172 	 * Bitmask for supported rte_crypto_asym_op_type or
173 	 * rte_crypto_asym_ke_type. Which enum is used is determined
174 	 * by the rte_crypto_asym_xform_type. For key exchange algorithms
175 	 * like Diffie-Hellman it is rte_crypto_asym_ke_type, for others
176 	 * it is rte_crypto_asym_op_type.
177 	 */
178 
179 	__extension__
180 	union {
181 		struct rte_crypto_param_range modlen;
182 		/**< Range of modulus length supported by modulus based xform.
183 		 * Value 0 means implementation default
184 		 */
185 
186 		uint8_t internal_rng;
187 		/**< Availability of random number generator for Elliptic curve based xform.
188 		 * Value 0 means unavailable, and the application should pass the required
189 		 * random value. Otherwise, the PMD will internally compute the random number.
190 		 */
191 	};
192 
193 	uint64_t hash_algos;
194 	/**< Bitmask of hash algorithms supported for op_type. */
195 };
196 
197 /**
198  * Asymmetric Crypto Capability
199  */
200 struct rte_cryptodev_asymmetric_capability {
201 	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
202 };
203 
204 
205 /** Structure used to capture a capability of a crypto device */
206 struct rte_cryptodev_capabilities {
207 	enum rte_crypto_op_type op;
208 	/**< Operation type */
209 
210 	union {
211 		struct rte_cryptodev_symmetric_capability sym;
212 		/**< Symmetric operation capability parameters */
213 		struct rte_cryptodev_asymmetric_capability asym;
214 		/**< Asymmetric operation capability parameters */
215 	};
216 };
217 
218 /** Structure used to describe crypto algorithms */
219 struct rte_cryptodev_sym_capability_idx {
220 	enum rte_crypto_sym_xform_type type;
221 	union {
222 		enum rte_crypto_cipher_algorithm cipher;
223 		enum rte_crypto_auth_algorithm auth;
224 		enum rte_crypto_aead_algorithm aead;
225 	} algo;
226 };
227 
228 /**
229  * Structure used to describe asymmetric crypto xforms
230  * Each xform maps to one asym algorithm.
231  */
232 struct rte_cryptodev_asym_capability_idx {
233 	enum rte_crypto_asym_xform_type type;
234 	/**< Asymmetric xform (algo) type */
235 };
236 
237 /**
238  * Provide capabilities available for defined device and algorithm
239  *
240  * @param	dev_id		The identifier of the device.
241  * @param	idx		Description of crypto algorithms.
242  *
243  * @return
244  *   - Return description of the symmetric crypto capability if it exists.
245  *   - Return NULL if the capability does not exist.
246  */
247 const struct rte_cryptodev_symmetric_capability *
248 rte_cryptodev_sym_capability_get(uint8_t dev_id,
249 		const struct rte_cryptodev_sym_capability_idx *idx);
250 
251 /**
252  *  Provide capabilities available for defined device and xform
253  *
254  * @param	dev_id		The identifier of the device.
255  * @param	idx		Description of asym crypto xform.
256  *
257  * @return
258  *   - Return description of the asymmetric crypto capability if it exists.
259  *   - Return NULL if the capability does not exist.
260  */
261 const struct rte_cryptodev_asymmetric_xform_capability *
262 rte_cryptodev_asym_capability_get(uint8_t dev_id,
263 		const struct rte_cryptodev_asym_capability_idx *idx);
264 
265 /**
266  * Check if key size and initial vector are supported
267  * in crypto cipher capability
268  *
269  * @param	capability	Description of the symmetric crypto capability.
270  * @param	key_size	Cipher key size.
271  * @param	iv_size		Cipher initial vector size.
272  *
273  * @return
274  *   - Return 0 if the parameters are in range of the capability.
275  *   - Return -1 if the parameters are out of range of the capability.
276  */
277 int
278 rte_cryptodev_sym_capability_check_cipher(
279 		const struct rte_cryptodev_symmetric_capability *capability,
280 		uint16_t key_size, uint16_t iv_size);
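
/*
 * Usage sketch combining rte_cryptodev_sym_capability_get() and
 * rte_cryptodev_sym_capability_check_cipher() (illustrative only; the
 * AES-CBC algorithm and the 16-byte key/IV sizes are example choices):
 *
 *	static int
 *	example_check_aes_cbc_128(uint8_t dev_id)
 *	{
 *		const struct rte_cryptodev_symmetric_capability *cap;
 *		struct rte_cryptodev_sym_capability_idx idx = {
 *			.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *			.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *		};
 *
 *		cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *		if (cap == NULL)
 *			return -1;
 *		return rte_cryptodev_sym_capability_check_cipher(cap, 16, 16);
 *	}
 *
 * A NULL capability means the algorithm is not supported at all; a negative
 * check result means the requested key/IV sizes are out of range.
 */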
281 
282 /**
283  * Check if key size and initial vector are supported
284  * in crypto auth capability
285  *
286  * @param	capability	Description of the symmetric crypto capability.
287  * @param	key_size	Auth key size.
288  * @param	digest_size	Auth digest size.
289  * @param	iv_size		Auth initial vector size.
290  *
291  * @return
292  *   - Return 0 if the parameters are in range of the capability.
293  *   - Return -1 if the parameters are out of range of the capability.
294  */
295 int
296 rte_cryptodev_sym_capability_check_auth(
297 		const struct rte_cryptodev_symmetric_capability *capability,
298 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
299 
300 /**
301  * Check if key, digest, AAD and initial vector sizes are supported
302  * in crypto AEAD capability
303  *
304  * @param	capability	Description of the symmetric crypto capability.
305  * @param	key_size	AEAD key size.
306  * @param	digest_size	AEAD digest size.
307  * @param	aad_size	AEAD AAD size.
308  * @param	iv_size		AEAD IV size.
309  *
310  * @return
311  *   - Return 0 if the parameters are in range of the capability.
312  *   - Return -1 if the parameters are out of range of the capability.
313  */
314 int
315 rte_cryptodev_sym_capability_check_aead(
316 		const struct rte_cryptodev_symmetric_capability *capability,
317 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
318 		uint16_t iv_size);
319 
320 /**
321  * Check if op type is supported
322  *
323  * @param	capability	Description of the asymmetric crypto capability.
324  * @param	op_type		op type
325  *
326  * @return
327  *   - Return 1 if the op type is supported
328  *   - Return 0 if unsupported
329  */
330 int
331 rte_cryptodev_asym_xform_capability_check_optype(
332 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
333 		enum rte_crypto_asym_op_type op_type);
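
/*
 * Usage sketch (illustrative only; RSA and the sign operation are example
 * choices): query the RSA xform capability and test whether the sign
 * operation type is supported.
 *
 *	static int
 *	example_rsa_sign_supported(uint8_t dev_id)
 *	{
 *		const struct rte_cryptodev_asymmetric_xform_capability *cap;
 *		struct rte_cryptodev_asym_capability_idx idx = {
 *			.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *		};
 *
 *		cap = rte_cryptodev_asym_capability_get(dev_id, &idx);
 *		if (cap == NULL)
 *			return 0;
 *		return rte_cryptodev_asym_xform_capability_check_optype(cap,
 *				RTE_CRYPTO_ASYM_OP_SIGN);
 *	}
 */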
334 
335 /**
336  * Check if modulus length is in supported range
337  *
338  * @param	capability	Description of the asymmetric crypto capability.
339  * @param	modlen		modulus length.
340  *
341  * @return
342  *   - Return 0 if the parameters are in range of the capability.
343  *   - Return -1 if the parameters are out of range of the capability.
344  */
345 int
346 rte_cryptodev_asym_xform_capability_check_modlen(
347 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
348 		uint16_t modlen);
349 
350 /**
351  * Check if hash algorithm is supported.
352  *
353  * @param	capability	Asymmetric crypto capability.
354  * @param	hash		Hash algorithm.
355  *
356  * @return
357  *   - Return true if the hash algorithm is supported.
358  *   - Return false if the hash algorithm is not supported.
359  */
360 bool
361 rte_cryptodev_asym_xform_capability_check_hash(
362 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
363 	enum rte_crypto_auth_algorithm hash);
364 
365 /**
366  * Provide the cipher algorithm enum, given an algorithm string
367  *
368  * @param	algo_enum	A pointer to the cipher algorithm
369  *				enum to be filled
370  * @param	algo_string	Cipher algorithm string
371  *
372  * @return
373  * - Return -1 if the string is not valid
374  * - Return 0 if the string is valid
375  */
376 int
377 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
378 		const char *algo_string);
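
/*
 * Usage sketch (illustrative only; "aes-cbc" is an example algorithm string,
 * e.g. as taken from a command-line option):
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *	int ret = rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc");
 *
 * A negative *ret* means the string was not recognised.
 */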
379 
380 /**
381  * Provide the authentication algorithm enum, given an algorithm string
382  *
383  * @param	algo_enum	A pointer to the authentication algorithm
384  *				enum to be filled
385  * @param	algo_string	Authentication algo string
386  *
387  * @return
388  * - Return -1 if the string is not valid
389  * - Return 0 if the string is valid
390  */
391 int
392 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
393 		const char *algo_string);
394 
395 /**
396  * Provide the AEAD algorithm enum, given an algorithm string
397  *
398  * @param	algo_enum	A pointer to the AEAD algorithm
399  *				enum to be filled
400  * @param	algo_string	AEAD algorithm string
401  *
402  * @return
403  * - Return -1 if the string is not valid
404  * - Return 0 if the string is valid
405  */
406 int
407 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
408 		const char *algo_string);
409 
410 /**
411  * Provide the Asymmetric xform enum, given an xform string
412  *
413  * @param	xform_enum	A pointer to the xform type
414  *				enum to be filled
415  * @param	xform_string	xform string
416  *
417  * @return
418  * - Return -1 if the string is not valid
419  * - Return 0 if the string is valid
420  */
421 int
422 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
423 		const char *xform_string);
424 
425 /**
426  * Provide the cipher algorithm string, given an algorithm enum.
427  *
428  * @param	algo_enum	cipher algorithm enum
429  *
430  * @return
431  * - Return NULL if enum is not valid
432  * - Return algo_string corresponding to enum
433  */
434 __rte_experimental
435 const char *
436 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);
437 
438 /**
439  * Provide the authentication algorithm string, given an algorithm enum.
440  *
441  * @param	algo_enum	auth algorithm enum
442  *
443  * @return
444  * - Return NULL if enum is not valid
445  * - Return algo_string corresponding to enum
446  */
447 __rte_experimental
448 const char *
449 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);
450 
451 /**
452  * Provide the AEAD algorithm string, given an algorithm enum.
453  *
454  * @param	algo_enum	AEAD algorithm enum
455  *
456  * @return
457  * - Return NULL if enum is not valid
458  * - Return algo_string corresponding to enum
459  */
460 __rte_experimental
461 const char *
462 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);
463 
464 /**
465  * Provide the Asymmetric xform string, given an xform enum.
466  *
467  * @param	xform_enum	xform type enum
468  *
469  * @return
470  * - Return NULL, if enum is not valid.
471  * - Return xform string, for valid enum.
472  */
473 __rte_experimental
474 const char *
475 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);
476 
477 
478 /** Macro used at end of crypto PMD list */
479 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
480 	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
481 
482 
483 /**
484  * Crypto device supported feature flags
485  *
486  * Note:
487  * New features flags should be added to the end of the list
488  *
489  * Keep these flags synchronised with rte_cryptodev_get_feature_name()
490  */
491 #define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
492 /**< Symmetric crypto operations are supported */
493 #define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
494 /**< Asymmetric crypto operations are supported */
495 #define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
496 /**< Chaining of symmetric crypto operations is supported */
497 #define	RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
498 /**< Utilises CPU SIMD SSE instructions */
499 #define	RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
500 /**< Utilises CPU SIMD AVX instructions */
501 #define	RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
502 /**< Utilises CPU SIMD AVX2 instructions */
503 #define	RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
504 /**< Utilises CPU AES-NI instructions */
505 #define	RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
506 /**< Operations are off-loaded to an
507  * external hardware accelerator
508  */
509 #define	RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
510 /**< Utilises CPU SIMD AVX512 instructions */
511 #define	RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
512 /**< In-place Scatter-gather (SGL) buffers, with multiple segments,
513  * are supported
514  */
515 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
516 /**< Out-of-place Scatter-gather (SGL) buffers are
517  * supported in input and output
518  */
519 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
520 /**< Out-of-place Scatter-gather (SGL) buffers are supported
521  * in input, combined with linear buffers (LB), with a
522  * single segment in output
523  */
524 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
525 /**< Out-of-place Scatter-gather (SGL) buffers are supported
526  * in output, combined with linear buffers (LB) in input
527  */
528 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
529 /**< Out-of-place linear buffers (LB) are supported in input and output */
530 #define	RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
531 /**< Utilises CPU NEON instructions */
532 #define	RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
533 /**< Utilises ARM CPU Cryptographic Extensions */
534 #define	RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
535 /**< Support Security Protocol Processing */
536 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
537 /**< Support RSA Private Key OP with exponent */
538 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
539 /**< Support RSA Private Key OP with CRT (quintuple) Keys */
540 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
541 /**< Support encrypted-digest operations where digest is appended to data */
542 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
543 /**< Support asymmetric session-less operations */
544 #define	RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
545 /**< Support symmetric cpu-crypto processing */
546 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
547 /**< Support symmetric session-less operations */
548 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
549 /**< Support operations on data which is not byte aligned */
550 #define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
551 /**< Support accelerator specific symmetric raw data-path APIs */
552 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
553 /**< Support operations on multiple data-units message */
554 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
555 /**< Support wrapped key in cipher xform  */
556 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
557 /**< Support inner checksum computation/verification */
558 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
559 /**< Support Rx injection after security processing */
560 
561 /**
562  * Get the name of a crypto device feature flag
563  *
564  * @param	flag	The mask describing the flag.
565  *
566  * @return
567  *   The name of this flag, or NULL if it's not a valid feature flag.
568  */
569 const char *
570 rte_cryptodev_get_feature_name(uint64_t flag);
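
/*
 * Usage sketch (illustrative only): print the name of every feature flag
 * advertised by a device, assuming *feature_flags* was obtained via
 * rte_cryptodev_info_get().
 *
 *	static void
 *	example_dump_features(uint64_t feature_flags)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < 64; i++) {
 *			uint64_t flag = UINT64_C(1) << i;
 *			const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *			if ((feature_flags & flag) != 0 && name != NULL)
 *				printf("%s\n", name);
 *		}
 *	}
 */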
571 
572 /**  Crypto device information */
573 /* Structure rte_cryptodev_info 8< */
574 struct rte_cryptodev_info {
575 	const char *driver_name;	/**< Driver name. */
576 	uint8_t driver_id;		/**< Driver identifier */
577 	struct rte_device *device;	/**< Generic device information. */
578 
579 	uint64_t feature_flags;
580 	/**< Feature flags which expose HW/SW features for the given device */
581 
582 	const struct rte_cryptodev_capabilities *capabilities;
583 	/**< Array of the device's supported capabilities */
584 
585 	unsigned max_nb_queue_pairs;
586 	/**< Maximum number of queue pairs supported by device. */
587 
588 	uint16_t min_mbuf_headroom_req;
589 	/**< Minimum mbuf headroom required by device */
590 
591 	uint16_t min_mbuf_tailroom_req;
592 	/**< Minimum mbuf tailroom required by device */
593 
594 	struct {
595 		unsigned max_nb_sessions;
596 		/**< Maximum number of sessions supported by device.
597 		 * If 0, the device does not have any limitation in
598 		 * number of sessions that can be used.
599 		 */
600 	} sym;
601 };
602 /* >8 End of structure rte_cryptodev_info. */
603 
604 #define RTE_CRYPTODEV_DETACHED  (0)
605 #define RTE_CRYPTODEV_ATTACHED  (1)
606 
607 /** Definitions of Crypto device event types */
608 enum rte_cryptodev_event_type {
609 	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
610 	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
611 	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
612 };
613 
614 /** Crypto device queue pair configuration structure. */
615 /* Structure rte_cryptodev_qp_conf 8<*/
616 struct rte_cryptodev_qp_conf {
617 	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
618 	struct rte_mempool *mp_session;
619 	/**< The mempool for creating session in sessionless mode */
620 };
621 /* >8 End of structure rte_cryptodev_qp_conf. */
622 
623 /**
624  * Function type used for processing crypto ops when enqueue/dequeue burst is
625  * called.
626  *
627  * The callback function is called on enqueue/dequeue burst immediately.
628  *
629  * @param	dev_id		The identifier of the device.
630  * @param	qp_id		The index of the queue pair on which ops are
631  *				enqueued/dequeued. The value must be in the
632  *				range [0, nb_queue_pairs - 1] previously
633  *				supplied to *rte_cryptodev_configure*.
634  * @param	ops		The address of an array of *nb_ops* pointers
635  *				to *rte_crypto_op* structures which contain
636  *				the crypto operations to be processed.
637  * @param	nb_ops		The number of operations to process.
638  * @param	user_param	The arbitrary user parameter passed in by the
639  *				application when the callback was originally
640  *				registered.
641  * @return			The number of ops to be enqueued to the
642  *				crypto device.
643  */
644 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
645 		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
646 
647 /**
648  * Typedef for application callback function to be registered by application
649  * software for notification of device events
650  *
651  * @param	dev_id	Crypto device identifier
652  * @param	event	Crypto device event to register for notification of.
653  * @param	cb_arg	User specified parameter to be passed to the
654  *			user's callback function.
655  */
656 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
657 		enum rte_cryptodev_event_type event, void *cb_arg);
658 
659 
660 /** Crypto Device statistics */
661 struct rte_cryptodev_stats {
662 	uint64_t enqueued_count;
663 	/**< Count of all operations enqueued */
664 	uint64_t dequeued_count;
665 	/**< Count of all operations dequeued */
666 
667 	uint64_t enqueue_err_count;
668 	/**< Total error count on operations enqueued */
669 	uint64_t dequeue_err_count;
670 	/**< Total error count on operations dequeued */
671 };
672 
673 #define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
674 /**< Max length of name of crypto PMD */
675 
676 /**
677  * Get the device identifier for the named crypto device.
678  *
679  * @param	name	device name to select the device structure.
680  *
681  * @return
682  *   - Returns crypto device identifier on success.
683  *   - Return -1 on failure to find named crypto device.
684  */
685 int
686 rte_cryptodev_get_dev_id(const char *name);
687 
688 /**
689  * Get the crypto device name given a device identifier.
690  *
691  * @param dev_id
692  *   The identifier of the device
693  *
694  * @return
695  *   - Returns crypto device name.
696  *   - Returns NULL if crypto device is not present.
697  */
698 const char *
699 rte_cryptodev_name_get(uint8_t dev_id);
700 
701 /**
702  * Get the total number of crypto devices that have been successfully
703  * initialised.
704  *
705  * @return
706  *   - The total number of usable crypto devices.
707  */
708 uint8_t
709 rte_cryptodev_count(void);
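
/*
 * Usage sketch (illustrative only; it assumes device identifiers are
 * contiguous, which holds as long as no device has been detached):
 *
 *	uint8_t nb_devs = rte_cryptodev_count();
 *	uint8_t dev_id;
 *
 *	for (dev_id = 0; dev_id < nb_devs; dev_id++)
 *		printf("cryptodev %u: %s (socket %d)\n", dev_id,
 *			rte_cryptodev_name_get(dev_id),
 *			rte_cryptodev_socket_id(dev_id));
 */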
710 
711 /**
712  * Get the number of crypto devices that use the given driver.
713  *
714  * @param	driver_id	driver identifier.
715  *
716  * @return
717  *   Returns the number of crypto devices.
718  */
719 uint8_t
720 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
721 
722 /**
723  * Get number and identifiers of attached crypto devices that
724  * use the same crypto driver.
725  *
726  * @param	driver_name	driver name.
727  * @param	devices		output device identifiers.
728  * @param	nb_devices	maximum number of devices.
729  *
730  * @return
731  *   Returns the number of attached crypto devices.
732  */
733 uint8_t
734 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
735 		uint8_t nb_devices);
736 /**
737  * Return the NUMA socket to which a device is connected
738  *
739  * @param dev_id
740  *   The identifier of the device
741  * @return
742  *   The NUMA socket id to which the device is connected or
743  *   a default of zero if the socket could not be determined.
744  *   -1 if the dev_id value is out of range.
745  */
746 int
747 rte_cryptodev_socket_id(uint8_t dev_id);
748 
749 /** Crypto device configuration structure */
750 /* Structure rte_cryptodev_config 8< */
751 struct rte_cryptodev_config {
752 	int socket_id;			/**< Socket to allocate resources on */
753 	uint16_t nb_queue_pairs;
754 	/**< Number of queue pairs to configure on device */
755 	uint64_t ff_disable;
756 	/**< Feature flags to be disabled. Only the following features are
757 	 * allowed to be disabled,
758 	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
759 	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
760 	 *  - RTE_CRYPTODEV_FF_SECURITY
761 	 */
762 };
763 /* >8 End of structure rte_cryptodev_config. */
764 
765 /**
766  * Configure a device.
767  *
768  * This function must be invoked first before any other function in the
769  * API. This function can also be re-invoked when a device is in the
770  * stopped state.
771  *
772  * @param	dev_id		The identifier of the device to configure.
773  * @param	config		The crypto device configuration structure.
774  *
775  * @return
776  *   - 0: Success, device configured.
777  *   - <0: Error code returned by the driver configuration function.
778  */
779 int
780 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
781 
782 /**
783  * Start a device.
784  *
785  * The device start step is the last one and consists of setting the configured
786  * offload features and starting the transmit and the receive units of the
787  * device.
788  * On success, all basic functions exported by the API (link status,
789  * receive/transmit, and so on) can be invoked.
790  *
791  * @param dev_id
792  *   The identifier of the device.
793  * @return
794  *   - 0: Success, device started.
795  *   - <0: Error code of the driver device start function.
796  */
797 int
798 rte_cryptodev_start(uint8_t dev_id);
799 
800 /**
801  * Stop a device. The device can be restarted with a call to
802  * rte_cryptodev_start().
803  *
804  * @param	dev_id		The identifier of the device.
805  */
806 void
807 rte_cryptodev_stop(uint8_t dev_id);
808 
809 /**
810  * Close a device. The device cannot be restarted!
811  *
812  * @param	dev_id		The identifier of the device.
813  *
814  * @return
815  *  - 0 on successfully closing device
816  *  - <0 on failure to close device
817  */
818 int
819 rte_cryptodev_close(uint8_t dev_id);
820 
821 /**
822  * Allocate and set up a receive queue pair for a device.
823  *
824  *
825  * @param	dev_id		The identifier of the device.
826  * @param	queue_pair_id	The index of the queue pair to set up. The
827  *				value must be in the range [0, nb_queue_pair
828  *				- 1] previously supplied to
829  *				rte_cryptodev_configure().
830  * @param	qp_conf		The pointer to the configuration data to be
831  *				used for the queue pair.
832  * @param	socket_id	The *socket_id* argument is the socket
833  *				identifier in case of NUMA. The value can be
834  *				*SOCKET_ID_ANY* if there is no NUMA constraint
835  *				for the DMA memory allocated for the receive
836  *				queue pair.
837  *
838  * @return
839  *   - 0: Success, queue pair correctly set up.
840  *   - <0: Queue pair configuration failed
841  */
842 int
843 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
844 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
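
/*
 * Usage sketch of the typical device initialisation sequence (illustrative
 * only; the single queue pair, the descriptor count and the session mempool
 * are example assumptions):
 *
 *	static int
 *	example_dev_init(uint8_t dev_id, int socket_id,
 *			struct rte_mempool *sess_mp)
 *	{
 *		struct rte_cryptodev_config conf = {
 *			.socket_id = socket_id,
 *			.nb_queue_pairs = 1,
 *			.ff_disable = 0,
 *		};
 *		struct rte_cryptodev_qp_conf qp_conf = {
 *			.nb_descriptors = 2048,
 *			.mp_session = sess_mp,
 *		};
 *
 *		if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *			return -1;
 *		if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *				socket_id) < 0)
 *			return -1;
 *		return rte_cryptodev_start(dev_id);
 *	}
 */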
845 
846 /**
847  * Get the status of queue pairs setup on a specific crypto device
848  *
849  * @param	dev_id		Crypto device identifier.
850  * @param	queue_pair_id	The index of the queue pair to query. The
851  *				value must be in the range [0, nb_queue_pair
852  *				- 1] previously supplied to
853  *				rte_cryptodev_configure().
854  * @return
855  *   - 0: qp was not configured
856  *   - 1: qp was configured
857  *   - -EINVAL: device was not configured
858  */
859 int
860 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
861 
862 /**
863  * Get the number of queue pairs on a specific crypto device
864  *
865  * @param	dev_id		Crypto device identifier.
866  * @return
867  *   - The number of configured queue pairs.
868  */
869 uint16_t
870 rte_cryptodev_queue_pair_count(uint8_t dev_id);
871 
872 
873 /**
874  * Retrieve the general I/O statistics of a device.
875  *
876  * @param	dev_id		The identifier of the device.
877  * @param	stats		A pointer to a structure of type
878  *				*rte_cryptodev_stats* to be filled with the
879  *				values of device counters.
880  * @return
881  *   - Zero if successful.
882  *   - Non-zero otherwise.
883  */
884 int
885 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
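
/*
 * Usage sketch (illustrative only): retrieve and print the device counters.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enqueued %"PRIu64" (errors %"PRIu64"), "
 *			"dequeued %"PRIu64" (errors %"PRIu64")\n",
 *			stats.enqueued_count, stats.enqueue_err_count,
 *			stats.dequeued_count, stats.dequeue_err_count);
 */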
886 
887 /**
888  * Reset the general I/O statistics of a device.
889  *
890  * @param	dev_id		The identifier of the device.
891  */
892 void
893 rte_cryptodev_stats_reset(uint8_t dev_id);
894 
895 /**
896  * Retrieve the contextual information of a device.
897  *
898  * @param	dev_id		The identifier of the device.
899  * @param	dev_info	A pointer to a structure of type
900  *				*rte_cryptodev_info* to be filled with the
901  *				contextual information of the device.
902  *
903  * @note The capabilities field of dev_info is set to point to the first
904  * element of an array of struct rte_cryptodev_capabilities. The element after
905  * the last valid element has its op field set to
906  * RTE_CRYPTO_OP_TYPE_UNDEFINED.
907  */
908 void
909 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
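
/*
 * Usage sketch (illustrative only): query device information and walk the
 * capability array until the RTE_CRYPTO_OP_TYPE_UNDEFINED terminator.
 *
 *	struct rte_cryptodev_info info;
 *	const struct rte_cryptodev_capabilities *cap;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *
 *	for (cap = info.capabilities;
 *			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
 *		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
 *				cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 *			printf("cipher algo %d supported\n",
 *				cap->sym.cipher.algo);
 *	}
 */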
910 
911 
912 /**
913  * Register a callback function for specific device id.
914  *
915  * @param	dev_id		Device id.
916  * @param	event		The event of interest.
917  * @param	cb_fn		User supplied callback function to be called.
918  * @param	cb_arg		Pointer to the parameters for the registered
919  *				callback.
920  *
921  * @return
922  *  - On success, zero.
923  *  - On failure, a negative value.
924  */
925 int
926 rte_cryptodev_callback_register(uint8_t dev_id,
927 		enum rte_cryptodev_event_type event,
928 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
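
/*
 * Usage sketch (illustrative only): register a callback that is invoked on
 * RTE_CRYPTODEV_EVENT_ERROR notifications.
 *
 *	static void
 *	example_err_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		printf("error event %d on cryptodev %u\n", event, dev_id);
 *	}
 *
 *	static int
 *	example_register(uint8_t dev_id)
 *	{
 *		return rte_cryptodev_callback_register(dev_id,
 *				RTE_CRYPTODEV_EVENT_ERROR, example_err_cb, NULL);
 *	}
 */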
929 
930 /**
931  * Unregister a callback function for specific device id.
932  *
933  * @param	dev_id		The device identifier.
934  * @param	event		The event of interest.
935  * @param	cb_fn		User supplied callback function to be called.
936  * @param	cb_arg		Pointer to the parameters for the registered
937  *				callback.
938  *
939  * @return
940  *  - On success, zero.
941  *  - On failure, a negative value.
942  */
943 int
944 rte_cryptodev_callback_unregister(uint8_t dev_id,
945 		enum rte_cryptodev_event_type event,
946 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
947 
948 /**
949  * @warning
950  * @b EXPERIMENTAL: this API may change without prior notice.
951  *
952  * Query a cryptodev queue pair if there are pending RTE_CRYPTODEV_EVENT_ERROR
953  * events.
954  *
955  * @param          dev_id	The device identifier.
956  * @param          qp_id	Queue pair index to be queried.
957  *
958  * @return
959  *   - 1 if requested queue has a pending event.
960  *   - 0 if no pending event is found.
961  *   - a negative value on failure
962  */
963 __rte_experimental
964 int
965 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
966 
967 struct rte_cryptodev_callback;
968 
969 /** Structure to keep track of registered callbacks */
970 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
971 
972 /**
973  * Structure used to hold information about the callbacks to be called for a
974  * queue pair on enqueue/dequeue.
975  */
976 struct rte_cryptodev_cb {
977 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
978 	/**< Pointer to next callback */
979 	rte_cryptodev_callback_fn fn;
980 	/**< Pointer to callback function */
981 	void *arg;
982 	/**< Pointer to argument */
983 };
984 
985 /**
986  * @internal
987  * Structure used to hold information about the RCU for a queue pair.
988  */
989 struct rte_cryptodev_cb_rcu {
990 	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
991 	/**< Pointer to next callback */
992 	struct rte_rcu_qsbr *qsbr;
993 	/**< RCU QSBR variable per queue pair */
994 };
995 
996 /**
997  * Get the security context for the cryptodev.
998  *
999  * @param dev_id
1000  *   The device identifier.
1001  * @return
1002  *   - NULL on error.
1003  *   - Pointer to security context on success.
1004  */
1005 void *
1006 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1007 
1008 /**
1009  * Create a symmetric session mempool.
1010  *
1011  * @param name
1012  *   The unique mempool name.
1013  * @param nb_elts
1014  *   The number of elements in the mempool.
1015  * @param elt_size
1016  *   The size of the element. This should be the size of the cryptodev PMD
1017  *   session private data obtained through
1018  *   rte_cryptodev_sym_get_private_session_size() function call.
1019  *   For the user who wants to use the same mempool for heterogeneous PMDs
1020  *   this value should be the maximum value of their private session sizes.
1021  *   Please note the created mempool will have a bigger element size than this
1022  *   value, as the necessary session header and possible padding are added to
1023  *   each element.
1024  * @param cache_size
1025  *   The number of per-lcore cache elements
1026  * @param priv_size
1027  *   The private data size of each session.
1028  * @param socket_id
1029  *   The *socket_id* argument is the socket identifier in the case of
1030  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1031  *   constraint for the reserved zone.
1032  *
1033  * @return
1034  *  - On success returns the created session mempool pointer
1035  *  - On failure returns NULL
1036  */
1037 struct rte_mempool *
1038 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1039 	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1040 	int socket_id);
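
/*
 * Usage sketch (illustrative only; the element count and cache size are
 * example values): size the mempool elements from the device's private
 * session size.
 *
 *	static struct rte_mempool *
 *	example_create_sess_pool(uint8_t dev_id, int socket_id)
 *	{
 *		uint32_t elt_size =
 *			rte_cryptodev_sym_get_private_session_size(dev_id);
 *
 *		return rte_cryptodev_sym_session_pool_create("sym_sess_pool",
 *				2048, elt_size, 128, 0, socket_id);
 *	}
 */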
1041 
1042 
1043 /**
1044  * Create an asymmetric session mempool.
1045  *
1046  * @param name
1047  *   The unique mempool name.
1048  * @param nb_elts
1049  *   The number of elements in the mempool.
1050  * @param cache_size
1051  *   The number of per-lcore cache elements
1052  * @param user_data_size
1053  *   The size of user data to be placed after session private data.
1054  * @param socket_id
1055  *   The *socket_id* argument is the socket identifier in the case of
1056  *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
1057  *   constraint for the reserved zone.
1058  *
1059  * @return
1060  *  - On success return mempool
1061  *  - On failure returns NULL
1062  */
1063 struct rte_mempool *
1064 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1065 	uint32_t cache_size, uint16_t user_data_size, int socket_id);
1066 
1067 /**
1068  * Create symmetric crypto session and fill out private data for the device id,
1069  * based on its device type.
1070  *
1071  * @param   dev_id   ID of device that we want the session to be used on
1072  * @param   xforms   Symmetric crypto transform operations to apply on flow
1073  *                   processed with this session
1074  * @param   mp       Mempool to allocate symmetric session objects from
1075  *
1076  * @return
1077  *  - On success return pointer to sym-session.
1078  *  - On failure returns NULL and rte_errno is set to the error code:
1079  *    - EINVAL on invalid arguments.
1080  *    - ENOMEM on memory error for session allocation.
1081  *    - ENOTSUP if device doesn't support session configuration.
1082  */
1083 void *
1084 rte_cryptodev_sym_session_create(uint8_t dev_id,
1085 		struct rte_crypto_sym_xform *xforms,
1086 		struct rte_mempool *mp);
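
/*
 * Usage sketch (illustrative only; the AES-CBC algorithm and the IV layout
 * placing the IV right after the symmetric operation are example
 * assumptions): create an encrypt session from a single cipher xform.
 *
 *	static void *
 *	example_create_cipher_session(uint8_t dev_id,
 *			struct rte_mempool *sess_mp,
 *			const uint8_t *key, uint16_t key_len)
 *	{
 *		struct rte_crypto_sym_xform xform = {
 *			.next = NULL,
 *			.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *			.cipher = {
 *				.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *				.key = { .data = key, .length = key_len },
 *				.iv = {
 *					.offset = sizeof(struct rte_crypto_op) +
 *						sizeof(struct rte_crypto_sym_op),
 *					.length = 16,
 *				},
 *			},
 *		};
 *
 *		return rte_cryptodev_sym_session_create(dev_id, &xform, sess_mp);
 *	}
 */
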
1087 /**
1088  * Create and initialise an asymmetric crypto session structure.
1089  * Calls the PMD to configure the private session data.
1090  *
1091  * @param   dev_id   ID of device that we want the session to be used on
1092  * @param   xforms   Asymmetric crypto transform operations to apply on flow
1093  *                   processed with this session
1094  * @param   mp       mempool to allocate asymmetric session
1095  *                   objects from
1096  * @param   session  void ** for session to be used
1097  *
1098  * @return
1099  *  - 0 on success.
1100  *  - -EINVAL on invalid arguments.
1101  *  - -ENOMEM on memory error for session allocation.
1102  *  - -ENOTSUP if device doesn't support session configuration.
1103  */
1104 int
1105 rte_cryptodev_asym_session_create(uint8_t dev_id,
1106 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1107 		void **session);
1108 
1109 /**
1110  * Free a session for a device and return it to its mempool.
1111  * It is the application's responsibility to ensure that there are no
1112  * in-flight operations still using the session.
1113  *
1114  * @param   dev_id   ID of device that uses the session.
1115  * @param   sess     Session header to be freed.
1116  *
1117  * @return
1118  *  - 0 if successful.
1119  *  - -EINVAL if the session is NULL or the device IDs do not match.
1120  */
1121 int
1122 rte_cryptodev_sym_session_free(uint8_t dev_id,
1123 	void *sess);
1124 
1125 /**
1126  * Clears and frees asymmetric crypto session header and private data,
1127  * returning it to its original mempool.
1128  *
1129  * @param   dev_id   ID of device that uses the asymmetric session.
1130  * @param   sess     Session header to be freed.
1131  *
1132  * @return
1133  *  - 0 if successful.
1134  *  - -EINVAL if device is invalid or session is NULL.
1135  */
1136 int
1137 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1138 
1139 /**
1140  * Get the size of the asymmetric session header.
1141  *
1142  * @return
1143  *   Size of the asymmetric session header.
1144  */
1145 unsigned int
1146 rte_cryptodev_asym_get_header_session_size(void);
1147 
1148 /**
1149  * Get the size of the private symmetric session data
1150  * for a device.
1151  *
1152  * @param	dev_id		The device identifier.
1153  *
1154  * @return
1155  *   - Size of the private data, if successful
1156  *   - 0 if device is invalid or does not have private
1157  *   symmetric session
1158  */
1159 unsigned int
1160 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
1161 
1162 /**
1163  * Get the size of the private data for asymmetric session
1164  * on device
1165  *
1166  * @param	dev_id		The device identifier.
1167  *
1168  * @return
1169  *   - Size of the asymmetric private data, if successful
1170  *   - 0 if device is invalid or does not have private session
1171  */
1172 unsigned int
1173 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
1174 
1175 /**
1176  * Validate if the crypto device index is valid attached crypto device.
1177  *
1178  * @param	dev_id	Crypto device index.
1179  *
1180  * @return
1181  *   - 1 if the device index is valid, 0 otherwise.
1182  */
1183 unsigned int
1184 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1185 
1186 /**
1187  * Provide driver identifier.
1188  *
1189  * @param name
1190  *   The pointer to a driver name.
1191  * @return
1192  *  The driver type identifier or -1 if no driver found
1193  */
1194 int rte_cryptodev_driver_id_get(const char *name);
1195 
1196 /**
1197  * Provide driver name.
1198  *
1199  * @param driver_id
1200  *   The driver identifier.
1201  * @return
1202  *  The driver name or NULL if no driver is found
1203  */
1204 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1205 
1206 /**
1207  * Store user data in a session.
1208  *
1209  * @param	sess		Session pointer allocated by
1210  *				*rte_cryptodev_sym_session_create*.
1211  * @param	data		Pointer to the user data.
1212  * @param	size		Size of the user data.
1213  *
1214  * @return
1215  *  - On success, zero.
1216  *  - On failure, a negative value.
1217  */
1218 int
1219 rte_cryptodev_sym_session_set_user_data(void *sess,
1220 					void *data,
1221 					uint16_t size);
1222 
1223 #define CRYPTO_SESS_OPAQUE_DATA_OFF 0
1224 /**
1225  * Get opaque data from session handle
1226  */
1227 static inline uint64_t
1228 rte_cryptodev_sym_session_opaque_data_get(void *sess)
1229 {
1230 	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
1231 }
1232 
1233 /**
1234  * Set opaque data in session handle
1235  */
1236 static inline void
1237 rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
1238 {
1239 	uint64_t *data;
1240 	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
1241 	*data = opaque;
1242 }
1243 
1244 /**
1245  * Get user data stored in a session.
1246  *
1247  * @param	sess		Session pointer allocated by
1248  *				*rte_cryptodev_sym_session_create*.
1249  *
1250  * @return
1251  *  - On success return pointer to user data.
1252  *  - On failure returns NULL.
1253  */
1254 void *
1255 rte_cryptodev_sym_session_get_user_data(void *sess);
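
/*
 * Usage sketch (illustrative only; *sess* is assumed to come from
 * rte_cryptodev_sym_session_create() on a mempool created with enough
 * per-session user data space):
 *
 *	struct example_ctx {
 *		uint32_t flow_id;
 *	} ctx = { .flow_id = 42 };
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0) {
 *		struct example_ctx *p =
 *			rte_cryptodev_sym_session_get_user_data(sess);
 *		printf("flow %u\n", p->flow_id);
 *	}
 *
 * The set call copies the data into the session's user data area, so *p*
 * points at the session-resident copy rather than at the local *ctx*.
 */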
1256 
1257 /**
1258  * Store user data in an asymmetric session.
1259  *
1260  * @param	sess		Session pointer allocated by
1261  *				*rte_cryptodev_asym_session_create*.
1262  * @param	data		Pointer to the user data.
1263  * @param	size		Size of the user data.
1264  *
1265  * @return
1266  *  - On success, zero.
1267  *  - -EINVAL if the session pointer is invalid.
1268  *  - -ENOMEM if the available user data size is smaller than the size parameter.
1269  */
1270 int
1271 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1272 
1273 /**
1274  * Get user data stored in an asymmetric session.
1275  *
1276  * @param	sess		Session pointer allocated by
1277  *				*rte_cryptodev_asym_session_create*.
1278  *
1279  * @return
1280  *  - On success return pointer to user data.
1281  *  - On failure returns NULL.
1282  */
1283 void *
1284 rte_cryptodev_asym_session_get_user_data(void *sess);
1285 
1286 /**
1287  * Perform actual crypto processing (encrypt/digest or auth/decrypt)
1288  * on user provided data.
1289  *
1290  * @param	dev_id	The device identifier.
1291  * @param	sess	Cryptodev session structure
1292  * @param	ofs	Start and stop offsets for auth and cipher operations
1293  * @param	vec	Vectorized operation descriptor
1294  *
1295  * @return
1296  *  - Returns number of successfully processed packets.
1297  */
1298 uint32_t
1299 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1300 	void *sess, union rte_crypto_sym_ofs ofs,
1301 	struct rte_crypto_sym_vec *vec);
1302 
1303 /**
1304  * Get the size of the raw data-path context buffer.
1305  *
1306  * @param	dev_id		The device identifier.
1307  *
1308  * @return
1309  *   - If the device supports raw data-path APIs, return the context size.
1310  *   - If the device does not support the APIs, return -1.
1311  */
1312 int
1313 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1314 
1315 /**
1316  * Set session event meta data
1317  *
1318  * @param	dev_id		The device identifier.
1319  * @param	sess            Crypto or security session.
1320  * @param	op_type         Operation type.
1321  * @param	sess_type       Session type.
1322  * @param	ev_mdata	Pointer to the event crypto meta data
1323  *				(aka *union rte_event_crypto_metadata*)
1324  * @param	size            Size of ev_mdata.
1325  *
1326  * @return
1327  *  - On success, zero.
1328  *  - On failure, a negative value.
1329  */
1330 int
1331 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1332 	enum rte_crypto_op_type op_type,
1333 	enum rte_crypto_op_sess_type sess_type,
1334 	void *ev_mdata, uint16_t size);
1335 
1336 /**
1337  * Union of different crypto session types, including session-less xform
1338  * pointer.
1339  */
1340 union rte_cryptodev_session_ctx {void *crypto_sess;
1341 	struct rte_crypto_sym_xform *xform;
1342 	struct rte_security_session *sec_sess;
1343 };
1344 
1345 /**
1346  * Enqueue a vectorized operation descriptor into the device queue but the
1347  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1348  * is called.
1349  *
1350  * @param	qp		Driver specific queue pair data.
1351  * @param	drv_ctx		Driver specific context data.
1352  * @param	vec		Vectorized operation descriptor.
1353  * @param	ofs		Start and stop offsets for auth and cipher
1354  *				operations.
1355  * @param	user_data	The array of user data for dequeue later.
1356  * @param	enqueue_status	Driver written value to specify the
1357  *				enqueue status. Possible values:
1358  *				- 1: The number of operations returned are
1359  *				     enqueued successfully.
1360  *				- 0: The number of operations returned are
1361  *				     cached into the queue but are not processed
1362  *				     until rte_cryptodev_raw_enqueue_done() is
1363  *				     called.
1364  *				- negative integer: Error occurred.
1365  * @return
1366  *   - The number of operations in the descriptor successfully enqueued or
1367  *     cached into the queue but not enqueued yet, depends on the
1368  *     "enqueue_status" value.
1369  */
1370 typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
1371 	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1372 	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1373 
1374 /**
1375  * Enqueue single raw data vector into the device queue but the driver may or
1376  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1377  *
1378  * @param	qp		Driver specific queue pair data.
1379  * @param	drv_ctx		Driver specific context data.
1380  * @param	data_vec	The buffer data vector.
1381  * @param	n_data_vecs	Number of buffer data vectors.
1382  * @param	ofs		Start and stop offsets for auth and cipher
1383  *				operations.
1384  * @param	iv		IV virtual and IOVA addresses
1385  * @param	digest		digest virtual and IOVA addresses
1386  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1387  *				depends on the algorithm used.
1388  * @param	user_data	The user data.
1389  * @return
1390  *   - 1: The data vector is enqueued successfully.
1391  *   - 0: The data vector is cached into the queue but is not processed
1392  *        until rte_cryptodev_raw_enqueue_done() is called.
1393  *   - negative integer: failure.
1394  */
1395 typedef int (*cryptodev_sym_raw_enqueue_t)(
1396 	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1397 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1398 	struct rte_crypto_va_iova_ptr *iv,
1399 	struct rte_crypto_va_iova_ptr *digest,
1400 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1401 	void *user_data);
1402 
1403 /**
1404  * Inform the cryptodev queue pair to start processing or finish dequeuing all
1405  * enqueued/dequeued operations.
1406  *
1407  * @param	qp		Driver specific queue pair data.
1408  * @param	drv_ctx		Driver specific context data.
1409  * @param	n		The total number of processed operations.
1410  * @return
1411  *   - On success return 0.
1412  *   - On failure return negative integer.
1413  */
1414 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1415 	uint32_t n);
1416 
1417 /**
1418  * Typedef of the function provided by the user for the driver to get the
1419  * dequeue count. The function may return a fixed number or the number parsed
1420  * from the user data stored in the first processed operation.
1421  *
1422  * @param	user_data	Dequeued user data.
1423  * @return
1424  *  - The number of operations to be dequeued.
1425  */
1426 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1427 
1428 /**
1429  * Typedef of the function provided by the user to handle post-dequeue
1430  * processing, such as filling in the operation status.
1431  *
1432  * @param	user_data	Dequeued user data.
1433  * @param	index		Index number of the processed descriptor.
1434  * @param	is_op_success	Operation status provided by the driver.
1435  */
1436 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1437 	uint32_t index, uint8_t is_op_success);
1438 
1439 /**
1440  * Dequeue a burst of symmetric crypto processing.
1441  *
1442  * @param	qp			Driver specific queue pair data.
1443  * @param	drv_ctx			Driver specific context data.
1444  * @param	get_dequeue_count	User provided callback function to
1445  *					obtain dequeue operation count.
1446  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1447  *					value is used to pass the maximum
1448  *					number of operations to be dequeued.
1449  * @param	post_dequeue		User provided callback function to
1450  *					post-process a dequeued operation.
1451  * @param	out_user_data		User data pointer array to be retrieved
1452  *					from the device queue. If
1453  *					*is_user_data_array* is set, there
1454  *					should be enough room to store all
1455  *					user data.
1456  * @param	is_user_data_array	Set 1 if every dequeued user data will
1457  *					be written into out_user_data array.
1458  *					Set 0 if only the first user data will
1459  *					be written into out_user_data array.
1460  * @param	n_success		Driver written value to specify the
1461  *					total successful operations count.
1462  * @param	dequeue_status		Driver written value to specify the
1463  *					dequeue status. Possible values:
1464  *					- 1: Successfully dequeued the number
1465  *					     of operations returned. The user
1466  *					     data previously set during enqueue
1467  *					     is stored in the "out_user_data".
1468  *					- 0: The number of operations returned
1469  *					     are completed and the user data is
1470  *					     stored in the "out_user_data", but
1471  *					     they are not freed from the queue
1472  *					     until
1473  *					     rte_cryptodev_raw_dequeue_done()
1474  *					     is called.
1475  *					- negative integer: Error occurred.
1476  * @return
1477  *   - The number of operations dequeued or completed but not freed from the
1478  *     queue, depends on "dequeue_status" value.
1479  */
1480 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1481 	uint8_t *drv_ctx,
1482 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1483 	uint32_t max_nb_to_dequeue,
1484 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1485 	void **out_user_data, uint8_t is_user_data_array,
1486 	uint32_t *n_success, int *dequeue_status);
1487 
1488 /**
1489  * Dequeue a symmetric crypto processing.
1490  *
1491  * @param	qp			Driver specific queue pair data.
1492  * @param	drv_ctx			Driver specific context data.
1493  * @param	dequeue_status		Driver written value to specify the
1494  *					dequeue status. Possible values:
1495  *					- 1: Successfully dequeued an operation.
1496  *					     The user data is returned.
1497  *					- 0: The first operation in the queue
1498  *					     is completed and the user data
1499  *					     previously set during enqueue is
1500  *					     returned, but it is not freed from
1501  *					     the queue until
1502  *					     rte_cryptodev_raw_dequeue_done() is
1503  *					     called.
1504  *					- negative integer: Error occurred.
1505  * @param	op_status		Driver written value to specify
1506  *					operation status.
1507  * @return
1508  *   - The user data pointer retrieved from device queue or NULL if no
1509  *     operation is ready for dequeue.
1510  */
1511 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1512 		void *qp, uint8_t *drv_ctx, int *dequeue_status,
1513 		enum rte_crypto_op_status *op_status);
1514 
1515 /**
1516  * Context data for raw data-path API crypto process. The buffer of this
1517  * structure is to be allocated by the user application with a size equal to
1518  * or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
1519  */
1520 struct rte_crypto_raw_dp_ctx {
1521 	void *qp_data;
1522 
1523 	cryptodev_sym_raw_enqueue_t enqueue;
1524 	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1525 	cryptodev_sym_raw_operation_done_t enqueue_done;
1526 	cryptodev_sym_raw_dequeue_t dequeue;
1527 	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1528 	cryptodev_sym_raw_operation_done_t dequeue_done;
1529 
1530 	/* Driver specific context data */
1531 	__extension__ uint8_t drv_ctx_data[];
1532 };
1533 
1534 /**
1535  * Configure raw data-path context data.
1536  *
1537  * @param	dev_id		The device identifier.
1538  * @param	qp_id		The index of the queue pair from which to
1539  *				retrieve processed packets. The value must be
1540  *				in the range [0, nb_queue_pair - 1] previously
1541  *				supplied to rte_cryptodev_configure().
1542  * @param	ctx		The raw data-path context data.
1543  * @param	sess_type	Session type.
1544  * @param	session_ctx	Session context data.
1545  * @param	is_update	Set to 0 to initialize the ctx.
1546  *				Set to 1 if the ctx is already initialized and only
1547  *				the session context data is to be updated.
1548  * @return
1549  *   - On success return 0.
1550  *   - On failure return negative integer.
1551  *     - -EINVAL if input parameters are invalid.
1552  *     - -ENOTSUP if crypto device does not support raw DP operations with the
1553  *        provided session.
1554  */
1555 int
1556 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1557 	struct rte_crypto_raw_dp_ctx *ctx,
1558 	enum rte_crypto_op_sess_type sess_type,
1559 	union rte_cryptodev_session_ctx session_ctx,
1560 	uint8_t is_update);
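
/*
 * Usage sketch (illustrative only; rte_zmalloc()/rte_free() from
 * <rte_malloc.h> and a session created earlier with
 * rte_cryptodev_sym_session_create() are assumptions): allocate and
 * initialise a raw data-path context for one queue pair.
 *
 *	static struct rte_crypto_raw_dp_ctx *
 *	example_setup_raw_dp(uint8_t dev_id, uint16_t qp_id, void *sess)
 *	{
 *		union rte_cryptodev_session_ctx sess_ctx = {
 *			.crypto_sess = sess,
 *		};
 *		struct rte_crypto_raw_dp_ctx *ctx;
 *		int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *
 *		if (size < 0)
 *			return NULL;
 *		ctx = rte_zmalloc("raw_dp_ctx", size, 0);
 *		if (ctx == NULL)
 *			return NULL;
 *		if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *				RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
 *			rte_free(ctx);
 *			return NULL;
 *		}
 *		return ctx;
 *	}
 *
 * A negative rte_cryptodev_get_raw_dp_ctx_size() return value means the
 * device does not support the raw data-path APIs.
 */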
1561 
1562 /**
1563  * Enqueue a vectorized operation descriptor into the device queue but the
1564  * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
1565  * is called.
1566  *
1567  * @param	ctx		The initialized raw data-path context data.
1568  * @param	vec		Vectorized operation descriptor.
1569  * @param	ofs		Start and stop offsets for auth and cipher
1570  *				operations.
1571  * @param	user_data	The array of user data for dequeue later.
1572  * @param	enqueue_status	Driver written value to specify the
1573  *				enqueue status. Possible values:
1574  *				- 1: The number of operations returned are
1575  *				     enqueued successfully.
1576  *				- 0: The number of operations returned are
1577  *				     cached into the queue but are not processed
1578  *				     until rte_cryptodev_raw_enqueue_done() is
1579  *				     called.
1580  *				- negative integer: Error occurred.
1581  * @return
1582  *   - The number of operations in the descriptor successfully enqueued or
1583  *     cached into the queue but not enqueued yet, depends on the
1584  *     "enqueue_status" value.
1585  */
1586 uint32_t
1587 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1588 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1589 	void **user_data, int *enqueue_status);
1590 
1591 /**
1592  * Enqueue single raw data vector into the device queue but the driver may or
1593  * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
1594  *
1595  * @param	ctx		The initialized raw data-path context data.
1596  * @param	data_vec	The buffer data vector.
1597  * @param	n_data_vecs	Number of buffer data vectors.
1598  * @param	ofs		Start and stop offsets for auth and cipher
1599  *				operations.
1600  * @param	iv		IV virtual and IOVA addresses
1601  * @param	digest		digest virtual and IOVA addresses
1602  * @param	aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
1603  *				depends on the algorithm used.
1604  * @param	user_data	The user data.
1605  * @return
1606  *   - 1: The data vector is enqueued successfully.
1607  *   - 0: The data vector is cached into the queue but is not processed
1608  *        until rte_cryptodev_raw_enqueue_done() is called.
1609  *   - negative integer: failure.
1610  */
1611 __rte_experimental
1612 static __rte_always_inline int
1613 rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
1614 	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1615 	union rte_crypto_sym_ofs ofs,
1616 	struct rte_crypto_va_iova_ptr *iv,
1617 	struct rte_crypto_va_iova_ptr *digest,
1618 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1619 	void *user_data)
1620 {
1621 	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1622 		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1623 }
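/*
 * Minimal single-vector sketch (illustrative only): enqueue one contiguous
 * buffer through an initialized context. The buffer, IV and digest addresses
 * and their IOVAs are assumed to be prepared by the caller; "ofs" is left
 * zeroed and aad_or_auth_iv is NULL, assuming an algorithm that needs neither.
 *
 *	struct rte_crypto_vec data = {
 *		.base = buf_va, .iova = buf_iova, .len = buf_len,
 *	};
 *	struct rte_crypto_va_iova_ptr iv = { .va = iv_va, .iova = iv_iova };
 *	struct rte_crypto_va_iova_ptr digest = {
 *		.va = dig_va, .iova = dig_iova,
 *	};
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	int ret = rte_cryptodev_raw_enqueue(ctx, &data, 1, ofs, &iv, &digest,
 *			NULL, user_data);
 *
 *	if (ret == 0)
 *		ret = rte_cryptodev_raw_enqueue_done(ctx, 1);
 */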
1624 
1625 /**
1626  * Start processing all operations enqueued since the last
1627  * rte_cryptodev_configure_raw_dp_ctx() call.
1628  *
1629  * @param	ctx	The initialized raw data-path context data.
1630  * @param	n	The number of operations cached.
1631  * @return
1632  *   - On success return 0.
1633  *   - On failure return negative integer.
1634  */
1635 int
1636 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
1637 		uint32_t n);
1638 
1639 /**
1640  * Dequeue a burst of processed symmetric crypto operations.
1641  *
1642  * @param	ctx			The initialized raw data-path context
1643  *					data.
1644  * @param	get_dequeue_count	User provided callback function to
1645  *					obtain dequeue operation count.
1646  * @param	max_nb_to_dequeue	When get_dequeue_count is NULL this
1647  *					value is used to pass the maximum
1648  *					number of operations to be dequeued.
1649  * @param	post_dequeue		User provided callback function to
1650  *					post-process a dequeued operation.
1651  * @param	out_user_data		User data pointer array to be retrieved
1652  *					from the device queue. If
1653  *					*is_user_data_array* is set, there
1654  *					should be enough room to store all
1655  *					user data.
1656  * @param	is_user_data_array	Set 1 if every dequeued user data will
1657  *					be written into the out_user_data array.
1658  *					Set 0 if only the first user data will
1659  *					be written into the out_user_data array.
1660  * @param	n_success		Driver written value to specify the
1661  *					total count of successful operations.
1662  * @param	dequeue_status		Driver written value to specify the
1663  *					dequeue status. Possible values:
1664  *					- 1: Successfully dequeued the number
1665  *					     of operations returned. The user
1666  *					     data previously set during enqueue
1667  *					     is stored in the "out_user_data".
1668  *					- 0: The returned number of operations
1669  *					     is completed and the user data is
1670  *					     stored in "out_user_data", but the
1671  *					     operations are not freed from the
1672  *					     queue until
1673  *					     rte_cryptodev_raw_dequeue_done()
1674  *					     is called.
1675  *					- negative integer: Error occurred.
1676  * @return
1677  *   - The number of operations dequeued, or completed but not freed from the
1678  *     queue, depending on the "dequeue_status" value.
1679  */
1680 uint32_t
1681 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
1682 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1683 	uint32_t max_nb_to_dequeue,
1684 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
1685 	void **out_user_data, uint8_t is_user_data_array,
1686 	uint32_t *n_success, int *dequeue_status);
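/*
 * Minimal usage sketch (illustrative only): dequeue up to 32 operations
 * without a user count callback and release them afterwards. The post-dequeue
 * hook follows the rte_cryptodev_raw_post_dequeue_t prototype defined earlier
 * in this file.
 *
 *	static void
 *	post_deq(void *user_data, uint32_t index, uint8_t is_op_success)
 *	{
 *		// e.g. mark the request identified by user_data
 *	}
 *
 *	void *udata[32];
 *	uint32_t n_success;
 *	int deq_status;
 *	uint32_t n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, 32, post_deq,
 *			udata, 1, &n_success, &deq_status);
 *
 *	if (deq_status == 0 && n > 0)
 *		rte_cryptodev_raw_dequeue_done(ctx, n);
 */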
1687 
1688 /**
1689  * Dequeue a single processed symmetric crypto operation.
1690  *
1691  * @param	ctx			The initialized raw data-path context
1692  *					data.
1693  * @param	dequeue_status		Driver written value to specify the
1694  *					dequeue status. Possible values:
1695  *					- 1: Successfully dequeued an operation.
1696  *					     The user data is returned.
1697  *					- 0: The first operation in the queue
1698  *					     is completed and the user data
1699  *					     previously set during enqueue is
1700  *					     returned, but it is not freed from
1701  *					     the queue until
1702  *					     rte_cryptodev_raw_dequeue_done() is
1703  *					     called.
1704  *					- negative integer: Error occurred.
1705  * @param	op_status		Driver written value to specify
1706  *					operation status.
1707  * @return
1708  *   - The user data pointer retrieved from device queue or NULL if no
1709  *     operation is ready for dequeue.
1710  */
1711 __rte_experimental
1712 static __rte_always_inline void *
1713 rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
1714 		int *dequeue_status, enum rte_crypto_op_status *op_status)
1715 {
1716 	return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1717 			op_status);
1718 }
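/*
 * Minimal single-op sketch (illustrative only): poll one completed operation
 * from an initialized context and release it from the queue.
 *
 *	int deq_status;
 *	enum rte_crypto_op_status op_status;
 *	void *udata = rte_cryptodev_raw_dequeue(ctx, &deq_status, &op_status);
 *
 *	if (udata != NULL && deq_status == 0)
 *		rte_cryptodev_raw_dequeue_done(ctx, 1);
 *	// op_status reports whether the operation itself succeeded
 */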
1719 
1720 /**
1721  * Inform the queue pair that the dequeue of operations is finished.
1722  *
1723  * @param	ctx	The initialized raw data-path context data.
1724  * @param	n	The number of operations.
1725  * @return
1726  *   - On success return 0.
1727  *   - On failure return negative integer.
1728  */
1729 int
1730 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
1731 		uint32_t n);
1732 
1733 /**
1734  * Add a user callback for a given crypto device and queue pair which will be
1735  * called on crypto ops enqueue.
1736  *
1737  * This API configures a function to be called for each burst of crypto ops
1738  * received on a given crypto device queue pair. The return value is a pointer
1739  * that can be used later to remove the callback using
1740  * rte_cryptodev_remove_enq_callback().
1741  *
1742  * Callbacks registered by the application do not survive
1743  * rte_cryptodev_configure() as it reinitializes the callback list.
1744  * It is the user's responsibility to remove all installed callbacks before
1745  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1746  * The application is expected to call the add API after rte_cryptodev_configure().
1747  *
1748  * Multiple functions can be registered per queue pair and they are called
1749  * in the order they were added. The API does not restrict the maximum number
1750  * of callbacks.
1751  *
1752  * @param	dev_id		The identifier of the device.
1753  * @param	qp_id		The index of the queue pair on which ops are
1754  *				to be enqueued for processing. The value
1755  *				must be in the range [0, nb_queue_pairs - 1]
1756  *				previously supplied to
1757  *				*rte_cryptodev_configure*.
1758  * @param	cb_fn		The callback function
1759  * @param	cb_arg		A generic pointer parameter which will be passed
1760  *				to each invocation of the callback function on
1761  *				this crypto device and queue pair.
1762  *
1763  * @return
1764  *  - NULL on error; rte_errno will contain the error code.
1765  *  - On success, a pointer value which can later be used to remove the
1766  *    callback.
1767  */
1768 struct rte_cryptodev_cb *
1769 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1770 			       uint16_t qp_id,
1771 			       rte_cryptodev_callback_fn cb_fn,
1772 			       void *cb_arg);
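/*
 * Minimal usage sketch (illustrative only): count ops enqueued on device 0,
 * queue pair 0. The callback follows the rte_cryptodev_callback_fn prototype
 * defined earlier in this file; registration happens after
 * rte_cryptodev_configure() and queue pair setup.
 *
 *	static uint64_t enq_count;
 *
 *	static uint16_t
 *	count_enq(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(0, 0, count_enq, &enq_count);
 *	if (cb == NULL)
 *		return -rte_errno;
 *	...
 *	rte_cryptodev_remove_enq_callback(0, 0, cb);	// before reconfigure
 */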
1773 
1774 /**
1775  * Remove a user callback function for a given crypto device and queue pair.
1776  *
1777  * This function is used to remove enqueue callbacks that were added to a
1778  * crypto device queue pair using rte_cryptodev_add_enq_callback().
1779  *
1782  * @param	dev_id		The identifier of the device.
1783  * @param	qp_id		The index of the queue pair on which ops are
1784  *				to be enqueued. The value must be in the
1785  *				range [0, nb_queue_pairs - 1] previously
1786  *				supplied to *rte_cryptodev_configure*.
1787  * @param	cb		Pointer to user supplied callback created via
1788  *				rte_cryptodev_add_enq_callback().
1789  *
1790  * @return
1791  *   -  0: Success. Callback was removed.
1792  *   - <0: The dev_id or the qp_id is out of range, or the callback
1793  *         is NULL or not found for the crypto device queue pair.
1794  */
1795 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1796 				      uint16_t qp_id,
1797 				      struct rte_cryptodev_cb *cb);
1798 
1799 /**
1800  * Add a user callback for a given crypto device and queue pair which will be
1801  * called on crypto ops dequeue.
1802  *
1803  * This API configures a function to be called for each burst of crypto ops
1804  * received on a given crypto device queue pair. The return value is a pointer
1805  * that can be used later to remove the callback using
1806  * rte_cryptodev_remove_deq_callback().
1807  *
1808  * Callbacks registered by the application do not survive
1809  * rte_cryptodev_configure() as it reinitializes the callback list.
1810  * It is the user's responsibility to remove all installed callbacks before
1811  * calling rte_cryptodev_configure() to avoid possible memory leakage.
1812  * The application is expected to call the add API after rte_cryptodev_configure().
1813  *
1814  * Multiple functions can be registered per queue pair and they are called
1815  * in the order they were added. The API does not restrict the maximum number
1816  * of callbacks.
1817  *
1818  * @param	dev_id		The identifier of the device.
1819  * @param	qp_id		The index of the queue pair on which ops are
1820  *				to be dequeued. The value must be in the
1821  *				range [0, nb_queue_pairs - 1] previously
1822  *				supplied to *rte_cryptodev_configure*.
1823  * @param	cb_fn		The callback function
1824  * @param	cb_arg		A generic pointer parameter which will be passed
1825  *				to each invocation of the callback function on
1826  *				this crypto device and queue pair.
1827  *
1828  * @return
1829  *   - NULL on error; rte_errno will contain the error code.
1830  *   - On success, a pointer value which can later be used to remove the
1831  *     callback.
1832  */
1833 struct rte_cryptodev_cb *
1834 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1835 			       uint16_t qp_id,
1836 			       rte_cryptodev_callback_fn cb_fn,
1837 			       void *cb_arg);
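/*
 * Minimal usage sketch (illustrative only): tally failed ops as they are
 * dequeued from device 0, queue pair 0, assuming the same
 * rte_cryptodev_callback_fn prototype as in the enqueue example above.
 *
 *	static uint64_t err_count;
 *
 *	static uint16_t
 *	count_errors(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *user_param)
 *	{
 *		uint16_t i;
 *
 *		for (i = 0; i < nb_ops; i++)
 *			if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *				(*(uint64_t *)user_param)++;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_deq_callback(0, 0, count_errors, &err_count);
 */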
1838 
1839 /**
1840  * Remove a user callback function for a given crypto device and queue pair.
1841  *
1842  * This function is used to remove dequeue callbacks that were added to a
1843  * crypto device queue pair using rte_cryptodev_add_deq_callback().
1844  *
1847  * @param	dev_id		The identifier of the device.
1848  * @param	qp_id		The index of the queue pair on which ops are
1849  *				to be dequeued. The value must be in the
1850  *				range [0, nb_queue_pairs - 1] previously
1851  *				supplied to *rte_cryptodev_configure*.
1852  * @param	cb		Pointer to user supplied callback created via
1853  *				rte_cryptodev_add_deq_callback().
1854  *
1855  * @return
1856  *   -  0: Success. Callback was removed.
1857  *   - <0: The dev_id or the qp_id is out of range, or the callback
1858  *         is NULL or not found for the crypto device queue pair.
1859  */
1860 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1861 				      uint16_t qp_id,
1862 				      struct rte_cryptodev_cb *cb);
1863 
1864 #include <rte_cryptodev_core.h>
1865 /**
1866  *
1867  * Dequeue a burst of processed crypto operations from a queue on the crypto
1868  * device. The dequeued operation are stored in *rte_crypto_op* structures
1869  * whose pointers are supplied in the *ops* array.
1870  *
1871  * The rte_cryptodev_dequeue_burst() function returns the number of ops
1872  * actually dequeued, which is the number of *rte_crypto_op* data structures
1873  * effectively supplied into the *ops* array.
1874  *
1875  * A return value equal to *nb_ops* indicates that the queue contained
1876  * at least *nb_ops* operations, and this is likely to signify that other
1877  * processed operations remain in the device's output queue. Applications
1878  * implementing a "retrieve as many processed operations as possible" policy
1879  * can check this specific case and keep invoking the
1880  * rte_cryptodev_dequeue_burst() function until a value less than
1881  * *nb_ops* is returned.
1882  *
1883  * The rte_cryptodev_dequeue_burst() function does not provide any error
1884  * notification to avoid the corresponding overhead.
1885  *
1886  * @param	dev_id		The symmetric crypto device identifier
1887  * @param	qp_id		The index of the queue pair from which to
1888  *				retrieve processed packets. The value must be
1889  *				in the range [0, nb_queue_pair - 1] previously
1890  *				supplied to rte_cryptodev_configure().
1891  * @param	ops		The address of an array of pointers to
1892  *				*rte_crypto_op* structures that must be
1893  *				large enough to store *nb_ops* pointers in it.
1894  * @param	nb_ops		The maximum number of operations to dequeue.
1895  *
1896  * @return
1897  *   - The number of operations actually dequeued, which is the number
1898  *   of pointers to *rte_crypto_op* structures effectively supplied to the
1899  *   *ops* array.
1900  */
1901 static inline uint16_t
1902 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1903 		struct rte_crypto_op **ops, uint16_t nb_ops)
1904 {
1905 	const struct rte_crypto_fp_ops *fp_ops;
1906 	void *qp;
1907 
1908 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1909 
1910 	fp_ops = &rte_crypto_fp_ops[dev_id];
1911 	qp = fp_ops->qp.data[qp_id];
1912 
1913 	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1914 
1915 #ifdef RTE_CRYPTO_CALLBACKS
1916 	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
1917 		struct rte_cryptodev_cb_rcu *list;
1918 		struct rte_cryptodev_cb *cb;
1919 
1920 		/* rte_memory_order_release memory order was used when the
1921 		 * callback was inserted into the list.
1922 		 * Since there is a clear dependency between loading
1923 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1924 		 * not required.
1925 		 */
1926 		list = &fp_ops->qp.deq_cb[qp_id];
1927 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1928 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1929 
1930 		while (cb != NULL) {
1931 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1932 					cb->arg);
1933 			cb = cb->next;
1934 		}
1935 
1936 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1937 	}
1938 #endif
1939 	return nb_ops;
1940 }
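/*
 * Minimal usage sketch (illustrative only): drain a queue pair following the
 * "retrieve as many processed operations as possible" policy described above.
 *
 *	struct rte_crypto_op *ops[32];
 *	uint16_t n;
 *
 *	do {
 *		n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, 32);
 *		// process the n dequeued ops, then release them,
 *		// e.g. rte_mempool_put_bulk(op_pool, (void **)ops, n);
 *	} while (n == 32);
 */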
1941 
1942 /**
1943  * Enqueue a burst of operations for processing on a crypto device.
1944  *
1945  * The rte_cryptodev_enqueue_burst() function is invoked to place
1946  * crypto operations on the queue *qp_id* of the device designated by
1947  * its *dev_id*.
1948  *
1949  * The *nb_ops* parameter is the number of operations to process which are
1950  * supplied in the *ops* array of *rte_crypto_op* structures.
1951  *
1952  * The rte_cryptodev_enqueue_burst() function returns the number of
1953  * operations it actually enqueued for processing. A return value equal to
1954  * *nb_ops* means that all packets have been enqueued.
1955  *
1956  * @param	dev_id		The identifier of the device.
1957  * @param	qp_id		The index of the queue pair on which packets
1958  *				are to be enqueued for processing. The value
1959  *				must be in the range [0, nb_queue_pairs - 1]
1960  *				previously supplied to
1961  *				*rte_cryptodev_configure*.
1962  * @param	ops		The address of an array of *nb_ops* pointers
1963  *				to *rte_crypto_op* structures which contain
1964  *				the crypto operations to be processed.
1965  * @param	nb_ops		The number of operations to process.
1966  *
1967  * @return
1968  * The number of operations actually enqueued on the crypto device. The return
1969  * value can be less than the value of the *nb_ops* parameter when the
1970  * crypto devices queue is full or if invalid parameters are specified in
1971  * a *rte_crypto_op*.
1972  */
1973 static inline uint16_t
1974 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1975 		struct rte_crypto_op **ops, uint16_t nb_ops)
1976 {
1977 	const struct rte_crypto_fp_ops *fp_ops;
1978 	void *qp;
1979 
1980 	fp_ops = &rte_crypto_fp_ops[dev_id];
1981 	qp = fp_ops->qp.data[qp_id];
1982 #ifdef RTE_CRYPTO_CALLBACKS
1983 	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
1984 		struct rte_cryptodev_cb_rcu *list;
1985 		struct rte_cryptodev_cb *cb;
1986 
1987 		/* rte_memory_order_release memory order was used when the
1988 		 * callback was inserted into the list.
1989 		 * Since there is a clear dependency between loading
1990 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
1991 		 * not required.
1992 		 */
1993 		list = &fp_ops->qp.enq_cb[qp_id];
1994 		rte_rcu_qsbr_thread_online(list->qsbr, 0);
1995 		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
1996 
1997 		while (cb != NULL) {
1998 			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1999 					cb->arg);
2000 			cb = cb->next;
2001 		}
2002 
2003 		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2004 	}
2005 #endif
2006 
2007 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2008 	return fp_ops->enqueue_burst(qp, ops, nb_ops);
2009 }
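/*
 * Minimal usage sketch (illustrative only): keep enqueueing until the whole
 * burst has been accepted, since the device queue may be momentarily full.
 * A real application would bound the retries or check per-op status.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_ops)
 *		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *				&ops[sent], nb_ops - sent);
 */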
2010 
2011 
2012 
2013 #ifdef __cplusplus
2014 }
2015 #endif
2016 
2017 #endif /* _RTE_CRYPTODEV_H_ */
2018