/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameter, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * @deprecated
 * String identifiers for the crypto cipher algorithms.
 * These strings can be used on the application command line.
 */
__rte_deprecated
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr"
};

/**
 * String identifiers for the crypto cipher algorithms.
 * Not to be used by applications directly;
 * applications should use rte_cryptodev_get_cipher_algo_string().
 */
static const char *
crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr"
};

/**
 * String identifiers for the crypto cipher operations.
 * These strings can be used on the application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * @deprecated
 * String identifiers for the crypto authentication algorithms.
 * These strings can be used on the application command line.
 */
__rte_deprecated
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3"
};

/**
 * String identifiers for the crypto authentication algorithms.
 * Not to be used by applications directly;
 * applications should use rte_cryptodev_get_auth_algo_string().
 */
static const char *
crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3",

	[RTE_CRYPTO_AUTH_SHAKE_128]	= "shake-128",
	[RTE_CRYPTO_AUTH_SHAKE_256]	= "shake-256",
};

/**
 * @deprecated
 * String identifiers for the crypto AEAD algorithms.
 * These strings can be used on the application command line.
 */
__rte_deprecated
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * Not to be used by applications directly;
 * applications should use rte_cryptodev_get_aead_algo_string().
 */
static const char *
crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * These strings can be used on the application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * @deprecated
 * String identifiers for the asymmetric crypto transform operations.
 */
__rte_deprecated
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * String identifiers for the asymmetric crypto transform operations.
 * Not to be used by applications directly;
 * applications should use rte_cryptodev_asym_get_xform_string().
 */
static const char *
crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * String identifiers for the asymmetric crypto key exchange operations.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t sess_data_sz;
	/**< driver session data size */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};
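
/*
 * Layout sketch of one symmetric session mempool element (illustrative
 * only): the fixed rte_cryptodev_sym_session header is followed by
 * sess_data_sz bytes of driver session data, then user_data_sz bytes of
 * application data, matching the object size computed in
 * rte_cryptodev_sym_session_pool_create().
 */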

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}
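
/*
 * Usage sketch (hypothetical application code): map a command-line
 * algorithm name to its enum value.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") == 0)
 *		printf("algo %d selected\n", algo);
 *
 * The auth, AEAD and asymmetric lookups below follow the same pattern.
 */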

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);

	return ret;
}

const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
		alg_str = crypto_cipher_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
		alg_str = crypto_auth_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
		alg_str = crypto_aead_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
{
	const char *xform_str = NULL;

	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
		xform_str = crypto_asym_xform_strings[xform_enum];

	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);

	return xform_str;
}

/**
 * String identifiers for the crypto authentication operations.
 * These strings can be used on the application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead) {
			sym_capability = &capability->sym;
			break;
		}
	}

	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
		dev_info.driver_id, idx->type, sym_capability);

	return sym_capability;
}
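
/*
 * Check whether @size falls within [range->min, range->max] and, when
 * range->increment is non-zero, whether it is reachable from range->min
 * in steps of range->increment. Returns 0 if the size is supported,
 * -1 otherwise.
 */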

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type) {
			asym_cap = &capability->asym.xform_capa;
			break;
		}
	}

	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
		dev_info.driver_id, idx->type, asym_cap);

	return asym_cap;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
		iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->auth.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
		digest_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->aead.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
		digest_size, aad_size, iv_size, ret);

	return ret;
}
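
/*
 * Typical capability query flow (an illustrative sketch, not the only
 * valid usage):
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *		;	// a 128-bit key with a 16-byte IV is supported
 */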

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	int ret = 0;

	if (capability->op_types & (1 << op_type))
		ret = 1;

	rte_cryptodev_trace_asym_xform_capability_check_optype(
		capability->op_types, op_type, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	int ret = 0; /* success */

	/* No need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min) {
			ret = -1;
			goto done;
		}
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max) {
			ret = -1;
			goto done;
		}
	}

	/* In any case, check that the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			ret = -1;
	}

done:
	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
		modlen, ret);

	return ret;
}

/* spinlock for crypto device enq/deq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
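
/*
 * Free every enqueue/dequeue callback and the per-queue-pair RCU QSBR
 * variables. Called with rte_cryptodev_callback_lock held when the
 * device is (re)configured.
 */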

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as only one data-plane thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	rte_cryptodev_trace_get_feature_name(flag);

	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int ret = 1;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		ret = 0;
		goto done;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		ret = 0;

done:
	rte_cryptodev_trace_is_valid_dev(dev_id, ret);

	return ret;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;
	int ret = -1;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)) {
			ret = (int)i;
			break;
		}
	}

	rte_cryptodev_trace_get_dev_id(name, ret);

	return ret;
}

uint8_t
rte_cryptodev_count(void)
{
	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);

	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	rte_cryptodev_trace_devices_get(driver_name, count);

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	void *sec_ctx = NULL;

	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		sec_ctx = rte_crypto_devices[dev_id].security_ctx;

	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);

	return sec_ctx;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
		dev->data->socket_id);
	return dev->data->socket_id;
}
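
/*
 * Reserve (in the primary process) or look up (in a secondary process)
 * the shared memzone that holds this device's rte_cryptodev_data, so
 * that all processes map the same device state.
 */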

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
		dev->data->socket_id, dev->data->dev_id,
		dev->data->nb_queue_pairs);

	return dev->data->nb_queue_pairs;
}
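
/*
 * Allocate the queue-pair pointer array on first configuration; on
 * reconfiguration, release any queue pairs above the new count.
 */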

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;
	int ret = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		ret = -EINVAL;
		goto done;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		ret = -EINVAL;
		goto done;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		ret = 1;
		goto done;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

done:
	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);

	return ret;
}
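
/*
 * A session mempool is usable with a device only if its private area
 * carries the expected pool header and the per-element driver data area
 * is at least as large as the device's private session size.
 */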

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
	uint32_t sess_priv_size)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->sess_data_sz < sess_priv_size)
		return 0;

	return 1;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
					rte_cryptodev_sym_get_private_session_size(dev_id))) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
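
/*
 * Typical device bring-up order (an illustrative sketch; the descriptor
 * count below is an assumption, not a requirement):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *	rte_cryptodev_start(dev_id);
 */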

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
	return cb;
}
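
/*
 * Removal unlinks the callback with a relaxed store and then waits for
 * all data-plane readers through the per-queue RCU QSBR variable before
 * freeing it, so in-flight invocations complete safely.
 */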

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);

	rte_cryptodev_trace_stats_get(dev_id, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	rte_cryptodev_trace_stats_reset(dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;

	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
}
1920 
1921 int
1922 rte_cryptodev_callback_register(uint8_t dev_id,
1923 			enum rte_cryptodev_event_type event,
1924 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1925 {
1926 	struct rte_cryptodev *dev;
1927 	struct rte_cryptodev_callback *user_cb;
1928 
1929 	if (!cb_fn)
1930 		return -EINVAL;
1931 
1932 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1933 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1934 		return -EINVAL;
1935 	}
1936 
1937 	dev = &rte_crypto_devices[dev_id];
1938 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1939 
1940 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1941 		if (user_cb->cb_fn == cb_fn &&
1942 			user_cb->cb_arg == cb_arg &&
1943 			user_cb->event == event) {
1944 			break;
1945 		}
1946 	}
1947 
1948 	/* create a new callback. */
1949 	if (user_cb == NULL) {
1950 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1951 				sizeof(struct rte_cryptodev_callback), 0);
1952 		if (user_cb != NULL) {
1953 			user_cb->cb_fn = cb_fn;
1954 			user_cb->cb_arg = cb_arg;
1955 			user_cb->event = event;
1956 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1957 		}
1958 	}
1959 
1960 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1961 
1962 	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
1963 	return (user_cb == NULL) ? -ENOMEM : 0;
1964 }
1965 
1966 int
1967 rte_cryptodev_callback_unregister(uint8_t dev_id,
1968 			enum rte_cryptodev_event_type event,
1969 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1970 {
1971 	int ret;
1972 	struct rte_cryptodev *dev;
1973 	struct rte_cryptodev_callback *cb, *next;
1974 
1975 	if (!cb_fn)
1976 		return -EINVAL;
1977 
1978 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1979 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1980 		return -EINVAL;
1981 	}
1982 
1983 	dev = &rte_crypto_devices[dev_id];
1984 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1985 
1986 	ret = 0;
1987 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1988 
1989 		next = TAILQ_NEXT(cb, next);
1990 
1991 		if (cb->cb_fn != cb_fn || cb->event != event ||
1992 				(cb->cb_arg != (void *)-1 &&
1993 				cb->cb_arg != cb_arg))
1994 			continue;
1995 
1996 		/*
1997 		 * if this callback is not executing right now,
1998 		 * then remove it.
1999 		 */
2000 		if (cb->active == 0) {
2001 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2002 			rte_free(cb);
2003 		} else {
2004 			ret = -EAGAIN;
2005 		}
2006 	}
2007 
2008 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
2009 
2010 	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
2011 	return ret;
2012 }
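
/*
 * Usage sketch for event callbacks (illustrative; the handler is
 * hypothetical). Registering the same (fn, arg, event) triple twice
 * stores it only once, and unregister may return -EAGAIN if the
 * callback is executing at that moment:
 *
 *	static void
 *	err_cb(uint8_t dev_id, enum rte_cryptodev_event_type ev, void *arg)
 *	{
 *		RTE_SET_USED(arg);
 *		printf("device %u raised event %d\n", dev_id, (int)ev);
 *	}
 *
 *	rte_cryptodev_callback_register(0, RTE_CRYPTODEV_EVENT_ERROR,
 *			err_cb, NULL);
 *	...
 *	rte_cryptodev_callback_unregister(0, RTE_CRYPTODEV_EVENT_ERROR,
 *			err_cb, NULL);
 */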
2013 
2014 void
2015 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
2016 	enum rte_cryptodev_event_type event)
2017 {
2018 	struct rte_cryptodev_callback *cb_lst;
2019 	struct rte_cryptodev_callback dev_cb;
2020 
2021 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
2022 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2023 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2024 			continue;
2025 		dev_cb = *cb_lst;
2026 		cb_lst->active = 1;
2027 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event, dev_cb.cb_arg);
2030 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
2031 		cb_lst->active = 0;
2032 	}
2033 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
2034 }
2035 
2036 int
2037 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
2038 {
2039 	struct rte_cryptodev *dev;
2040 
2041 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2042 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2043 		return -EINVAL;
2044 	}
2045 	dev = &rte_crypto_devices[dev_id];
2046 
2047 	if (qp_id >= dev->data->nb_queue_pairs)
2048 		return -EINVAL;
2049 	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
2050 		return -ENOTSUP;
2051 
2052 	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
2053 }
2054 
2055 struct rte_mempool *
2056 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
2057 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
2058 	int socket_id)
2059 {
2060 	struct rte_mempool *mp;
2061 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2062 	uint32_t obj_sz;
2063 
2064 	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;
2065 
2066 	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2067 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
2068 			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
2069 			NULL, NULL,
2070 			socket_id, 0);
2071 	if (mp == NULL) {
2072 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2073 			__func__, name, rte_errno);
2074 		return NULL;
2075 	}
2076 
2077 	pool_priv = rte_mempool_get_priv(mp);
2078 	if (!pool_priv) {
2079 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2080 			__func__, name);
2081 		rte_mempool_free(mp);
2082 		return NULL;
2083 	}
2084 
2085 	pool_priv->sess_data_sz = elt_size;
2086 	pool_priv->user_data_sz = user_data_size;
2087 
2088 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
2089 		elt_size, cache_size, user_data_size, mp);
2090 	return mp;
2091 }
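
/*
 * Usage sketch (illustrative): size elt_size to the largest private
 * session size among the devices the pool will serve; the function adds
 * only the common session header on top of it:
 *
 *	uint32_t priv_sz = rte_cryptodev_sym_get_private_session_size(0);
 *	struct rte_mempool *sess_mp =
 *		rte_cryptodev_sym_session_pool_create("sym_sess_pool",
 *			2048, priv_sz, 128, 0, rte_socket_id());
 */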
2092 
2093 struct rte_mempool *
2094 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
2095 	uint32_t cache_size, uint16_t user_data_size, int socket_id)
2096 {
2097 	struct rte_mempool *mp;
2098 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2099 	uint32_t obj_sz, obj_sz_aligned;
2100 	uint8_t dev_id;
2101 	unsigned int priv_sz, max_priv_sz = 0;
2102 
2103 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2104 		if (rte_cryptodev_is_valid_dev(dev_id)) {
2105 			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
2106 			if (priv_sz > max_priv_sz)
2107 				max_priv_sz = priv_sz;
2108 		}
2109 	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("No valid crypto device found, cannot determine max private session size");
2111 		return NULL;
2112 	}
2113 
2114 	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
2115 			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2117 
2118 	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
2119 			(uint32_t)(sizeof(*pool_priv)),
2120 			NULL, NULL, NULL, NULL,
2121 			socket_id, 0);
2122 	if (mp == NULL) {
2123 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2124 			__func__, name, rte_errno);
2125 		return NULL;
2126 	}
2127 
2128 	pool_priv = rte_mempool_get_priv(mp);
2129 	if (!pool_priv) {
2130 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2131 			__func__, name);
2132 		rte_mempool_free(mp);
2133 		return NULL;
2134 	}
2135 	pool_priv->max_priv_session_sz = max_priv_sz;
2136 	pool_priv->user_data_sz = user_data_size;
2137 
2138 	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
2139 		user_data_size, cache_size, mp);
2140 	return mp;
2141 }
2142 
2143 void *
2144 rte_cryptodev_sym_session_create(uint8_t dev_id,
2145 		struct rte_crypto_sym_xform *xforms,
2146 		struct rte_mempool *mp)
2147 {
2148 	struct rte_cryptodev *dev;
2149 	struct rte_cryptodev_sym_session *sess;
2150 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2151 	uint32_t sess_priv_sz;
2152 	int ret;
2153 
2154 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2155 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2156 		rte_errno = EINVAL;
2157 		return NULL;
2158 	}
2159 
2160 	if (xforms == NULL) {
		CDEV_LOG_ERR("Invalid xform");
2162 		rte_errno = EINVAL;
2163 		return NULL;
2164 	}
2165 
2166 	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
2167 	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
2168 		CDEV_LOG_ERR("Invalid mempool");
2169 		rte_errno = EINVAL;
2170 		return NULL;
2171 	}
2172 
2173 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2174 
2175 	/* Allocate a session structure from the session pool */
2176 	if (rte_mempool_get(mp, (void **)&sess)) {
2177 		CDEV_LOG_ERR("couldn't get object from session mempool");
2178 		rte_errno = ENOMEM;
2179 		return NULL;
2180 	}
2181 
2182 	pool_priv = rte_mempool_get_priv(mp);
2183 	sess->driver_id = dev->driver_id;
2184 	sess->sess_data_sz = pool_priv->sess_data_sz;
2185 	sess->user_data_sz = pool_priv->user_data_sz;
2186 	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
2187 		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);
2188 
2189 	if (dev->dev_ops->sym_session_configure == NULL) {
2190 		rte_errno = ENOTSUP;
2191 		goto error_exit;
2192 	}
2193 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2194 
2195 	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
2196 	if (ret < 0) {
2197 		rte_errno = -ret;
2198 		goto error_exit;
2199 	}
2201 
2202 	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);
2203 
2204 	return (void *)sess;
2205 error_exit:
2206 	rte_mempool_put(mp, (void *)sess);
2207 	return NULL;
2208 }
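
/*
 * Usage sketch (illustrative; "key", "sess_mp" and IV_OFFSET are
 * placeholders the application must provide):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(0, &xform, sess_mp);
 *
 *	if (sess == NULL)
 *		rte_exit(EXIT_FAILURE, "rte_errno=%d\n", rte_errno);
 *	...
 *	rte_cryptodev_sym_session_free(0, sess);
 */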
2209 
2210 int
2211 rte_cryptodev_asym_session_create(uint8_t dev_id,
2212 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
2213 		void **session)
2214 {
2215 	struct rte_cryptodev_asym_session *sess;
2216 	uint32_t session_priv_data_sz;
2217 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2218 	unsigned int session_header_size =
2219 			rte_cryptodev_asym_get_header_session_size();
2220 	struct rte_cryptodev *dev;
2221 	int ret;
2222 
2223 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2224 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2225 		return -EINVAL;
2226 	}
2227 
2228 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2229 
2230 	if (dev == NULL)
2231 		return -EINVAL;
2232 
2233 	if (!mp) {
2234 		CDEV_LOG_ERR("invalid mempool");
2235 		return -EINVAL;
2236 	}
2237 
	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
2240 	pool_priv = rte_mempool_get_priv(mp);
2241 
2242 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
		CDEV_LOG_DEBUG(
			"The mempool was created with a private session data size smaller than this device requires");
2245 		return -EINVAL;
2246 	}
2247 
	/* Verify if provided mempool can hold elements big enough. */
	if (mp->elt_size < session_header_size + session_priv_data_sz) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return -EINVAL;
	}

	/* Check PMD support before taking an object from the pool. */
	if (*dev->dev_ops->asym_session_configure == NULL)
		return -ENOTSUP;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, session)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return -ENOMEM;
	}

	sess = *session;
	sess->driver_id = dev->driver_id;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;

	/* Clear device private session data and user data area. */
	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);

	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		CDEV_LOG_ERR(
			"dev_id %d failed to configure session details",
			dev_id);
		/* Do not leak the mempool object on failure. */
		rte_mempool_put(mp, *session);
		*session = NULL;
		return ret;
	}
2281 
2282 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2283 	return 0;
2284 }
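
/*
 * Usage sketch (illustrative; field names follow rte_crypto_asym.h and
 * the modulus/exponent buffers are placeholders):
 *
 *	struct rte_crypto_asym_xform xform = {
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *		.modex = {
 *			.modulus = { .data = mod, .length = mod_len },
 *			.exponent = { .data = exp, .length = exp_len },
 *		},
 *	};
 *	void *sess = NULL;
 *
 *	if (rte_cryptodev_asym_session_create(0, &xform, asym_mp, &sess) != 0)
 *		... handle error ...
 */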
2285 
2286 int
2287 rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
2288 {
2289 	struct rte_cryptodev *dev;
2290 	struct rte_mempool *sess_mp;
2291 	struct rte_cryptodev_sym_session *sess = _sess;
2292 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2293 
2294 	if (sess == NULL)
2295 		return -EINVAL;
2296 
2297 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2298 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2299 		return -EINVAL;
2300 	}
2301 
2302 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2303 
	if (dev == NULL)
2305 		return -EINVAL;
2306 
2307 	sess_mp = rte_mempool_from_obj(sess);
2308 	if (!sess_mp)
2309 		return -EINVAL;
2310 	pool_priv = rte_mempool_get_priv(sess_mp);
2311 
2312 	if (sess->driver_id != dev->driver_id) {
2313 		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
2314 			sess->driver_id, dev->driver_id);
2315 		return -EINVAL;
2316 	}
2317 
2318 	if (*dev->dev_ops->sym_session_clear == NULL)
2319 		return -ENOTSUP;
2320 
2321 	dev->dev_ops->sym_session_clear(dev, sess);
2322 
2323 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2324 
2325 	/* Return session to mempool */
2326 	rte_mempool_put(sess_mp, sess);
2327 
2328 	rte_cryptodev_trace_sym_session_free(dev_id, sess);
2329 	return 0;
2330 }
2331 
2332 int
2333 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2334 {
2335 	struct rte_mempool *sess_mp;
2336 	struct rte_cryptodev *dev;
2337 
2338 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2339 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2340 		return -EINVAL;
2341 	}
2342 
2343 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2344 
2345 	if (dev == NULL || sess == NULL)
2346 		return -EINVAL;
2347 
2348 	if (*dev->dev_ops->asym_session_clear == NULL)
2349 		return -ENOTSUP;
2350 
2351 	dev->dev_ops->asym_session_clear(dev, sess);
2352 
2353 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2354 
2355 	/* Return session to mempool */
2356 	sess_mp = rte_mempool_from_obj(sess);
2357 	rte_mempool_put(sess_mp, sess);
2358 
2359 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2360 	return 0;
2361 }
2362 
2363 unsigned int
2364 rte_cryptodev_asym_get_header_session_size(void)
2365 {
2366 	return sizeof(struct rte_cryptodev_asym_session);
2367 }
2368 
2369 unsigned int
2370 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2371 {
2372 	struct rte_cryptodev *dev;
2373 	unsigned int priv_sess_size;
2374 
2375 	if (!rte_cryptodev_is_valid_dev(dev_id))
2376 		return 0;
2377 
2378 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2379 
2380 	if (*dev->dev_ops->sym_session_get_size == NULL)
2381 		return 0;
2382 
2383 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2384 
2385 	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
2386 		priv_sess_size);
2387 
2388 	return priv_sess_size;
2389 }
2390 
2391 unsigned int
2392 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2393 {
2394 	struct rte_cryptodev *dev;
2395 	unsigned int priv_sess_size;
2396 
2397 	if (!rte_cryptodev_is_valid_dev(dev_id))
2398 		return 0;
2399 
2400 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2401 
2402 	if (*dev->dev_ops->asym_session_get_size == NULL)
2403 		return 0;
2404 
2405 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2406 
2407 	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
2408 		priv_sess_size);
2409 
2410 	return priv_sess_size;
2411 }
2412 
2413 int
2414 rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
2415 		uint16_t size)
2416 {
2417 	struct rte_cryptodev_sym_session *sess = _sess;
2418 
2419 	if (sess == NULL)
2420 		return -EINVAL;
2421 
2422 	if (sess->user_data_sz < size)
2423 		return -ENOMEM;
2424 
2425 	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);
2426 
2427 	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);
2428 
2429 	return 0;
2430 }
2431 
2432 void *
2433 rte_cryptodev_sym_session_get_user_data(void *_sess)
2434 {
2435 	struct rte_cryptodev_sym_session *sess = _sess;
2436 	void *data = NULL;
2437 
2438 	if (sess == NULL || sess->user_data_sz == 0)
2439 		return NULL;
2440 
2441 	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);
2442 
2443 	rte_cryptodev_trace_sym_session_get_user_data(sess, data);
2444 
2445 	return data;
2446 }
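
/*
 * Usage sketch (illustrative): round-trip application state through the
 * user data area reserved via user_data_size at pool creation; "struct
 * app_ctx" is a hypothetical application type:
 *
 *	struct app_ctx ctx = { 0 };
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	struct app_ctx *p = rte_cryptodev_sym_session_get_user_data(sess);
 */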
2447 
2448 int
2449 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2450 {
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_private_data + sess->max_priv_data_sz,
			data, size);
2461 
2462 	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);
2463 
2464 	return 0;
2465 }
2466 
2467 void *
2468 rte_cryptodev_asym_session_get_user_data(void *session)
2469 {
2470 	struct rte_cryptodev_asym_session *sess = session;
2471 	void *data = NULL;
2472 
2473 	if (sess == NULL || sess->user_data_sz == 0)
2474 		return NULL;
2475 
2476 	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);
2477 
2478 	rte_cryptodev_trace_asym_session_get_user_data(sess, data);
2479 
2480 	return data;
2481 }
2482 
2483 static inline void
2484 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2485 {
2486 	uint32_t i;
2487 	for (i = 0; i < vec->num; i++)
2488 		vec->status[i] = errnum;
2489 }
2490 
2491 uint32_t
2492 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2493 	void *_sess, union rte_crypto_sym_ofs ofs,
2494 	struct rte_crypto_sym_vec *vec)
2495 {
2496 	struct rte_cryptodev *dev;
2497 	struct rte_cryptodev_sym_session *sess = _sess;
2498 
2499 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2500 		sym_crypto_fill_status(vec, EINVAL);
2501 		return 0;
2502 	}
2503 
2504 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2505 
2506 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2507 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2508 		sym_crypto_fill_status(vec, ENOTSUP);
2509 		return 0;
2510 	}
2511 
2512 	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);
2513 
2514 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2515 }
2516 
2517 int
2518 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2519 {
2520 	struct rte_cryptodev *dev;
2521 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2522 	int32_t priv_size;
2523 
2524 	if (!rte_cryptodev_is_valid_dev(dev_id))
2525 		return -EINVAL;
2526 
2527 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2528 
2529 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2530 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2531 		return -ENOTSUP;
2532 	}
2533 
2534 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2535 	if (priv_size < 0)
2536 		return -ENOTSUP;
2537 
2538 	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);
2539 
2540 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2541 }
2542 
2543 int
2544 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2545 	struct rte_crypto_raw_dp_ctx *ctx,
2546 	enum rte_crypto_op_sess_type sess_type,
2547 	union rte_cryptodev_session_ctx session_ctx,
2548 	uint8_t is_update)
2549 {
2550 	struct rte_cryptodev *dev;
2551 
2552 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2553 		return -EINVAL;
2554 
2555 	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP) ||
			dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2558 		return -ENOTSUP;
2559 
2560 	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);
2561 
2562 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2563 			sess_type, session_ctx, is_update);
2564 }
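
/*
 * Usage sketch (illustrative): allocate and configure a raw data-path
 * context for an already set-up queue pair and symmetric session:
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *
 *	if (sz < 0)
 *		... raw data path not supported ...
 *	ctx = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */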
2565 
2566 int
2567 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2568 	enum rte_crypto_op_type op_type,
2569 	enum rte_crypto_op_sess_type sess_type,
2570 	void *ev_mdata,
2571 	uint16_t size)
2572 {
2573 	struct rte_cryptodev *dev;
2574 
2575 	if (sess == NULL || ev_mdata == NULL)
2576 		return -EINVAL;
2577 
2578 	if (!rte_cryptodev_is_valid_dev(dev_id))
2579 		goto skip_pmd_op;
2580 
2581 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2582 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2583 		goto skip_pmd_op;
2584 
2585 	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
2586 		sess_type, ev_mdata, size);
2587 
2588 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2589 			sess_type, ev_mdata);
2590 
2591 skip_pmd_op:
2592 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2593 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2594 				size);
2595 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2596 		struct rte_cryptodev_asym_session *s = sess;
2597 
2598 		if (s->event_mdata == NULL) {
2599 			s->event_mdata = rte_malloc(NULL, size, 0);
2600 			if (s->event_mdata == NULL)
2601 				return -ENOMEM;
2602 		}
2603 		rte_memcpy(s->event_mdata, ev_mdata, size);
2604 
2605 		return 0;
2606 	} else
2607 		return -ENOTSUP;
2608 }
2609 
2610 uint32_t
2611 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2612 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2613 	void **user_data, int *enqueue_status)
2614 {
2615 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2616 			ofs, user_data, enqueue_status);
2617 }
2618 
2619 int
2620 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2621 		uint32_t n)
2622 {
2623 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2624 }
2625 
2626 uint32_t
2627 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2628 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2629 	uint32_t max_nb_to_dequeue,
2630 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2631 	void **out_user_data, uint8_t is_user_data_array,
2632 	uint32_t *n_success_jobs, int *status)
2633 {
2634 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2635 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2636 		out_user_data, is_user_data_array, n_success_jobs, status);
2637 }
2638 
2639 int
2640 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2641 		uint32_t n)
2642 {
2643 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2644 }
2645 
2646 /** Initialise rte_crypto_op mempool element */
2647 static void
2648 rte_crypto_op_init(struct rte_mempool *mempool,
2649 		void *opaque_arg,
2650 		void *_op_data,
2651 		__rte_unused unsigned i)
2652 {
2653 	struct rte_crypto_op *op = _op_data;
2654 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2655 
2656 	memset(_op_data, 0, mempool->elt_size);
2657 
2658 	__rte_crypto_op_reset(op, type);
2659 
2660 	op->phys_addr = rte_mem_virt2iova(_op_data);
2661 	op->mempool = mempool;
2662 }
2663 
struct rte_mempool *
2666 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2667 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2668 		int socket_id)
2669 {
2670 	struct rte_crypto_op_pool_private *priv;
2671 
	unsigned int elt_size = sizeof(struct rte_crypto_op) + priv_size;
2674 
2675 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2676 		elt_size += sizeof(struct rte_crypto_sym_op);
2677 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2678 		elt_size += sizeof(struct rte_crypto_asym_op);
2679 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
2682 	} else {
2683 		CDEV_LOG_ERR("Invalid op_type");
2684 		return NULL;
2685 	}
2686 
2687 	/* lookup mempool in case already allocated */
2688 	struct rte_mempool *mp = rte_mempool_lookup(name);
2689 
2690 	if (mp != NULL) {
2691 		priv = (struct rte_crypto_op_pool_private *)
2692 				rte_mempool_get_priv(mp);
2693 
		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with incompatible parameters",
					name);
			return NULL;
		}
2703 		return mp;
2704 	}
2705 
2706 	mp = rte_mempool_create(
2707 			name,
2708 			nb_elts,
2709 			elt_size,
2710 			cache_size,
2711 			sizeof(struct rte_crypto_op_pool_private),
2712 			NULL,
2713 			NULL,
2714 			rte_crypto_op_init,
2715 			&type,
2716 			socket_id,
2717 			0);
2718 
2719 	if (mp == NULL) {
2720 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2721 		return NULL;
2722 	}
2723 
2724 	priv = (struct rte_crypto_op_pool_private *)
2725 			rte_mempool_get_priv(mp);
2726 
2727 	priv->priv_size = priv_size;
2728 	priv->type = type;
2729 
2730 	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
2731 	return mp;
2732 }
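
/*
 * Usage sketch (illustrative; pool sizing values are arbitrary examples):
 *
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 16,
 *			rte_socket_id());
 *	struct rte_crypto_op *op =
 *			rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */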
2733 
2734 int
2735 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2736 {
2737 	struct rte_cryptodev *dev = NULL;
2738 	uint32_t i = 0;
2739 
2740 	if (name == NULL)
2741 		return -EINVAL;
2742 
2743 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2744 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2745 				"%s_%u", dev_name_prefix, i);
2746 
2747 		if (ret < 0)
2748 			return ret;
2749 
2750 		dev = rte_cryptodev_pmd_get_named_dev(name);
2751 		if (!dev)
2752 			return 0;
2753 	}
2754 
2755 	return -1;
2756 }
2757 
2758 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2759 
2760 static struct cryptodev_driver_list cryptodev_driver_list =
2761 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2762 
2763 int
2764 rte_cryptodev_driver_id_get(const char *name)
2765 {
2766 	struct cryptodev_driver *driver;
2767 	const char *driver_name;
2768 	int driver_id = -1;
2769 
2770 	if (name == NULL) {
		CDEV_LOG_DEBUG("name pointer NULL");
2772 		return -1;
2773 	}
2774 
2775 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2776 		driver_name = driver->driver->name;
2777 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
2778 			driver_id = driver->id;
2779 			break;
2780 		}
2781 	}
2782 
2783 	rte_cryptodev_trace_driver_id_get(name, driver_id);
2784 
2785 	return driver_id;
2786 }
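
/*
 * Usage sketch (illustrative; the name must match a registered PMD, e.g.
 * the AESNI-MB driver when it is built in):
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *
 *	if (drv_id < 0)
 *		... driver not available ...
 */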
2787 
2788 const char *
2789 rte_cryptodev_name_get(uint8_t dev_id)
2790 {
2791 	struct rte_cryptodev *dev;
2792 
2793 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2794 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2795 		return NULL;
2796 	}
2797 
2798 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2799 	if (dev == NULL)
2800 		return NULL;
2801 
2802 	rte_cryptodev_trace_name_get(dev_id, dev->data->name);
2803 
2804 	return dev->data->name;
2805 }
2806 
2807 const char *
2808 rte_cryptodev_driver_name_get(uint8_t driver_id)
2809 {
2810 	struct cryptodev_driver *driver;
2811 
2812 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2813 		if (driver->id == driver_id) {
2814 			rte_cryptodev_trace_driver_name_get(driver_id,
2815 				driver->driver->name);
2816 			return driver->driver->name;
2817 		}
2818 	}
2819 	return NULL;
2820 }
2821 
2822 uint8_t
2823 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2824 		const struct rte_driver *drv)
2825 {
2826 	crypto_drv->driver = drv;
2827 	crypto_drv->id = nb_drivers;
2828 
2829 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2830 
2831 	rte_cryptodev_trace_allocate_driver(drv->name);
2832 
2833 	return nb_drivers++;
2834 }
2835 
2836 RTE_INIT(cryptodev_init_fp_ops)
2837 {
2838 	uint32_t i;
2839 
2840 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2841 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2842 }
2843 
2844 static int
2845 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2846 		const char *params __rte_unused,
2847 		struct rte_tel_data *d)
2848 {
2849 	int dev_id;
2850 
2851 	if (rte_cryptodev_count() < 1)
2852 		return -EINVAL;
2853 
2854 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2855 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2856 		if (rte_cryptodev_is_valid_dev(dev_id))
2857 			rte_tel_data_add_array_int(d, dev_id);
2858 
2859 	return 0;
2860 }
2861 
2862 static int
2863 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2864 		const char *params, struct rte_tel_data *d)
2865 {
2866 	struct rte_cryptodev_info cryptodev_info;
2867 	int dev_id;
2868 	char *end_param;
2869 
2870 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2871 		return -EINVAL;
2872 
2873 	dev_id = strtoul(params, &end_param, 0);
2874 	if (*end_param != '\0')
2875 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2876 	if (!rte_cryptodev_is_valid_dev(dev_id))
2877 		return -EINVAL;
2878 
2879 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2880 
2881 	rte_tel_data_start_dict(d);
2882 	rte_tel_data_add_dict_string(d, "device_name",
2883 		cryptodev_info.device->name);
2884 	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
2885 		cryptodev_info.max_nb_queue_pairs);
2886 
2887 	return 0;
2888 }
2889 
2890 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)
2891 
2892 static int
2893 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2894 		const char *params,
2895 		struct rte_tel_data *d)
2896 {
2897 	struct rte_cryptodev_stats cryptodev_stats;
2898 	int dev_id, ret;
2899 	char *end_param;
2900 
2901 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2902 		return -EINVAL;
2903 
2904 	dev_id = strtoul(params, &end_param, 0);
2905 	if (*end_param != '\0')
2906 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2907 	if (!rte_cryptodev_is_valid_dev(dev_id))
2908 		return -EINVAL;
2909 
2910 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2911 	if (ret < 0)
2912 		return ret;
2913 
2914 	rte_tel_data_start_dict(d);
2915 	ADD_DICT_STAT(enqueued_count);
2916 	ADD_DICT_STAT(dequeued_count);
2917 	ADD_DICT_STAT(enqueue_err_count);
2918 	ADD_DICT_STAT(dequeue_err_count);
2919 
2920 	return 0;
2921 }
2922 
2923 #define CRYPTO_CAPS_SZ                                             \
2924 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2925 					sizeof(uint64_t)) /        \
2926 	 sizeof(uint64_t))
2927 
2928 static int
2929 crypto_caps_array(struct rte_tel_data *d,
2930 		  const struct rte_cryptodev_capabilities *capabilities)
2931 {
2932 	const struct rte_cryptodev_capabilities *dev_caps;
2933 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2934 	unsigned int i = 0, j;
2935 
2936 	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);
2937 
2938 	while ((dev_caps = &capabilities[i++])->op !=
2939 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2940 		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
2941 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2942 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2943 			rte_tel_data_add_array_uint(d, caps_val[j]);
2944 	}
2945 
2946 	return i;
2947 }
2948 
2949 static int
2950 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2951 			  struct rte_tel_data *d)
2952 {
2953 	struct rte_cryptodev_info dev_info;
2954 	struct rte_tel_data *crypto_caps;
2955 	int crypto_caps_n;
2956 	char *end_param;
2957 	int dev_id;
2958 
2959 	if (!params || strlen(params) == 0 || !isdigit(*params))
2960 		return -EINVAL;
2961 
2962 	dev_id = strtoul(params, &end_param, 0);
2963 	if (*end_param != '\0')
2964 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2965 	if (!rte_cryptodev_is_valid_dev(dev_id))
2966 		return -EINVAL;
2967 
2968 	rte_tel_data_start_dict(d);
2969 	crypto_caps = rte_tel_data_alloc();
2970 	if (!crypto_caps)
2971 		return -ENOMEM;
2972 
2973 	rte_cryptodev_info_get(dev_id, &dev_info);
2974 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2975 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2976 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2977 
2978 	return 0;
2979 }
2980 
2981 RTE_INIT(cryptodev_init_telemetry)
2982 {
2983 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2984 			"Returns information for a cryptodev. Parameters: int dev_id");
2985 	rte_telemetry_register_cmd("/cryptodev/list",
2986 			cryptodev_handle_dev_list,
2987 			"Returns list of available crypto devices by IDs. No parameters.");
2988 	rte_telemetry_register_cmd("/cryptodev/stats",
2989 			cryptodev_handle_dev_stats,
2990 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2991 	rte_telemetry_register_cmd("/cryptodev/caps",
2992 			cryptodev_handle_dev_caps,
2993 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2994 }
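
/*
 * The commands registered above can be exercised from the standard
 * telemetry client (example session, device 0):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /cryptodev/list
 *	--> /cryptodev/stats,0
 *	--> /cryptodev/caps,0
 */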
2995