xref: /dpdk/lib/cryptodev/rte_cryptodev.c (revision 62774b78a84e9fa5df56d04cffed69bef8c901f1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <ctype.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <dev_driver.h>
17 #include <rte_memory.h>
18 #include <rte_memcpy.h>
19 #include <rte_memzone.h>
20 #include <rte_eal.h>
21 #include <rte_common.h>
22 #include <rte_mempool.h>
23 #include <rte_malloc.h>
24 #include <rte_errno.h>
25 #include <rte_spinlock.h>
26 #include <rte_string_fns.h>
27 #include <rte_telemetry.h>
28 
29 #include "rte_crypto.h"
30 #include "rte_cryptodev.h"
31 #include "cryptodev_pmd.h"
32 #include "cryptodev_trace.h"
33 
34 static uint8_t nb_drivers;
35 
36 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
37 
38 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
39 
40 static struct rte_cryptodev_global cryptodev_globals = {
41 		.devs			= rte_crypto_devices,
42 		.data			= { NULL },
43 		.nb_devs		= 0
44 };
45 
46 /* Public fastpath APIs. */
47 struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
48 
49 /* spinlock for crypto device callbacks */
50 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
51 
52 /**
53  * The user application callback description.
54  *
55  * It contains the callback address registered by the user application,
56  * a pointer to the callback's parameter, and the event type.
57  */
58 struct rte_cryptodev_callback {
59 	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
60 	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
61 	void *cb_arg;				/**< Parameter for callback */
62 	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
63 	uint32_t active;			/**< Callback is executing */
64 };
65 
66 /**
67  * @deprecated
68  * The crypto cipher algorithm string identifiers.
69  * They can be used on the application command line.
70  */
71 __rte_deprecated
72 const char *
73 rte_crypto_cipher_algorithm_strings[] = {
74 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
75 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
76 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
77 
78 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
79 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
80 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
81 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
82 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
83 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
84 
85 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
86 
87 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
88 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
89 
90 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
91 
92 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
93 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
94 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
95 	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
96 	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
97 	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr"
98 };
99 
100 /**
101  * The crypto cipher algorithm string identifiers.
102  * Not to be used by applications directly;
103  * use rte_cryptodev_get_cipher_algo_string() instead.
104  */
105 static const char *
106 crypto_cipher_algorithm_strings[] = {
107 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
108 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
109 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
110 
111 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
112 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
113 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
114 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
115 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
116 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
117 
118 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
119 
120 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
121 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
122 
123 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
124 
125 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
126 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
127 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
128 	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
129 	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
130 	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr",
131 	[RTE_CRYPTO_CIPHER_SM4_CFB]	= "sm4-cfb",
132 	[RTE_CRYPTO_CIPHER_SM4_OFB]	= "sm4-ofb"
133 };
134 
135 /**
136  * The crypto cipher operation string identifiers.
137  * They can be used on the application command line.
138  */
139 const char *
140 rte_crypto_cipher_operation_strings[] = {
141 		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
142 		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
143 };
144 
145 /**
146  * @deprecated
147  * The crypto auth algorithm string identifiers.
148  * They can be used on the application command line.
149  */
150 __rte_deprecated
151 const char *
152 rte_crypto_auth_algorithm_strings[] = {
153 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
154 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
155 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
156 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
157 
158 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
159 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
160 
161 	[RTE_CRYPTO_AUTH_NULL]		= "null",
162 
163 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
164 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
165 
166 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
167 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
168 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
169 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
170 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
171 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
172 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
173 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
174 
175 	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
176 	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
177 	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
178 	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
179 	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
180 	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
181 	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
182 	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",
183 
184 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
185 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
186 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
187 	[RTE_CRYPTO_AUTH_SM3]		= "sm3"
188 };
189 
190 /**
191  * The crypto auth algorithm string identifiers.
192  * Not to be used by applications directly;
193  * use rte_cryptodev_get_auth_algo_string() instead.
194  */
195 static const char *
196 crypto_auth_algorithm_strings[] = {
197 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
198 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
199 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
200 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
201 
202 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
203 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
204 
205 	[RTE_CRYPTO_AUTH_NULL]		= "null",
206 
207 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
208 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
209 
210 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
211 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
212 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
213 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
214 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
215 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
216 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
217 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
218 
219 	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
220 	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
221 	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
222 	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
223 	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
224 	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
225 	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
226 	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",
227 
228 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
229 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
230 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
231 	[RTE_CRYPTO_AUTH_SM3]		= "sm3",
232 	[RTE_CRYPTO_AUTH_SM3_HMAC]	= "sm3-hmac",
233 
234 	[RTE_CRYPTO_AUTH_SHAKE_128]	 = "shake-128",
235 	[RTE_CRYPTO_AUTH_SHAKE_256]	 = "shake-256",
236 };
237 
238 /**
239  * @deprecated
240  * The crypto AEAD algorithm string identifiers.
241  * They can be used on the application command line.
242  */
243 __rte_deprecated
244 const char *
245 rte_crypto_aead_algorithm_strings[] = {
246 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
247 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
248 	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
249 };
250 
251 /**
252  * The crypto AEAD algorithm string identifiers.
253  * Not to be used by applications directly;
254  * use rte_cryptodev_get_aead_algo_string() instead.
255  */
256 static const char *
257 crypto_aead_algorithm_strings[] = {
258 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
259 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
260 	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
261 };
262 
263 
264 /**
265  * The crypto AEAD operation string identifiers.
266  * They can be used on the application command line.
267  */
268 const char *
269 rte_crypto_aead_operation_strings[] = {
270 	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
271 	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
272 };
273 
274 /**
275  * @deprecated
276  * Asymmetric crypto transform string identifiers.
277  */
278 __rte_deprecated
279 const char *rte_crypto_asym_xform_strings[] = {
280 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
281 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
282 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
283 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
284 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
285 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
286 	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
287 	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
288 };
289 
290 /**
291  * Asymmetric crypto transform string identifiers.
292  * Not to be used by applications directly;
293  * use rte_cryptodev_asym_get_xform_string() instead.
294  */
295 static const char *
296 crypto_asym_xform_strings[] = {
297 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
298 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
299 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
300 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
301 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
302 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
303 	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
304 	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
305 	[RTE_CRYPTO_ASYM_XFORM_SM2]	= "sm2",
306 };
307 
308 /**
309  * Asymmetric crypto operation string identifiers.
310  */
311 const char *rte_crypto_asym_op_strings[] = {
312 	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
313 	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
314 	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
315 	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
316 };
317 
318 /**
319  * Asymmetric crypto key exchange operation string identifiers.
320  */
321 const char *rte_crypto_asym_ke_strings[] = {
322 	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
323 	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
324 	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
325 	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
326 };
327 
328 struct rte_cryptodev_sym_session_pool_private_data {
329 	uint16_t sess_data_sz;
330 	/**< Driver session data size */
331 	uint16_t user_data_sz;
332 	/**< Session user data will be placed after sess_data */
333 };
334 
335 /**
336  * The private data structure stored in the asym session mempool private data.
337  */
338 struct rte_cryptodev_asym_session_pool_private_data {
339 	uint16_t max_priv_session_sz;
340 	/**< Size of private session data used when creating mempool */
341 	uint16_t user_data_sz;
342 	/**< Session user data will be placed after sess_private_data */
343 };
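
/*
 * Illustrative sketch (not part of the library): how the sizes recorded in
 * the pool private data above map onto each element of a symmetric session
 * mempool, as created by rte_cryptodev_sym_session_pool_create() later in
 * this file. The layout is:
 *
 *	| struct rte_cryptodev_sym_session | sess_data_sz | user_data_sz |
 *
 * so the element size is roughly:
 *
 *	sizeof(struct rte_cryptodev_sym_session) +
 *		pool_priv->sess_data_sz + pool_priv->user_data_sz
 */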
344 
345 int
346 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
347 		const char *algo_string)
348 {
349 	unsigned int i;
350 	int ret = -1;	/* Invalid string */
351 
352 	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
353 		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
354 			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
355 			ret = 0;
356 			break;
357 		}
358 	}
359 
360 	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);
361 
362 	return ret;
363 }
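
/*
 * Illustrative usage sketch (algorithm name is an example): translating a
 * command-line string into its cipher enum value.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-ctr") < 0)
 *		printf("Unknown cipher algorithm\n");
 */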
364 
365 int
366 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
367 		const char *algo_string)
368 {
369 	unsigned int i;
370 	int ret = -1;	/* Invalid string */
371 
372 	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
373 		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
374 			*algo_enum = (enum rte_crypto_auth_algorithm) i;
375 			ret = 0;
376 			break;
377 		}
378 	}
379 
380 	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);
381 
382 	return ret;
383 }
384 
385 int
386 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
387 		const char *algo_string)
388 {
389 	unsigned int i;
390 	int ret = -1;	/* Invalid string */
391 
392 	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
393 		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
394 			*algo_enum = (enum rte_crypto_aead_algorithm) i;
395 			ret = 0;
396 			break;
397 		}
398 	}
399 
400 	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);
401 
402 	return ret;
403 }
404 
405 int
406 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
407 		const char *xform_string)
408 {
409 	unsigned int i;
410 	int ret = -1;	/* Invalid string */
411 
412 	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
413 		if (strcmp(xform_string,
414 			crypto_asym_xform_strings[i]) == 0) {
415 			*xform_enum = (enum rte_crypto_asym_xform_type) i;
416 			ret = 0;
417 			break;
418 		}
419 	}
420 
421 	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);
422 
423 	return ret;
424 }
425 
426 const char *
427 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
428 {
429 	const char *alg_str = NULL;
430 
431 	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
432 		alg_str = crypto_cipher_algorithm_strings[algo_enum];
433 
434 	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);
435 
436 	return alg_str;
437 }
438 
439 const char *
440 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
441 {
442 	const char *alg_str = NULL;
443 
444 	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
445 		alg_str = crypto_auth_algorithm_strings[algo_enum];
446 
447 	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);
448 
449 	return alg_str;
450 }
451 
452 const char *
453 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
454 {
455 	const char *alg_str = NULL;
456 
457 	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
458 		alg_str = crypto_aead_algorithm_strings[algo_enum];
459 
460 	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);
461 
462 	return alg_str;
463 }
464 
465 const char *
466 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
467 {
468 	const char *xform_str = NULL;
469 
470 	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
471 		xform_str = crypto_asym_xform_strings[xform_enum];
472 
473 	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);
474 
475 	return xform_str;
476 }
477 
478 /**
479  * The crypto auth operation string identifiers.
480  * They can be used on the application command line.
481  */
482 const char *
483 rte_crypto_auth_operation_strings[] = {
484 		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
485 		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
486 };
487 
488 const struct rte_cryptodev_symmetric_capability *
489 rte_cryptodev_sym_capability_get(uint8_t dev_id,
490 		const struct rte_cryptodev_sym_capability_idx *idx)
491 {
492 	const struct rte_cryptodev_capabilities *capability;
493 	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
494 	struct rte_cryptodev_info dev_info;
495 	int i = 0;
496 
497 	rte_cryptodev_info_get(dev_id, &dev_info);
498 
499 	while ((capability = &dev_info.capabilities[i++])->op !=
500 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
501 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
502 			continue;
503 
504 		if (capability->sym.xform_type != idx->type)
505 			continue;
506 
507 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
508 			capability->sym.auth.algo == idx->algo.auth) {
509 			sym_capability = &capability->sym;
510 			break;
511 		}
512 
513 		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
514 			capability->sym.cipher.algo == idx->algo.cipher) {
515 			sym_capability = &capability->sym;
516 			break;
517 		}
518 
519 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
520 				capability->sym.aead.algo == idx->algo.aead) {
521 			sym_capability = &capability->sym;
522 			break;
523 		}
524 	}
525 
526 	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
527 		dev_info.driver_id, idx->type, sym_capability);
528 
529 	return sym_capability;
530 }
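
/*
 * Illustrative usage sketch (device id is hypothetical): looking up the
 * symmetric capability of a device for a given cipher algorithm.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(0, &idx);
 *
 *	if (cap == NULL)
 *		printf("AES-CBC not supported by device 0\n");
 */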
531 
532 static int
533 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
534 {
535 	unsigned int next_size;
536 
537 	/* Check lower/upper bounds */
538 	if (size < range->min)
539 		return -1;
540 
541 	if (size > range->max)
542 		return -1;
543 
544 	/* If range is actually only one value, size is correct */
545 	if (range->increment == 0)
546 		return 0;
547 
548 	/* Check if value is one of the supported sizes */
549 	for (next_size = range->min; next_size <= range->max;
550 			next_size += range->increment)
551 		if (size == next_size)
552 			return 0;
553 
554 	return -1;
555 }
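
/*
 * Worked example of the range semantics above (values are illustrative):
 * with min = 16, max = 32 and increment = 8, the supported sizes are
 * 16, 24 and 32, while increment == 0 means the range is the single
 * value min == max.
 *
 *	struct rte_crypto_param_range r = {
 *		.min = 16, .max = 32, .increment = 8 };
 *
 *	param_range_check(24, &r);	returns 0 (supported)
 *	param_range_check(20, &r);	returns -1 (not on the increment grid)
 */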
556 
557 const struct rte_cryptodev_asymmetric_xform_capability *
558 rte_cryptodev_asym_capability_get(uint8_t dev_id,
559 		const struct rte_cryptodev_asym_capability_idx *idx)
560 {
561 	const struct rte_cryptodev_capabilities *capability;
562 	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
563 	struct rte_cryptodev_info dev_info;
564 	unsigned int i = 0;
565 
566 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
567 	rte_cryptodev_info_get(dev_id, &dev_info);
568 
569 	while ((capability = &dev_info.capabilities[i++])->op !=
570 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
571 		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
572 			continue;
573 
574 		if (capability->asym.xform_capa.xform_type == idx->type) {
575 			asym_cap = &capability->asym.xform_capa;
576 			break;
577 		}
578 	}
579 
580 	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
581 		dev_info.driver_id, idx->type, asym_cap);
582 
583 	return asym_cap;
584 }
585 
586 int
587 rte_cryptodev_sym_capability_check_cipher(
588 		const struct rte_cryptodev_symmetric_capability *capability,
589 		uint16_t key_size, uint16_t iv_size)
590 {
591 	int ret = 0; /* success */
592 
593 	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
594 		ret = -1;
595 		goto done;
596 	}
597 
598 	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
599 		ret = -1;
600 
601 done:
602 	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
603 		iv_size, ret);
604 
605 	return ret;
606 }
607 
608 int
609 rte_cryptodev_sym_capability_check_auth(
610 		const struct rte_cryptodev_symmetric_capability *capability,
611 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
612 {
613 	int ret = 0; /* success */
614 
615 	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
616 		ret = -1;
617 		goto done;
618 	}
619 
620 	if (param_range_check(digest_size,
621 		&capability->auth.digest_size) != 0) {
622 		ret = -1;
623 		goto done;
624 	}
625 
626 	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
627 		ret = -1;
628 
629 done:
630 	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
631 		digest_size, iv_size, ret);
632 
633 	return ret;
634 }
635 
636 int
637 rte_cryptodev_sym_capability_check_aead(
638 		const struct rte_cryptodev_symmetric_capability *capability,
639 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
640 		uint16_t iv_size)
641 {
642 	int ret = 0; /* success */
643 
644 	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
645 		ret = -1;
646 		goto done;
647 	}
648 
649 	if (param_range_check(digest_size,
650 		&capability->aead.digest_size) != 0) {
651 		ret = -1;
652 		goto done;
653 	}
654 
655 	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
656 		ret = -1;
657 		goto done;
658 	}
659 
660 	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
661 		ret = -1;
662 
663 done:
664 	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
665 		digest_size, aad_size, iv_size, ret);
666 
667 	return ret;
668 }
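
/*
 * Illustrative usage sketch (sizes are hypothetical): validating AES-GCM
 * parameters against a capability previously returned by
 * rte_cryptodev_sym_capability_get(), here a 16-byte key, 16-byte digest,
 * no AAD and a 12-byte IV.
 *
 *	if (rte_cryptodev_sym_capability_check_aead(cap, 16, 16, 0, 12) != 0)
 *		printf("AEAD parameters not supported\n");
 */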
669 
670 int
671 rte_cryptodev_asym_xform_capability_check_optype(
672 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
673 	enum rte_crypto_asym_op_type op_type)
674 {
675 	int ret = 0;
676 
677 	if (capability->op_types & (1 << op_type))
678 		ret = 1;
679 
680 	rte_cryptodev_trace_asym_xform_capability_check_optype(
681 		capability->op_types, op_type, ret);
682 
683 	return ret;
684 }
685 
686 int
687 rte_cryptodev_asym_xform_capability_check_modlen(
688 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
689 	uint16_t modlen)
690 {
691 	int ret = 0; /* success */
692 
693 	/* No need to check the limits if min or max is 0 */
694 	if (capability->modlen.min != 0) {
695 		if (modlen < capability->modlen.min) {
696 			ret = -1;
697 			goto done;
698 		}
699 	}
700 
701 	if (capability->modlen.max != 0) {
702 		if (modlen > capability->modlen.max) {
703 			ret = -1;
704 			goto done;
705 		}
706 	}
707 
708 	/* In any case, check if the given modlen is a multiple of the increment */
709 	if (capability->modlen.increment != 0) {
710 		if (modlen % (capability->modlen.increment))
711 			ret = -1;
712 	}
713 
714 done:
715 	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
716 		modlen, ret);
717 
718 	return ret;
719 }
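
/*
 * Illustrative usage sketch: checking whether a 2048-bit (256-byte) modulus
 * is supported by an asymmetric capability obtained from
 * rte_cryptodev_asym_capability_get(). The capability pointer is assumed
 * valid.
 *
 *	if (rte_cryptodev_asym_xform_capability_check_modlen(asym_cap, 256) != 0)
 *		printf("Modulus length not supported\n");
 */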
720 
721 /* spinlock for crypto device enq callbacks */
722 static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
723 
724 static void
725 cryptodev_cb_cleanup(struct rte_cryptodev *dev)
726 {
727 	struct rte_cryptodev_cb_rcu *list;
728 	struct rte_cryptodev_cb *cb, *next;
729 	uint16_t qp_id;
730 
731 	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
732 		return;
733 
734 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
735 		list = &dev->enq_cbs[qp_id];
736 		cb = list->next;
737 		while (cb != NULL) {
738 			next = cb->next;
739 			rte_free(cb);
740 			cb = next;
741 		}
742 
743 		rte_free(list->qsbr);
744 	}
745 
746 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
747 		list = &dev->deq_cbs[qp_id];
748 		cb = list->next;
749 		while (cb != NULL) {
750 			next = cb->next;
751 			rte_free(cb);
752 			cb = next;
753 		}
754 
755 		rte_free(list->qsbr);
756 	}
757 
758 	rte_free(dev->enq_cbs);
759 	dev->enq_cbs = NULL;
760 	rte_free(dev->deq_cbs);
761 	dev->deq_cbs = NULL;
762 }
763 
764 static int
765 cryptodev_cb_init(struct rte_cryptodev *dev)
766 {
767 	struct rte_cryptodev_cb_rcu *list;
768 	struct rte_rcu_qsbr *qsbr;
769 	uint16_t qp_id;
770 	size_t size;
771 
772 	/* Max threads set to 1, as only one data-plane thread accesses a queue pair */
773 	const uint32_t max_threads = 1;
774 
775 	dev->enq_cbs = rte_zmalloc(NULL,
776 				   sizeof(struct rte_cryptodev_cb_rcu) *
777 				   dev->data->nb_queue_pairs, 0);
778 	if (dev->enq_cbs == NULL) {
779 		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
780 		return -ENOMEM;
781 	}
782 
783 	dev->deq_cbs = rte_zmalloc(NULL,
784 				   sizeof(struct rte_cryptodev_cb_rcu) *
785 				   dev->data->nb_queue_pairs, 0);
786 	if (dev->deq_cbs == NULL) {
787 		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
788 		rte_free(dev->enq_cbs);
789 		return -ENOMEM;
790 	}
791 
792 	/* Create RCU QSBR variable */
793 	size = rte_rcu_qsbr_get_memsize(max_threads);
794 
795 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
796 		list = &dev->enq_cbs[qp_id];
797 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
798 		if (qsbr == NULL) {
799 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
800 				"queue_pair_id=%d", qp_id);
801 			goto cb_init_err;
802 		}
803 
804 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
805 			CDEV_LOG_ERR("Failed to initialize for RCU on "
806 				"queue_pair_id=%d", qp_id);
807 			goto cb_init_err;
808 		}
809 
810 		list->qsbr = qsbr;
811 	}
812 
813 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
814 		list = &dev->deq_cbs[qp_id];
815 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
816 		if (qsbr == NULL) {
817 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
818 				"queue_pair_id=%d", qp_id);
819 			goto cb_init_err;
820 		}
821 
822 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
823 			CDEV_LOG_ERR("Failed to initialize for RCU on "
824 				"queue_pair_id=%d", qp_id);
825 			goto cb_init_err;
826 		}
827 
828 		list->qsbr = qsbr;
829 	}
830 
831 	return 0;
832 
833 cb_init_err:
834 	cryptodev_cb_cleanup(dev);
835 	return -ENOMEM;
836 }
837 
838 const char *
839 rte_cryptodev_get_feature_name(uint64_t flag)
840 {
841 	rte_cryptodev_trace_get_feature_name(flag);
842 
843 	switch (flag) {
844 	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
845 		return "SYMMETRIC_CRYPTO";
846 	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
847 		return "ASYMMETRIC_CRYPTO";
848 	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
849 		return "SYM_OPERATION_CHAINING";
850 	case RTE_CRYPTODEV_FF_CPU_SSE:
851 		return "CPU_SSE";
852 	case RTE_CRYPTODEV_FF_CPU_AVX:
853 		return "CPU_AVX";
854 	case RTE_CRYPTODEV_FF_CPU_AVX2:
855 		return "CPU_AVX2";
856 	case RTE_CRYPTODEV_FF_CPU_AVX512:
857 		return "CPU_AVX512";
858 	case RTE_CRYPTODEV_FF_CPU_AESNI:
859 		return "CPU_AESNI";
860 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
861 		return "HW_ACCELERATED";
862 	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
863 		return "IN_PLACE_SGL";
864 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
865 		return "OOP_SGL_IN_SGL_OUT";
866 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
867 		return "OOP_SGL_IN_LB_OUT";
868 	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
869 		return "OOP_LB_IN_SGL_OUT";
870 	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
871 		return "OOP_LB_IN_LB_OUT";
872 	case RTE_CRYPTODEV_FF_CPU_NEON:
873 		return "CPU_NEON";
874 	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
875 		return "CPU_ARM_CE";
876 	case RTE_CRYPTODEV_FF_SECURITY:
877 		return "SECURITY_PROTOCOL";
878 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
879 		return "RSA_PRIV_OP_KEY_EXP";
880 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
881 		return "RSA_PRIV_OP_KEY_QT";
882 	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
883 		return "DIGEST_ENCRYPTED";
884 	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
885 		return "SYM_CPU_CRYPTO";
886 	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
887 		return "ASYM_SESSIONLESS";
888 	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
889 		return "SYM_SESSIONLESS";
890 	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
891 		return "NON_BYTE_ALIGNED_DATA";
892 	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
893 		return "CIPHER_MULTIPLE_DATA_UNITS";
894 	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
895 		return "CIPHER_WRAPPED_KEY";
896 	default:
897 		return NULL;
898 	}
899 }
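
/*
 * Illustrative usage sketch: printing the feature names of a device by
 * walking the feature_flags bitmask reported in struct rte_cryptodev_info
 * (dev_info is assumed to have been filled by rte_cryptodev_info_get()).
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((dev_info.feature_flags & flag) && name != NULL)
 *			printf("  %s\n", name);
 *	}
 */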
900 
901 struct rte_cryptodev *
902 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
903 {
904 	return &cryptodev_globals.devs[dev_id];
905 }
906 
907 struct rte_cryptodev *
908 rte_cryptodev_pmd_get_named_dev(const char *name)
909 {
910 	struct rte_cryptodev *dev;
911 	unsigned int i;
912 
913 	if (name == NULL)
914 		return NULL;
915 
916 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
917 		dev = &cryptodev_globals.devs[i];
918 
919 		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
920 				(strcmp(dev->data->name, name) == 0))
921 			return dev;
922 	}
923 
924 	return NULL;
925 }
926 
927 static inline uint8_t
928 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
929 {
930 	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
931 			rte_crypto_devices[dev_id].data == NULL)
932 		return 0;
933 
934 	return 1;
935 }
936 
937 unsigned int
938 rte_cryptodev_is_valid_dev(uint8_t dev_id)
939 {
940 	struct rte_cryptodev *dev = NULL;
941 	unsigned int ret = 1;
942 
943 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
944 		ret = 0;
945 		goto done;
946 	}
947 
948 	dev = rte_cryptodev_pmd_get_dev(dev_id);
949 	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
950 		ret = 0;
951 
952 done:
953 	rte_cryptodev_trace_is_valid_dev(dev_id, ret);
954 
955 	return ret;
956 }
957 
958 int
959 rte_cryptodev_get_dev_id(const char *name)
960 {
961 	unsigned i;
962 	int ret = -1;
963 
964 	if (name == NULL)
965 		return -1;
966 
967 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
968 		if (!rte_cryptodev_is_valid_device_data(i))
969 			continue;
970 		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
971 				== 0) &&
972 				(cryptodev_globals.devs[i].attached ==
973 						RTE_CRYPTODEV_ATTACHED)) {
974 			ret = (int)i;
975 			break;
976 		}
977 	}
978 
979 	rte_cryptodev_trace_get_dev_id(name, ret);
980 
981 	return ret;
982 }
983 
984 uint8_t
985 rte_cryptodev_count(void)
986 {
987 	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);
988 
989 	return cryptodev_globals.nb_devs;
990 }
991 
992 uint8_t
993 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
994 {
995 	uint8_t i, dev_count = 0;
996 
997 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
998 		if (cryptodev_globals.devs[i].driver_id == driver_id &&
999 			cryptodev_globals.devs[i].attached ==
1000 					RTE_CRYPTODEV_ATTACHED)
1001 			dev_count++;
1002 
1003 	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);
1004 
1005 	return dev_count;
1006 }
1007 
1008 uint8_t
1009 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
1010 	uint8_t nb_devices)
1011 {
1012 	uint8_t i, count = 0;
1013 	struct rte_cryptodev *devs = cryptodev_globals.devs;
1014 
1015 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
1016 		if (!rte_cryptodev_is_valid_device_data(i))
1017 			continue;
1018 
1019 		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
1020 			int cmp;
1021 
1022 			cmp = strncmp(devs[i].device->driver->name,
1023 					driver_name,
1024 					strlen(driver_name) + 1);
1025 
1026 			if (cmp == 0)
1027 				devices[count++] = devs[i].data->dev_id;
1028 		}
1029 	}
1030 
1031 	rte_cryptodev_trace_devices_get(driver_name, count);
1032 
1033 	return count;
1034 }
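
/*
 * Illustrative usage sketch (driver name is an example): collecting the
 * ids of all attached devices bound to a given driver.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_DIM(ids));
 *
 *	while (n--)
 *		printf("dev_id %d\n", ids[n]);
 */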
1035 
1036 void *
1037 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
1038 {
1039 	void *sec_ctx = NULL;
1040 
1041 	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
1042 			(rte_crypto_devices[dev_id].feature_flags &
1043 			RTE_CRYPTODEV_FF_SECURITY))
1044 		sec_ctx = rte_crypto_devices[dev_id].security_ctx;
1045 
1046 	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);
1047 
1048 	return sec_ctx;
1049 }
1050 
1051 int
1052 rte_cryptodev_socket_id(uint8_t dev_id)
1053 {
1054 	struct rte_cryptodev *dev;
1055 
1056 	if (!rte_cryptodev_is_valid_dev(dev_id))
1057 		return -1;
1058 
1059 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1060 
1061 	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
1062 		dev->data->socket_id);
1063 	return dev->data->socket_id;
1064 }
1065 
1066 static inline int
1067 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
1068 		int socket_id)
1069 {
1070 	char mz_name[RTE_MEMZONE_NAMESIZE];
1071 	const struct rte_memzone *mz;
1072 	int n;
1073 
1074 	/* generate memzone name */
1075 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
1076 	if (n >= (int)sizeof(mz_name))
1077 		return -EINVAL;
1078 
1079 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1080 		mz = rte_memzone_reserve(mz_name,
1081 				sizeof(struct rte_cryptodev_data),
1082 				socket_id, 0);
1083 		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
1084 				mz_name, mz);
1085 	} else {
1086 		mz = rte_memzone_lookup(mz_name);
1087 		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
1088 				mz_name, mz);
1089 	}
1090 
1091 	if (mz == NULL)
1092 		return -ENOMEM;
1093 
1094 	*data = mz->addr;
1095 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1096 		memset(*data, 0, sizeof(struct rte_cryptodev_data));
1097 
1098 	return 0;
1099 }
1100 
1101 static inline int
1102 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
1103 {
1104 	char mz_name[RTE_MEMZONE_NAMESIZE];
1105 	const struct rte_memzone *mz;
1106 	int n;
1107 
1108 	/* generate memzone name */
1109 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
1110 	if (n >= (int)sizeof(mz_name))
1111 		return -EINVAL;
1112 
1113 	mz = rte_memzone_lookup(mz_name);
1114 	if (mz == NULL)
1115 		return -ENOMEM;
1116 
1117 	RTE_ASSERT(*data == mz->addr);
1118 	*data = NULL;
1119 
1120 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1121 		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
1122 				mz_name, mz);
1123 		return rte_memzone_free(mz);
1124 	} else {
1125 		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
1126 				mz_name, mz);
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 static uint8_t
1133 rte_cryptodev_find_free_device_index(void)
1134 {
1135 	uint8_t dev_id;
1136 
1137 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
1138 		if (rte_crypto_devices[dev_id].attached ==
1139 				RTE_CRYPTODEV_DETACHED)
1140 			return dev_id;
1141 	}
1142 	return RTE_CRYPTO_MAX_DEVS;
1143 }
1144 
1145 struct rte_cryptodev *
1146 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
1147 {
1148 	struct rte_cryptodev *cryptodev;
1149 	uint8_t dev_id;
1150 
1151 	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
1152 		CDEV_LOG_ERR("Crypto device with name %s already "
1153 				"allocated!", name);
1154 		return NULL;
1155 	}
1156 
1157 	dev_id = rte_cryptodev_find_free_device_index();
1158 	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
1159 		CDEV_LOG_ERR("Reached maximum number of crypto devices");
1160 		return NULL;
1161 	}
1162 
1163 	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
1164 
1165 	if (cryptodev->data == NULL) {
1166 		struct rte_cryptodev_data **cryptodev_data =
1167 				&cryptodev_globals.data[dev_id];
1168 
1169 		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
1170 				socket_id);
1171 
1172 		if (retval < 0 || *cryptodev_data == NULL)
1173 			return NULL;
1174 
1175 		cryptodev->data = *cryptodev_data;
1176 
1177 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1178 			strlcpy(cryptodev->data->name, name,
1179 				RTE_CRYPTODEV_NAME_MAX_LEN);
1180 
1181 			cryptodev->data->dev_id = dev_id;
1182 			cryptodev->data->socket_id = socket_id;
1183 			cryptodev->data->dev_started = 0;
1184 			CDEV_LOG_DEBUG("PRIMARY:init data");
1185 		}
1186 
1187 		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
1188 				cryptodev->data->name,
1189 				cryptodev->data->dev_id,
1190 				cryptodev->data->socket_id,
1191 				cryptodev->data->dev_started);
1192 
1193 		/* init user callbacks */
1194 		TAILQ_INIT(&(cryptodev->link_intr_cbs));
1195 
1196 		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
1197 
1198 		cryptodev_globals.nb_devs++;
1199 	}
1200 
1201 	return cryptodev;
1202 }
1203 
1204 int
1205 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
1206 {
1207 	int ret;
1208 	uint8_t dev_id;
1209 
1210 	if (cryptodev == NULL)
1211 		return -EINVAL;
1212 
1213 	dev_id = cryptodev->data->dev_id;
1214 
1215 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
1216 
1217 	/* Close device only if device operations have been set */
1218 	if (cryptodev->dev_ops) {
1219 		ret = rte_cryptodev_close(dev_id);
1220 		if (ret < 0)
1221 			return ret;
1222 	}
1223 
1224 	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
1225 	if (ret < 0)
1226 		return ret;
1227 
1228 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
1229 	cryptodev_globals.nb_devs--;
1230 	return 0;
1231 }
1232 
1233 uint16_t
1234 rte_cryptodev_queue_pair_count(uint8_t dev_id)
1235 {
1236 	struct rte_cryptodev *dev;
1237 
1238 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
1239 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1240 		return 0;
1241 	}
1242 
1243 	dev = &rte_crypto_devices[dev_id];
1244 	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
1245 		dev->data->socket_id, dev->data->dev_id,
1246 		dev->data->nb_queue_pairs);
1247 
1248 	return dev->data->nb_queue_pairs;
1249 }
1250 
1251 static int
1252 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
1253 		int socket_id)
1254 {
1255 	struct rte_cryptodev_info dev_info;
1256 	void **qp;
1257 	unsigned i;
1258 
1259 	if ((dev == NULL) || (nb_qpairs < 1)) {
1260 		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
1261 							dev, nb_qpairs);
1262 		return -EINVAL;
1263 	}
1264 
1265 	CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
1266 			nb_qpairs, dev->data->dev_id);
1267 
1268 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
1269 
1270 	if (*dev->dev_ops->dev_infos_get == NULL)
1271 		return -ENOTSUP;
1272 	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1273 
1274 	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
1275 		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
1276 				nb_qpairs, dev->data->dev_id);
1277 		return -EINVAL;
1278 	}
1279 
1280 	if (dev->data->queue_pairs == NULL) { /* first time configuration */
1281 		dev->data->queue_pairs = rte_zmalloc_socket(
1282 				"cryptodev->queue_pairs",
1283 				sizeof(dev->data->queue_pairs[0]) *
1284 				dev_info.max_nb_queue_pairs,
1285 				RTE_CACHE_LINE_SIZE, socket_id);
1286 
1287 		if (dev->data->queue_pairs == NULL) {
1288 			dev->data->nb_queue_pairs = 0;
1289 			CDEV_LOG_ERR("failed to get memory for qp meta data, "
1290 							"nb_queues %u",
1291 							nb_qpairs);
1292 			return -(ENOMEM);
1293 		}
1294 	} else { /* re-configure */
1295 		int ret;
1296 		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
1297 
1298 		qp = dev->data->queue_pairs;
1299 
1300 		if (*dev->dev_ops->queue_pair_release == NULL)
1301 			return -ENOTSUP;
1302 
1303 		for (i = nb_qpairs; i < old_nb_queues; i++) {
1304 			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
1305 			if (ret < 0)
1306 				return ret;
1307 			qp[i] = NULL;
1308 		}
1309 
1310 	}
1311 	dev->data->nb_queue_pairs = nb_qpairs;
1312 	return 0;
1313 }
1314 
1315 int
1316 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
1317 {
1318 	struct rte_cryptodev *dev;
1319 	int diag;
1320 
1321 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1322 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1323 		return -EINVAL;
1324 	}
1325 
1326 	dev = &rte_crypto_devices[dev_id];
1327 
1328 	if (dev->data->dev_started) {
1329 		CDEV_LOG_ERR(
1330 		    "device %d must be stopped to allow configuration", dev_id);
1331 		return -EBUSY;
1332 	}
1333 
1334 	if (*dev->dev_ops->dev_configure == NULL)
1335 		return -ENOTSUP;
1336 
1337 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1338 	cryptodev_cb_cleanup(dev);
1339 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1340 
1341 	/* Setup new number of queue pairs and reconfigure device. */
1342 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
1343 			config->socket_id);
1344 	if (diag != 0) {
1345 		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
1346 				dev_id, diag);
1347 		return diag;
1348 	}
1349 
1350 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1351 	diag = cryptodev_cb_init(dev);
1352 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1353 	if (diag) {
1354 		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
1355 		return diag;
1356 	}
1357 
1358 	rte_cryptodev_trace_configure(dev_id, config);
1359 	return (*dev->dev_ops->dev_configure)(dev, config);
1360 }
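
/*
 * Illustrative configuration sketch (values are hypothetical): a device must
 * be stopped, then configured with the number of queue pairs it should
 * expose, before the individual queue pairs are set up and it is started.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 2,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "Failed to configure cryptodev %u", dev_id);
 */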
1361 
1362 int
1363 rte_cryptodev_start(uint8_t dev_id)
1364 {
1365 	struct rte_cryptodev *dev;
1366 	int diag;
1367 
1368 	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1369 
1370 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1371 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1372 		return -EINVAL;
1373 	}
1374 
1375 	dev = &rte_crypto_devices[dev_id];
1376 
1377 	if (*dev->dev_ops->dev_start == NULL)
1378 		return -ENOTSUP;
1379 
1380 	if (dev->data->dev_started != 0) {
1381 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1382 			dev_id);
1383 		return 0;
1384 	}
1385 
1386 	diag = (*dev->dev_ops->dev_start)(dev);
1387 	/* expose selection of PMD fast-path functions */
1388 	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
1389 
1390 	rte_cryptodev_trace_start(dev_id, diag);
1391 	if (diag == 0)
1392 		dev->data->dev_started = 1;
1393 	else
1394 		return diag;
1395 
1396 	return 0;
1397 }
1398 
1399 void
1400 rte_cryptodev_stop(uint8_t dev_id)
1401 {
1402 	struct rte_cryptodev *dev;
1403 
1404 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1405 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1406 		return;
1407 	}
1408 
1409 	dev = &rte_crypto_devices[dev_id];
1410 
1411 	if (*dev->dev_ops->dev_stop == NULL)
1412 		return;
1413 
1414 	if (dev->data->dev_started == 0) {
1415 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1416 			dev_id);
1417 		return;
1418 	}
1419 
1420 	/* point fast-path functions to dummy ones */
1421 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
1422 
1423 	(*dev->dev_ops->dev_stop)(dev);
1424 	rte_cryptodev_trace_stop(dev_id);
1425 	dev->data->dev_started = 0;
1426 }
1427 
1428 int
1429 rte_cryptodev_close(uint8_t dev_id)
1430 {
1431 	struct rte_cryptodev *dev;
1432 	int retval;
1433 
1434 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1435 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1436 		return -1;
1437 	}
1438 
1439 	dev = &rte_crypto_devices[dev_id];
1440 
1441 	/* Device must be stopped before it can be closed */
1442 	if (dev->data->dev_started == 1) {
1443 		CDEV_LOG_ERR("Device %u must be stopped before closing",
1444 				dev_id);
1445 		return -EBUSY;
1446 	}
1447 
1448 	/* We can't close the device if there are outstanding sessions in use */
1449 	if (dev->data->session_pool != NULL) {
1450 		if (!rte_mempool_full(dev->data->session_pool)) {
1451 			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1452 					"has sessions still in use, free "
1453 					"all sessions before calling close",
1454 					(unsigned)dev_id);
1455 			return -EBUSY;
1456 		}
1457 	}
1458 
1459 	if (*dev->dev_ops->dev_close == NULL)
1460 		return -ENOTSUP;
1461 	retval = (*dev->dev_ops->dev_close)(dev);
1462 	rte_cryptodev_trace_close(dev_id, retval);
1463 
1464 	if (retval < 0)
1465 		return retval;
1466 
1467 	return 0;
1468 }
1469 
1470 int
1471 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1472 {
1473 	struct rte_cryptodev *dev;
1474 	int ret = 0;
1475 
1476 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1477 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1478 		ret = -EINVAL;
1479 		goto done;
1480 	}
1481 
1482 	dev = &rte_crypto_devices[dev_id];
1483 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1484 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1485 		ret = -EINVAL;
1486 		goto done;
1487 	}
1488 	void **qps = dev->data->queue_pairs;
1489 
1490 	if (qps[queue_pair_id])	{
1491 		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1492 			queue_pair_id, dev_id);
1493 		ret = 1;
1494 		goto done;
1495 	}
1496 
1497 	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1498 		queue_pair_id, dev_id);
1499 
1500 done:
1501 	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);
1502 
1503 	return ret;
1504 }
1505 
1506 static uint8_t
1507 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
1508 	uint32_t sess_priv_size)
1509 {
1510 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1511 
1512 	if (!mp)
1513 		return 0;
1514 
1515 	pool_priv = rte_mempool_get_priv(mp);
1516 
1517 	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1518 			pool_priv->sess_data_sz < sess_priv_size)
1519 		return 0;
1520 
1521 	return 1;
1522 }
1523 
1524 int
1525 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1526 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1527 
1528 {
1529 	struct rte_cryptodev *dev;
1530 
1531 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1532 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1533 		return -EINVAL;
1534 	}
1535 
1536 	dev = &rte_crypto_devices[dev_id];
1537 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1538 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1539 		return -EINVAL;
1540 	}
1541 
1542 	if (!qp_conf) {
1543 		CDEV_LOG_ERR("qp_conf cannot be NULL");
1544 		return -EINVAL;
1545 	}
1546 
1547 	if (qp_conf->mp_session) {
1548 		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1549 
1550 		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1551 		if (!pool_priv || qp_conf->mp_session->private_data_size <
1552 				sizeof(*pool_priv)) {
1553 			CDEV_LOG_ERR("Invalid mempool");
1554 			return -EINVAL;
1555 		}
1556 
1557 		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
1558 					rte_cryptodev_sym_get_private_session_size(dev_id))) {
1559 			CDEV_LOG_ERR("Invalid mempool");
1560 			return -EINVAL;
1561 		}
1562 	}
1563 
1564 	if (dev->data->dev_started) {
1565 		CDEV_LOG_ERR(
1566 		    "device %d must be stopped to allow configuration", dev_id);
1567 		return -EBUSY;
1568 	}
1569 
1570 	if (*dev->dev_ops->queue_pair_setup == NULL)
1571 		return -ENOTSUP;
1572 
1573 	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1574 	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1575 			socket_id);
1576 }
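
/*
 * Illustrative queue pair setup sketch (descriptor count and session pool
 * are hypothetical): called once per queue pair after
 * rte_cryptodev_configure() and before rte_cryptodev_start().
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_cryptodev_socket_id(dev_id)) < 0)
 *		printf("Failed to set up queue pair 0\n");
 */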
1577 
1578 struct rte_cryptodev_cb *
1579 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1580 			       uint16_t qp_id,
1581 			       rte_cryptodev_callback_fn cb_fn,
1582 			       void *cb_arg)
1583 {
1584 	struct rte_cryptodev *dev;
1585 	struct rte_cryptodev_cb_rcu *list;
1586 	struct rte_cryptodev_cb *cb, *tail;
1587 
1588 	if (!cb_fn) {
1589 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1590 		rte_errno = EINVAL;
1591 		return NULL;
1592 	}
1593 
1594 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1595 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1596 		rte_errno = ENODEV;
1597 		return NULL;
1598 	}
1599 
1600 	dev = &rte_crypto_devices[dev_id];
1601 	if (qp_id >= dev->data->nb_queue_pairs) {
1602 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1603 		rte_errno = ENODEV;
1604 		return NULL;
1605 	}
1606 
1607 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1608 	if (cb == NULL) {
1609 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1610 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1611 		rte_errno = ENOMEM;
1612 		return NULL;
1613 	}
1614 
1615 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1616 
1617 	cb->fn = cb_fn;
1618 	cb->arg = cb_arg;
1619 
1620 	/* Add the callbacks in fifo order. */
1621 	list = &dev->enq_cbs[qp_id];
1622 	tail = list->next;
1623 
1624 	if (tail) {
1625 		while (tail->next)
1626 			tail = tail->next;
1627 		/* Stores to cb->fn and cb->param should complete before
1628 		 * cb is visible to data plane.
1629 		 */
1630 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1631 	} else {
1632 		/* Stores to cb->fn and cb->param should complete before
1633 		 * cb is visible to data plane.
1634 		 */
1635 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1636 	}
1637 
1638 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1639 
1640 	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
1641 	return cb;
1642 }
1643 
1644 int
1645 rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1646 				  uint16_t qp_id,
1647 				  struct rte_cryptodev_cb *cb)
1648 {
1649 	struct rte_cryptodev *dev;
1650 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1651 	struct rte_cryptodev_cb_rcu *list;
1652 	int ret;
1653 
1654 	ret = -EINVAL;
1655 
1656 	if (!cb) {
1657 		CDEV_LOG_ERR("Callback is NULL");
1658 		return -EINVAL;
1659 	}
1660 
1661 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1662 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1663 		return -ENODEV;
1664 	}
1665 
1666 	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);
1667 
1668 	dev = &rte_crypto_devices[dev_id];
1669 	if (qp_id >= dev->data->nb_queue_pairs) {
1670 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1671 		return -ENODEV;
1672 	}
1673 
1674 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1675 	if (dev->enq_cbs == NULL) {
1676 		CDEV_LOG_ERR("Callback not initialized");
1677 		goto cb_err;
1678 	}
1679 
1680 	list = &dev->enq_cbs[qp_id];
1681 	if (list == NULL) {
1682 		CDEV_LOG_ERR("Callback list is NULL");
1683 		goto cb_err;
1684 	}
1685 
1686 	if (list->qsbr == NULL) {
1687 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1688 		goto cb_err;
1689 	}
1690 
1691 	prev_cb = &list->next;
1692 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1693 		curr_cb = *prev_cb;
1694 		if (curr_cb == cb) {
1695 			/* Remove the user cb from the callback list. */
1696 			__atomic_store_n(prev_cb, curr_cb->next,
1697 				__ATOMIC_RELAXED);
1698 			ret = 0;
1699 			break;
1700 		}
1701 	}
1702 
1703 	if (!ret) {
1704 		/* Call sync with invalid thread id as this is part of
1705 		 * control plane API
1706 		 */
1707 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1708 		rte_free(cb);
1709 	}
1710 
1711 cb_err:
1712 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1713 	return ret;
1714 }
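
/*
 * Illustrative enqueue-callback sketch (callback body is hypothetical): a
 * callback added with rte_cryptodev_add_enq_callback() runs on every
 * enqueue burst for that queue pair until it is removed again.
 *
 *	static uint16_t
 *	count_ops(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, 0, count_ops, &counter);
 *	...
 *	rte_cryptodev_remove_enq_callback(dev_id, 0, cb);
 */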
1715 
1716 struct rte_cryptodev_cb *
1717 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1718 			       uint16_t qp_id,
1719 			       rte_cryptodev_callback_fn cb_fn,
1720 			       void *cb_arg)
1721 {
1722 	struct rte_cryptodev *dev;
1723 	struct rte_cryptodev_cb_rcu *list;
1724 	struct rte_cryptodev_cb *cb, *tail;
1725 
1726 	if (!cb_fn) {
1727 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1728 		rte_errno = EINVAL;
1729 		return NULL;
1730 	}
1731 
1732 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1733 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1734 		rte_errno = ENODEV;
1735 		return NULL;
1736 	}
1737 
1738 	dev = &rte_crypto_devices[dev_id];
1739 	if (qp_id >= dev->data->nb_queue_pairs) {
1740 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1741 		rte_errno = ENODEV;
1742 		return NULL;
1743 	}
1744 
1745 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1746 	if (cb == NULL) {
1747 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1748 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1749 		rte_errno = ENOMEM;
1750 		return NULL;
1751 	}
1752 
1753 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1754 
1755 	cb->fn = cb_fn;
1756 	cb->arg = cb_arg;
1757 
1758 	/* Add the callbacks in fifo order. */
1759 	list = &dev->deq_cbs[qp_id];
1760 	tail = list->next;
1761 
1762 	if (tail) {
1763 		while (tail->next)
1764 			tail = tail->next;
1765 		/* Stores to cb->fn and cb->param should complete before
1766 		 * cb is visible to data plane.
1767 		 */
1768 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1769 	} else {
1770 		/* Stores to cb->fn and cb->param should complete before
1771 		 * cb is visible to data plane.
1772 		 */
1773 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1774 	}
1775 
1776 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1777 
1778 	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);
1779 
1780 	return cb;
1781 }
1782 
1783 int
1784 rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1785 				  uint16_t qp_id,
1786 				  struct rte_cryptodev_cb *cb)
1787 {
1788 	struct rte_cryptodev *dev;
1789 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1790 	struct rte_cryptodev_cb_rcu *list;
1791 	int ret;
1792 
1793 	ret = -EINVAL;
1794 
1795 	if (!cb) {
1796 		CDEV_LOG_ERR("Callback is NULL");
1797 		return -EINVAL;
1798 	}
1799 
1800 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1801 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1802 		return -ENODEV;
1803 	}
1804 
1805 	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);
1806 
1807 	dev = &rte_crypto_devices[dev_id];
1808 	if (qp_id >= dev->data->nb_queue_pairs) {
1809 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1810 		return -ENODEV;
1811 	}
1812 
1813 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1814 	if (dev->deq_cbs == NULL) {
1815 		CDEV_LOG_ERR("Callback not initialized");
1816 		goto cb_err;
1817 	}
1818 
1819 	list = &dev->deq_cbs[qp_id];
1820 	if (list == NULL) {
1821 		CDEV_LOG_ERR("Callback list is NULL");
1822 		goto cb_err;
1823 	}
1824 
1825 	if (list->qsbr == NULL) {
1826 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1827 		goto cb_err;
1828 	}
1829 
1830 	prev_cb = &list->next;
1831 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1832 		curr_cb = *prev_cb;
1833 		if (curr_cb == cb) {
1834 			/* Remove the user cb from the callback list. */
1835 			__atomic_store_n(prev_cb, curr_cb->next,
1836 				__ATOMIC_RELAXED);
1837 			ret = 0;
1838 			break;
1839 		}
1840 	}
1841 
1842 	if (!ret) {
1843 		/* Call sync with invalid thread id as this is part of
1844 		 * control plane API
1845 		 */
1846 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1847 		rte_free(cb);
1848 	}
1849 
1850 cb_err:
1851 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1852 	return ret;
1853 }
1854 
1855 int
1856 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1857 {
1858 	struct rte_cryptodev *dev;
1859 
1860 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1861 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1862 		return -ENODEV;
1863 	}
1864 
1865 	if (stats == NULL) {
1866 		CDEV_LOG_ERR("Invalid stats ptr");
1867 		return -EINVAL;
1868 	}
1869 
1870 	dev = &rte_crypto_devices[dev_id];
1871 	memset(stats, 0, sizeof(*stats));
1872 
1873 	if (*dev->dev_ops->stats_get == NULL)
1874 		return -ENOTSUP;
1875 	(*dev->dev_ops->stats_get)(dev, stats);
1876 
1877 	rte_cryptodev_trace_stats_get(dev_id, stats);
1878 	return 0;
1879 }
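
/*
 * Illustrative usage sketch: reading and then resetting per-device
 * statistics.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enqueued %" PRIu64 ", dequeued %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count);
 *	rte_cryptodev_stats_reset(dev_id);
 */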
1880 
1881 void
1882 rte_cryptodev_stats_reset(uint8_t dev_id)
1883 {
1884 	struct rte_cryptodev *dev;
1885 
1886 	rte_cryptodev_trace_stats_reset(dev_id);
1887 
1888 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1889 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1890 		return;
1891 	}
1892 
1893 	dev = &rte_crypto_devices[dev_id];
1894 
1895 	if (*dev->dev_ops->stats_reset == NULL)
1896 		return;
1897 	(*dev->dev_ops->stats_reset)(dev);
1898 }
1899 
1900 void
1901 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1902 {
1903 	struct rte_cryptodev *dev;
1904 
1905 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1906 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1907 		return;
1908 	}
1909 
1910 	dev = &rte_crypto_devices[dev_id];
1911 
1912 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1913 
1914 	if (*dev->dev_ops->dev_infos_get == NULL)
1915 		return;
1916 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1917 
1918 	dev_info->driver_name = dev->device->driver->name;
1919 	dev_info->device = dev->device;
1920 
1921 	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
1922 
1923 }
1924 
1925 int
1926 rte_cryptodev_callback_register(uint8_t dev_id,
1927 			enum rte_cryptodev_event_type event,
1928 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1929 {
1930 	struct rte_cryptodev *dev;
1931 	struct rte_cryptodev_callback *user_cb;
1932 
1933 	if (!cb_fn)
1934 		return -EINVAL;
1935 
1936 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1937 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1938 		return -EINVAL;
1939 	}
1940 
1941 	dev = &rte_crypto_devices[dev_id];
1942 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1943 
1944 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1945 		if (user_cb->cb_fn == cb_fn &&
1946 			user_cb->cb_arg == cb_arg &&
1947 			user_cb->event == event) {
1948 			break;
1949 		}
1950 	}
1951 
1952 	/* create a new callback. */
1953 	if (user_cb == NULL) {
1954 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1955 				sizeof(struct rte_cryptodev_callback), 0);
1956 		if (user_cb != NULL) {
1957 			user_cb->cb_fn = cb_fn;
1958 			user_cb->cb_arg = cb_arg;
1959 			user_cb->event = event;
1960 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1961 		}
1962 	}
1963 
1964 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1965 
1966 	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
1967 	return (user_cb == NULL) ? -ENOMEM : 0;
1968 }
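
/*
 * Illustrative event-callback sketch (handler is hypothetical): the same
 * (cb_fn, cb_arg, event) triple is registered only once, and the handler
 * is later invoked from rte_cryptodev_pmd_callback_process().
 *
 *	static void
 *	on_dev_error(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *arg)
 *	{
 *		printf("device %d reported event %d\n", dev_id, event);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			on_dev_error, NULL);
 */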
1969 
1970 int
1971 rte_cryptodev_callback_unregister(uint8_t dev_id,
1972 			enum rte_cryptodev_event_type event,
1973 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1974 {
1975 	int ret;
1976 	struct rte_cryptodev *dev;
1977 	struct rte_cryptodev_callback *cb, *next;
1978 
1979 	if (!cb_fn)
1980 		return -EINVAL;
1981 
1982 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1983 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1984 		return -EINVAL;
1985 	}
1986 
1987 	dev = &rte_crypto_devices[dev_id];
1988 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1989 
1990 	ret = 0;
1991 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1992 
1993 		next = TAILQ_NEXT(cb, next);
1994 
1995 		if (cb->cb_fn != cb_fn || cb->event != event ||
1996 				(cb->cb_arg != (void *)-1 &&
1997 				cb->cb_arg != cb_arg))
1998 			continue;
1999 
2000 		/*
2001 		 * if this callback is not executing right now,
2002 		 * then remove it.
2003 		 */
2004 		if (cb->active == 0) {
2005 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2006 			rte_free(cb);
2007 		} else {
2008 			ret = -EAGAIN;
2009 		}
2010 	}
2011 
2012 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
2013 
2014 	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
2015 	return ret;
2016 }
2017 
2018 void
2019 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
2020 	enum rte_cryptodev_event_type event)
2021 {
2022 	struct rte_cryptodev_callback *cb_lst;
2023 	struct rte_cryptodev_callback dev_cb;
2024 
2025 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
2026 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2027 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2028 			continue;
2029 		dev_cb = *cb_lst;
2030 		cb_lst->active = 1;
2031 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
2032 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
2033 						dev_cb.cb_arg);
2034 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
2035 		cb_lst->active = 0;
2036 	}
2037 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
2038 }
2039 
2040 int
2041 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
2042 {
2043 	struct rte_cryptodev *dev;
2044 
2045 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2046 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2047 		return -EINVAL;
2048 	}
2049 	dev = &rte_crypto_devices[dev_id];
2050 
2051 	if (qp_id >= dev->data->nb_queue_pairs)
2052 		return -EINVAL;
2053 	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
2054 		return -ENOTSUP;
2055 
2056 	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
2057 }
2058 
2059 struct rte_mempool *
2060 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
2061 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
2062 	int socket_id)
2063 {
2064 	struct rte_mempool *mp;
2065 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2066 	uint32_t obj_sz;
2067 
2068 	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;
2069 
2070 	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2071 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
2072 			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
2073 			NULL, NULL,
2074 			socket_id, 0);
2075 	if (mp == NULL) {
2076 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2077 			__func__, name, rte_errno);
2078 		return NULL;
2079 	}
2080 
2081 	pool_priv = rte_mempool_get_priv(mp);
2082 	if (!pool_priv) {
2083 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2084 			__func__, name);
2085 		rte_mempool_free(mp);
2086 		return NULL;
2087 	}
2088 
2089 	pool_priv->sess_data_sz = elt_size;
2090 	pool_priv->user_data_sz = user_data_size;
2091 
2092 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
2093 		elt_size, cache_size, user_data_size, mp);
2094 	return mp;
2095 }
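/*
 * Usage sketch (illustrative): sizing a symmetric session pool from a
 * device's private session size. The pool name, element count, cache size
 * and zero user data size are placeholders.
 *
 *	uint32_t sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *pool = rte_cryptodev_sym_session_pool_create(
 *			"sym_sess_pool", 1024, sz, 128, 0, rte_socket_id());
 */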
2096 
2097 struct rte_mempool *
2098 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
2099 	uint32_t cache_size, uint16_t user_data_size, int socket_id)
2100 {
2101 	struct rte_mempool *mp;
2102 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2103 	uint32_t obj_sz, obj_sz_aligned;
2104 	uint8_t dev_id;
2105 	unsigned int priv_sz, max_priv_sz = 0;
2106 
2107 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2108 		if (rte_cryptodev_is_valid_dev(dev_id)) {
2109 			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
2110 			if (priv_sz > max_priv_sz)
2111 				max_priv_sz = priv_sz;
2112 		}
2113 	if (max_priv_sz == 0) {
2114 		CDEV_LOG_INFO("Could not get max private session size from any device");
2115 		return NULL;
2116 	}
2117 
2118 	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
2119 			user_data_size;
2120 	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2121 
2122 	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
2123 			(uint32_t)(sizeof(*pool_priv)),
2124 			NULL, NULL, NULL, NULL,
2125 			socket_id, 0);
2126 	if (mp == NULL) {
2127 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2128 			__func__, name, rte_errno);
2129 		return NULL;
2130 	}
2131 
2132 	pool_priv = rte_mempool_get_priv(mp);
2133 	if (!pool_priv) {
2134 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2135 			__func__, name);
2136 		rte_mempool_free(mp);
2137 		return NULL;
2138 	}
2139 	pool_priv->max_priv_session_sz = max_priv_sz;
2140 	pool_priv->user_data_sz = user_data_size;
2141 
2142 	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
2143 		user_data_size, cache_size, mp);
2144 	return mp;
2145 }
2146 
2147 void *
2148 rte_cryptodev_sym_session_create(uint8_t dev_id,
2149 		struct rte_crypto_sym_xform *xforms,
2150 		struct rte_mempool *mp)
2151 {
2152 	struct rte_cryptodev *dev;
2153 	struct rte_cryptodev_sym_session *sess;
2154 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2155 	uint32_t sess_priv_sz;
2156 	int ret;
2157 
2158 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2159 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2160 		rte_errno = EINVAL;
2161 		return NULL;
2162 	}
2163 
2164 	if (xforms == NULL) {
2165 		CDEV_LOG_ERR("Invalid xform");
2166 		rte_errno = EINVAL;
2167 		return NULL;
2168 	}
2169 
2170 	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
2171 	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
2172 		CDEV_LOG_ERR("Invalid mempool");
2173 		rte_errno = EINVAL;
2174 		return NULL;
2175 	}
2176 
2177 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2178 
2179 	/* Allocate a session structure from the session pool */
2180 	if (rte_mempool_get(mp, (void **)&sess)) {
2181 		CDEV_LOG_ERR("couldn't get object from session mempool");
2182 		rte_errno = ENOMEM;
2183 		return NULL;
2184 	}
2185 
2186 	pool_priv = rte_mempool_get_priv(mp);
2187 	sess->driver_id = dev->driver_id;
2188 	sess->sess_data_sz = pool_priv->sess_data_sz;
2189 	sess->user_data_sz = pool_priv->user_data_sz;
2190 	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
2191 		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);
2192 
2193 	if (dev->dev_ops->sym_session_configure == NULL) {
2194 		rte_errno = ENOTSUP;
2195 		goto error_exit;
2196 	}
2197 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2198 
2199 	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
2200 	if (ret < 0) {
2201 		rte_errno = -ret;
2202 		goto error_exit;
2203 	}
2204 	sess->driver_id = dev->driver_id;
2205 
2206 	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);
2207 
2208 	return (void *)sess;
2209 error_exit:
2210 	rte_mempool_put(mp, (void *)sess);
2211 	return NULL;
2212 }
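/*
 * Usage sketch (illustrative): creating a symmetric session for AES-CBC
 * encryption. The key buffer, IV_OFFSET, dev_id and pool are placeholders
 * supplied by the application; the pool is assumed to come from
 * rte_cryptodev_sym_session_pool_create().
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform, pool);
 *
 *	On failure NULL is returned and rte_errno holds the reason.
 */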
2213 
2214 int
2215 rte_cryptodev_asym_session_create(uint8_t dev_id,
2216 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
2217 		void **session)
2218 {
2219 	struct rte_cryptodev_asym_session *sess;
2220 	uint32_t session_priv_data_sz;
2221 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2222 	unsigned int session_header_size =
2223 			rte_cryptodev_asym_get_header_session_size();
2224 	struct rte_cryptodev *dev;
2225 	int ret;
2226 
2227 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2228 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2229 		return -EINVAL;
2230 	}
2231 
2232 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2233 
2234 	if (dev == NULL)
2235 		return -EINVAL;
2236 
2237 	if (!mp) {
2238 		CDEV_LOG_ERR("invalid mempool");
2239 		return -EINVAL;
2240 	}
2241 
2242 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
2243 			dev_id);
2244 	pool_priv = rte_mempool_get_priv(mp);
2245 
2246 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
2247 		CDEV_LOG_DEBUG(
2248 			"The private session data size used when creating the mempool is smaller than this device's private session data.");
2249 		return -EINVAL;
2250 	}
2251 
2252 	/* Verify that the mempool's elements are big enough to hold a session. */
2253 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
2254 		CDEV_LOG_ERR(
2255 			"mempool elements too small to hold session objects");
2256 		return -EINVAL;
2257 	}
2258 
2259 	/* Allocate a session structure from the session pool */
2260 	if (rte_mempool_get(mp, session)) {
2261 		CDEV_LOG_ERR("couldn't get object from session mempool");
2262 		return -ENOMEM;
2263 	}
2264 
2265 	sess = *session;
2266 	sess->driver_id = dev->driver_id;
2267 	sess->user_data_sz = pool_priv->user_data_sz;
2268 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
2269 
2270 	/* Clear device private session data and user data area. */
2271 	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
2272 
2273 	if (*dev->dev_ops->asym_session_configure == NULL)
2274 		return -ENOTSUP;
2275 
2276 	if (sess->sess_private_data[0] == 0) {
2277 		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
2278 		if (ret < 0) {
2279 			CDEV_LOG_ERR(
2280 				"dev_id %d failed to configure session details",
2281 				dev_id);
2282 			return ret;
2283 		}
2284 	}
2285 
2286 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2287 	return 0;
2288 }
2289 
2290 int
2291 rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
2292 {
2293 	struct rte_cryptodev *dev;
2294 	struct rte_mempool *sess_mp;
2295 	struct rte_cryptodev_sym_session *sess = _sess;
2296 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2297 
2298 	if (sess == NULL)
2299 		return -EINVAL;
2300 
2301 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2302 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2303 		return -EINVAL;
2304 	}
2305 
2306 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2307 
2308 	if (dev == NULL || sess == NULL)
2309 		return -EINVAL;
2310 
2311 	sess_mp = rte_mempool_from_obj(sess);
2312 	if (!sess_mp)
2313 		return -EINVAL;
2314 	pool_priv = rte_mempool_get_priv(sess_mp);
2315 
2316 	if (sess->driver_id != dev->driver_id) {
2317 		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
2318 			sess->driver_id, dev->driver_id);
2319 		return -EINVAL;
2320 	}
2321 
2322 	if (*dev->dev_ops->sym_session_clear == NULL)
2323 		return -ENOTSUP;
2324 
2325 	dev->dev_ops->sym_session_clear(dev, sess);
2326 
2327 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2328 
2329 	/* Return session to mempool */
2330 	rte_mempool_put(sess_mp, sess);
2331 
2332 	rte_cryptodev_trace_sym_session_free(dev_id, sess);
2333 	return 0;
2334 }
2335 
2336 int
2337 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2338 {
2339 	struct rte_mempool *sess_mp;
2340 	struct rte_cryptodev *dev;
2341 
2342 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2343 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2344 		return -EINVAL;
2345 	}
2346 
2347 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2348 
2349 	if (dev == NULL || sess == NULL)
2350 		return -EINVAL;
2351 
2352 	if (*dev->dev_ops->asym_session_clear == NULL)
2353 		return -ENOTSUP;
2354 
2355 	dev->dev_ops->asym_session_clear(dev, sess);
2356 
2357 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2358 
2359 	/* Return session to mempool */
2360 	sess_mp = rte_mempool_from_obj(sess);
2361 	rte_mempool_put(sess_mp, sess);
2362 
2363 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2364 	return 0;
2365 }
2366 
2367 unsigned int
2368 rte_cryptodev_asym_get_header_session_size(void)
2369 {
2370 	return sizeof(struct rte_cryptodev_asym_session);
2371 }
2372 
2373 unsigned int
2374 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2375 {
2376 	struct rte_cryptodev *dev;
2377 	unsigned int priv_sess_size;
2378 
2379 	if (!rte_cryptodev_is_valid_dev(dev_id))
2380 		return 0;
2381 
2382 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2383 
2384 	if (*dev->dev_ops->sym_session_get_size == NULL)
2385 		return 0;
2386 
2387 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2388 
2389 	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
2390 		priv_sess_size);
2391 
2392 	return priv_sess_size;
2393 }
2394 
2395 unsigned int
2396 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2397 {
2398 	struct rte_cryptodev *dev;
2399 	unsigned int priv_sess_size;
2400 
2401 	if (!rte_cryptodev_is_valid_dev(dev_id))
2402 		return 0;
2403 
2404 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2405 
2406 	if (*dev->dev_ops->asym_session_get_size == NULL)
2407 		return 0;
2408 
2409 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2410 
2411 	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
2412 		priv_sess_size);
2413 
2414 	return priv_sess_size;
2415 }
2416 
2417 int
2418 rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
2419 		uint16_t size)
2420 {
2421 	struct rte_cryptodev_sym_session *sess = _sess;
2422 
2423 	if (sess == NULL)
2424 		return -EINVAL;
2425 
2426 	if (sess->user_data_sz < size)
2427 		return -ENOMEM;
2428 
2429 	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);
2430 
2431 	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);
2432 
2433 	return 0;
2434 }
2435 
2436 void *
2437 rte_cryptodev_sym_session_get_user_data(void *_sess)
2438 {
2439 	struct rte_cryptodev_sym_session *sess = _sess;
2440 	void *data = NULL;
2441 
2442 	if (sess == NULL || sess->user_data_sz == 0)
2443 		return NULL;
2444 
2445 	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);
2446 
2447 	rte_cryptodev_trace_sym_session_get_user_data(sess, data);
2448 
2449 	return data;
2450 }
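/*
 * Usage sketch (illustrative): attaching and retrieving per-session
 * application data. The app_ctx structure is hypothetical, and the session
 * pool must have been created with a user_data_size of at least sizeof(ctx).
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	struct app_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0)
 *		p = rte_cryptodev_sym_session_get_user_data(sess);
 */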
2451 
2452 int
2453 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2454 {
2455 	struct rte_cryptodev_asym_session *sess = session;
2456 	if (sess == NULL)
2457 		return -EINVAL;
2458 
2459 	if (sess->user_data_sz < size)
2460 		return -ENOMEM;
2461 
2462 	rte_memcpy(sess->sess_private_data +
2463 			sess->max_priv_data_sz,
2464 			data, size);
2465 
2466 	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);
2467 
2468 	return 0;
2469 }
2470 
2471 void *
2472 rte_cryptodev_asym_session_get_user_data(void *session)
2473 {
2474 	struct rte_cryptodev_asym_session *sess = session;
2475 	void *data = NULL;
2476 
2477 	if (sess == NULL || sess->user_data_sz == 0)
2478 		return NULL;
2479 
2480 	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);
2481 
2482 	rte_cryptodev_trace_asym_session_get_user_data(sess, data);
2483 
2484 	return data;
2485 }
2486 
2487 static inline void
2488 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2489 {
2490 	uint32_t i;
2491 	for (i = 0; i < vec->num; i++)
2492 		vec->status[i] = errnum;
2493 }
2494 
2495 uint32_t
2496 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2497 	void *_sess, union rte_crypto_sym_ofs ofs,
2498 	struct rte_crypto_sym_vec *vec)
2499 {
2500 	struct rte_cryptodev *dev;
2501 	struct rte_cryptodev_sym_session *sess = _sess;
2502 
2503 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2504 		sym_crypto_fill_status(vec, EINVAL);
2505 		return 0;
2506 	}
2507 
2508 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2509 
2510 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2511 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2512 		sym_crypto_fill_status(vec, ENOTSUP);
2513 		return 0;
2514 	}
2515 
2516 	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);
2517 
2518 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2519 }
2520 
2521 int
2522 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2523 {
2524 	struct rte_cryptodev *dev;
2525 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2526 	int32_t priv_size;
2527 
2528 	if (!rte_cryptodev_is_valid_dev(dev_id))
2529 		return -EINVAL;
2530 
2531 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2532 
2533 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2534 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2535 		return -ENOTSUP;
2536 	}
2537 
2538 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2539 	if (priv_size < 0)
2540 		return -ENOTSUP;
2541 
2542 	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);
2543 
2544 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2545 }
2546 
2547 int
2548 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2549 	struct rte_crypto_raw_dp_ctx *ctx,
2550 	enum rte_crypto_op_sess_type sess_type,
2551 	union rte_cryptodev_session_ctx session_ctx,
2552 	uint8_t is_update)
2553 {
2554 	struct rte_cryptodev *dev;
2555 
2556 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2557 		return -EINVAL;
2558 
2559 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2560 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2561 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2562 		return -ENOTSUP;
2563 
2564 	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);
2565 
2566 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2567 			sess_type, session_ctx, is_update);
2568 }
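/*
 * Usage sketch (illustrative): setting up a raw data-path context on an
 * already configured queue pair, assuming the session context union exposes
 * a crypto_sess member for symmetric sessions. dev_id, qp_id and sess are
 * placeholders.
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *
 *	if (sz > 0) {
 *		ctx = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *		if (ctx != NULL)
 *			rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id,
 *				ctx, RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	}
 */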
2569 
2570 int
2571 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2572 	enum rte_crypto_op_type op_type,
2573 	enum rte_crypto_op_sess_type sess_type,
2574 	void *ev_mdata,
2575 	uint16_t size)
2576 {
2577 	struct rte_cryptodev *dev;
2578 
2579 	if (sess == NULL || ev_mdata == NULL)
2580 		return -EINVAL;
2581 
2582 	if (!rte_cryptodev_is_valid_dev(dev_id))
2583 		goto skip_pmd_op;
2584 
2585 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2586 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2587 		goto skip_pmd_op;
2588 
2589 	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
2590 		sess_type, ev_mdata, size);
2591 
2592 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2593 			sess_type, ev_mdata);
2594 
2595 skip_pmd_op:
2596 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2597 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2598 				size);
2599 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2600 		struct rte_cryptodev_asym_session *s = sess;
2601 
2602 		if (s->event_mdata == NULL) {
2603 			s->event_mdata = rte_malloc(NULL, size, 0);
2604 			if (s->event_mdata == NULL)
2605 				return -ENOMEM;
2606 		}
2607 		rte_memcpy(s->event_mdata, ev_mdata, size);
2608 
2609 		return 0;
2610 	} else
2611 		return -ENOTSUP;
2612 }
2613 
2614 uint32_t
2615 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2616 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2617 	void **user_data, int *enqueue_status)
2618 {
2619 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2620 			ofs, user_data, enqueue_status);
2621 }
2622 
2623 int
2624 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2625 		uint32_t n)
2626 {
2627 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2628 }
2629 
2630 uint32_t
2631 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2632 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2633 	uint32_t max_nb_to_dequeue,
2634 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2635 	void **out_user_data, uint8_t is_user_data_array,
2636 	uint32_t *n_success_jobs, int *status)
2637 {
2638 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2639 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2640 		out_user_data, is_user_data_array, n_success_jobs, status);
2641 }
2642 
2643 int
2644 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2645 		uint32_t n)
2646 {
2647 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2648 }
2649 
2650 /** Initialise rte_crypto_op mempool element */
2651 static void
2652 rte_crypto_op_init(struct rte_mempool *mempool,
2653 		void *opaque_arg,
2654 		void *_op_data,
2655 		__rte_unused unsigned i)
2656 {
2657 	struct rte_crypto_op *op = _op_data;
2658 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2659 
2660 	memset(_op_data, 0, mempool->elt_size);
2661 
2662 	__rte_crypto_op_reset(op, type);
2663 
2664 	op->phys_addr = rte_mem_virt2iova(_op_data);
2665 	op->mempool = mempool;
2666 }
2667 
2668 
2669 struct rte_mempool *
2670 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2671 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2672 		int socket_id)
2673 {
2674 	struct rte_crypto_op_pool_private *priv;
2675 
2676 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2677 			priv_size;
2678 
2679 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2680 		elt_size += sizeof(struct rte_crypto_sym_op);
2681 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2682 		elt_size += sizeof(struct rte_crypto_asym_op);
2683 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2684 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2685 		                    sizeof(struct rte_crypto_asym_op));
2686 	} else {
2687 		CDEV_LOG_ERR("Invalid op_type");
2688 		return NULL;
2689 	}
2690 
2691 	/* lookup mempool in case already allocated */
2692 	struct rte_mempool *mp = rte_mempool_lookup(name);
2693 
2694 	if (mp != NULL) {
2695 		priv = (struct rte_crypto_op_pool_private *)
2696 				rte_mempool_get_priv(mp);
2697 
2698 		if (mp->elt_size != elt_size ||
2699 				mp->cache_size < cache_size ||
2700 				mp->size < nb_elts ||
2701 				priv->priv_size <  priv_size) {
2702 			mp = NULL;
2703 			CDEV_LOG_ERR("Mempool %s already exists but with "
2704 					"incompatible parameters", name);
2705 			return NULL;
2706 		}
2707 		return mp;
2708 	}
2709 
2710 	mp = rte_mempool_create(
2711 			name,
2712 			nb_elts,
2713 			elt_size,
2714 			cache_size,
2715 			sizeof(struct rte_crypto_op_pool_private),
2716 			NULL,
2717 			NULL,
2718 			rte_crypto_op_init,
2719 			&type,
2720 			socket_id,
2721 			0);
2722 
2723 	if (mp == NULL) {
2724 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2725 		return NULL;
2726 	}
2727 
2728 	priv = (struct rte_crypto_op_pool_private *)
2729 			rte_mempool_get_priv(mp);
2730 
2731 	priv->priv_size = priv_size;
2732 	priv->type = type;
2733 
2734 	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
2735 	return mp;
2736 }
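/*
 * Usage sketch (illustrative): creating a symmetric operation pool and
 * drawing one operation from it. The pool name, element count, cache size
 * and zero private area size are placeholders.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */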
2737 
2738 int
2739 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2740 {
2741 	struct rte_cryptodev *dev = NULL;
2742 	uint32_t i = 0;
2743 
2744 	if (name == NULL)
2745 		return -EINVAL;
2746 
2747 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2748 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2749 				"%s_%u", dev_name_prefix, i);
2750 
2751 		if (ret < 0)
2752 			return ret;
2753 
2754 		dev = rte_cryptodev_pmd_get_named_dev(name);
2755 		if (!dev)
2756 			return 0;
2757 	}
2758 
2759 	return -1;
2760 }
2761 
2762 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2763 
2764 static struct cryptodev_driver_list cryptodev_driver_list =
2765 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2766 
2767 int
2768 rte_cryptodev_driver_id_get(const char *name)
2769 {
2770 	struct cryptodev_driver *driver;
2771 	const char *driver_name;
2772 	int driver_id = -1;
2773 
2774 	if (name == NULL) {
2775 		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2776 		return -1;
2777 	}
2778 
2779 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2780 		driver_name = driver->driver->name;
2781 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
2782 			driver_id = driver->id;
2783 			break;
2784 		}
2785 	}
2786 
2787 	rte_cryptodev_trace_driver_id_get(name, driver_id);
2788 
2789 	return driver_id;
2790 }
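/*
 * Usage sketch (illustrative): resolving a driver name to its numeric id
 * and back. "crypto_aesni_mb" is only an example of a PMD name; any
 * registered driver name can be used.
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *	if (drv_id >= 0)
 *		printf("driver name: %s\n",
 *			rte_cryptodev_driver_name_get(drv_id));
 */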
2791 
2792 const char *
2793 rte_cryptodev_name_get(uint8_t dev_id)
2794 {
2795 	struct rte_cryptodev *dev;
2796 
2797 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2798 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2799 		return NULL;
2800 	}
2801 
2802 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2803 	if (dev == NULL)
2804 		return NULL;
2805 
2806 	rte_cryptodev_trace_name_get(dev_id, dev->data->name);
2807 
2808 	return dev->data->name;
2809 }
2810 
2811 const char *
2812 rte_cryptodev_driver_name_get(uint8_t driver_id)
2813 {
2814 	struct cryptodev_driver *driver;
2815 
2816 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2817 		if (driver->id == driver_id) {
2818 			rte_cryptodev_trace_driver_name_get(driver_id,
2819 				driver->driver->name);
2820 			return driver->driver->name;
2821 		}
2822 	}
2823 	return NULL;
2824 }
2825 
2826 uint8_t
2827 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2828 		const struct rte_driver *drv)
2829 {
2830 	crypto_drv->driver = drv;
2831 	crypto_drv->id = nb_drivers;
2832 
2833 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2834 
2835 	rte_cryptodev_trace_allocate_driver(drv->name);
2836 
2837 	return nb_drivers++;
2838 }
2839 
2840 RTE_INIT(cryptodev_init_fp_ops)
2841 {
2842 	uint32_t i;
2843 
2844 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2845 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2846 }
2847 
2848 static int
2849 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2850 		const char *params __rte_unused,
2851 		struct rte_tel_data *d)
2852 {
2853 	int dev_id;
2854 
2855 	if (rte_cryptodev_count() < 1)
2856 		return -EINVAL;
2857 
2858 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2859 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2860 		if (rte_cryptodev_is_valid_dev(dev_id))
2861 			rte_tel_data_add_array_int(d, dev_id);
2862 
2863 	return 0;
2864 }
2865 
2866 static int
2867 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2868 		const char *params, struct rte_tel_data *d)
2869 {
2870 	struct rte_cryptodev_info cryptodev_info;
2871 	int dev_id;
2872 	char *end_param;
2873 
2874 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2875 		return -EINVAL;
2876 
2877 	dev_id = strtoul(params, &end_param, 0);
2878 	if (*end_param != '\0')
2879 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2880 	if (!rte_cryptodev_is_valid_dev(dev_id))
2881 		return -EINVAL;
2882 
2883 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2884 
2885 	rte_tel_data_start_dict(d);
2886 	rte_tel_data_add_dict_string(d, "device_name",
2887 		cryptodev_info.device->name);
2888 	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
2889 		cryptodev_info.max_nb_queue_pairs);
2890 
2891 	return 0;
2892 }
2893 
2894 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)
2895 
2896 static int
2897 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2898 		const char *params,
2899 		struct rte_tel_data *d)
2900 {
2901 	struct rte_cryptodev_stats cryptodev_stats;
2902 	int dev_id, ret;
2903 	char *end_param;
2904 
2905 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2906 		return -EINVAL;
2907 
2908 	dev_id = strtoul(params, &end_param, 0);
2909 	if (*end_param != '\0')
2910 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2911 	if (!rte_cryptodev_is_valid_dev(dev_id))
2912 		return -EINVAL;
2913 
2914 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2915 	if (ret < 0)
2916 		return ret;
2917 
2918 	rte_tel_data_start_dict(d);
2919 	ADD_DICT_STAT(enqueued_count);
2920 	ADD_DICT_STAT(dequeued_count);
2921 	ADD_DICT_STAT(enqueue_err_count);
2922 	ADD_DICT_STAT(dequeue_err_count);
2923 
2924 	return 0;
2925 }
2926 
2927 #define CRYPTO_CAPS_SZ                                             \
2928 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2929 					sizeof(uint64_t)) /        \
2930 	 sizeof(uint64_t))
2931 
2932 static int
2933 crypto_caps_array(struct rte_tel_data *d,
2934 		  const struct rte_cryptodev_capabilities *capabilities)
2935 {
2936 	const struct rte_cryptodev_capabilities *dev_caps;
2937 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2938 	unsigned int i = 0, j;
2939 
2940 	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);
2941 
2942 	while ((dev_caps = &capabilities[i++])->op !=
2943 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2944 		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
2945 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2946 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2947 			rte_tel_data_add_array_uint(d, caps_val[j]);
2948 	}
2949 
2950 	return i;
2951 }
2952 
2953 static int
2954 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2955 			  struct rte_tel_data *d)
2956 {
2957 	struct rte_cryptodev_info dev_info;
2958 	struct rte_tel_data *crypto_caps;
2959 	int crypto_caps_n;
2960 	char *end_param;
2961 	int dev_id;
2962 
2963 	if (!params || strlen(params) == 0 || !isdigit(*params))
2964 		return -EINVAL;
2965 
2966 	dev_id = strtoul(params, &end_param, 0);
2967 	if (*end_param != '\0')
2968 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2969 	if (!rte_cryptodev_is_valid_dev(dev_id))
2970 		return -EINVAL;
2971 
2972 	rte_tel_data_start_dict(d);
2973 	crypto_caps = rte_tel_data_alloc();
2974 	if (!crypto_caps)
2975 		return -ENOMEM;
2976 
2977 	rte_cryptodev_info_get(dev_id, &dev_info);
2978 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2979 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2980 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2981 
2982 	return 0;
2983 }
2984 
2985 RTE_INIT(cryptodev_init_telemetry)
2986 {
2987 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2988 			"Returns information for a cryptodev. Parameters: int dev_id");
2989 	rte_telemetry_register_cmd("/cryptodev/list",
2990 			cryptodev_handle_dev_list,
2991 			"Returns list of available crypto devices by IDs. No parameters.");
2992 	rte_telemetry_register_cmd("/cryptodev/stats",
2993 			cryptodev_handle_dev_stats,
2994 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2995 	rte_telemetry_register_cmd("/cryptodev/caps",
2996 			cryptodev_handle_dev_caps,
2997 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2998 }
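/*
 * Usage sketch (illustrative): the endpoints registered above can be queried
 * at runtime with the dpdk-telemetry.py client; the device id 0 in the
 * parameters is a placeholder.
 *
 *	--> /cryptodev/list
 *	--> /cryptodev/info,0
 *	--> /cryptodev/stats,0
 *	--> /cryptodev/caps,0
 */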
2999