/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

RTE_LOG_REGISTER_DEFAULT(rte_cryptodev_logtype, INFO);

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * String identifiers for the crypto cipher algorithms.
 * Not for direct use by applications;
 * applications should use rte_cryptodev_get_cipher_algo_string() instead.
 */
static const char *
crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr",
	[RTE_CRYPTO_CIPHER_SM4_CFB]	= "sm4-cfb",
	[RTE_CRYPTO_CIPHER_SM4_OFB]	= "sm4-ofb"
};

/**
 * String identifiers for the crypto cipher operations.
 * These may be used on an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto auth algorithms.
 * Not for direct use by applications;
 * applications should use rte_cryptodev_get_auth_algo_string() instead.
 */
static const char *
crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3",
	[RTE_CRYPTO_AUTH_SM3_HMAC]	= "sm3-hmac",

	[RTE_CRYPTO_AUTH_SHAKE_128]	= "shake-128",
	[RTE_CRYPTO_AUTH_SHAKE_256]	= "shake-256",
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * Not for direct use by applications;
 * applications should use rte_cryptodev_get_aead_algo_string() instead.
 */
static const char *
crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * These may be used on an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transforms.
 * Not for direct use by applications;
 * applications should use rte_cryptodev_asym_get_xform_string() instead.
 */
static const char *
crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
	[RTE_CRYPTO_ASYM_XFORM_SM2]	= "sm2",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * String identifiers for the asymmetric crypto key exchange operations.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t sess_data_sz;
	/**< driver session data size */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * Private data stored in the asym session mempool's private data area.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}
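
/*
 * Illustrative usage sketch (not part of this file): mapping a command-line
 * algorithm name to its enum value. The "aes-cbc" literal is only an example
 * input; algo is initialized because the lookup leaves it untouched on
 * failure.
 *
 *	enum rte_crypto_cipher_algorithm algo = RTE_CRYPTO_CIPHER_NULL;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 *	else
 *		printf("cipher algo enum value: %d\n", algo);
 */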

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);

	return ret;
}

const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
		alg_str = crypto_cipher_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
		alg_str = crypto_auth_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
		alg_str = crypto_aead_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
{
	const char *xform_str = NULL;

	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
		xform_str = crypto_asym_xform_strings[xform_enum];

	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);

	return xform_str;
}

/**
 * String identifiers for the crypto auth operations.
 * These may be used on an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead) {
			sym_capability = &capability->sym;
			break;
		}
	}

	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
		dev_info.driver_id, idx->type, sym_capability);

	return sym_capability;
}
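
/*
 * Usage sketch (device id 0 is an example only): query whether a device
 * supports AES-GCM and fetch its symmetric capability entry.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(0, &idx);
 *	if (cap == NULL)
 *		printf("AES-GCM not supported\n");
 */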

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
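
/*
 * Worked example of the range semantics above: for a key_size range of
 * {.min = 16, .max = 32, .increment = 8}, the accepted sizes are exactly
 * 16, 24 and 32 bytes. An increment of 0 means the range is a single value
 * (min == max), so only that size passes the check.
 */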

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type) {
			asym_cap = &capability->asym.xform_capa;
			break;
		}
	}

	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
		dev_info.driver_id, idx->type, asym_cap);

	return asym_cap;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
		iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->auth.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
		digest_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->aead.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
		digest_size, aad_size, iv_size, ret);

	return ret;
}
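
/*
 * Usage sketch combining the lookup and check helpers (all parameter values
 * are examples): given the AES-GCM capability fetched earlier, validate a
 * 16-byte key, 16-byte digest, 16-byte AAD and 12-byte IV.
 *
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_aead(cap, 16, 16, 16, 12) == 0)
 *		printf("requested AEAD parameters are supported\n");
 */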

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	int ret = 0;

	if (capability->op_types & (1 << op_type))
		ret = 1;

	rte_cryptodev_trace_asym_xform_capability_check_optype(
		capability->op_types, op_type, ret);

	return ret;
}
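
/*
 * op_types is a bitmask indexed by enum rte_crypto_asym_op_type, so given a
 * capability pointer capa (name is illustrative), support for both sign and
 * verify can be tested as:
 *
 *	if (rte_cryptodev_asym_xform_capability_check_optype(capa,
 *			RTE_CRYPTO_ASYM_OP_SIGN) &&
 *	    rte_cryptodev_asym_xform_capability_check_optype(capa,
 *			RTE_CRYPTO_ASYM_OP_VERIFY))
 *		printf("sign and verify supported\n");
 */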

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	int ret = 0; /* success */

	/* no need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min) {
			ret = -1;
			goto done;
		}
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max) {
			ret = -1;
			goto done;
		}
	}

	/* in any case, check that the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			ret = -1;
	}

done:
	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
		modlen, ret);

	return ret;
}

bool
rte_cryptodev_asym_xform_capability_check_hash(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_auth_algorithm hash)
{
	bool ret = false;

	if (capability->hash_algos & (1 << hash))
		ret = true;

	rte_cryptodev_trace_asym_xform_capability_check_hash(
		capability->hash_algos, hash, ret);

	return ret;
}

/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as only one DP thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}
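
/*
 * Note on the RCU scheme used by the callback lists above: each queue pair
 * gets its own QSBR variable sized for a single reader, because only one
 * data-plane thread is expected to poll a given queue pair. Writers
 * (add/remove callback, serialized by rte_cryptodev_callback_lock) publish
 * list updates with release stores and, on removal, call
 * rte_rcu_qsbr_synchronize() before freeing the unlinked element.
 */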

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	rte_cryptodev_trace_get_feature_name(flag);

	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}
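
/*
 * Usage sketch (device id 0 is an example): print the feature names of a
 * device by walking the 64-bit feature mask one bit at a time. The NULL
 * check covers bits with no registered name.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *	const char *fname;
 *
 *	rte_cryptodev_info_get(0, &info);
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		if ((info.feature_flags & flag) == 0)
 *			continue;
 *		fname = rte_cryptodev_get_feature_name(flag);
 *		if (fname != NULL)
 *			printf("%s\n", fname);
 *	}
 */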

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int ret = 1;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		ret = 0;
		goto done;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		ret = 0;

done:
	rte_cryptodev_trace_is_valid_dev(dev_id, ret);

	return ret;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;
	int ret = -1;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)) {
			ret = (int)i;
			break;
		}
	}

	rte_cryptodev_trace_get_dev_id(name, ret);

	return ret;
}

uint8_t
rte_cryptodev_count(void)
{
	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);

	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	rte_cryptodev_trace_devices_get(driver_name, count);

	return count;
}
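
/*
 * Usage sketch (the driver name is an example): collect the ids of all
 * devices bound to a given PMD.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_DIM(ids));
 */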

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	void *sec_ctx = NULL;

	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		sec_ctx = rte_crypto_devices[dev_id].security_ctx;

	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);

	return sec_ctx;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
		dev->data->socket_id);
	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
		dev->data->socket_id, dev->data->dev_id,
		dev->data->nb_queue_pairs);

	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_queue_pair_reset(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->queue_pair_reset == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_reset(dev_id, queue_pair_id, qp_conf, socket_id);
	return (*dev->dev_ops->queue_pair_reset)(dev, queue_pair_id, qp_conf, socket_id);
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
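
/*
 * Usage sketch of the configure step (values are examples; dev_id is assumed
 * to be a valid device id):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 2,
 *	};
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev configure failed\n");
 */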

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;
	int ret = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		ret = -EINVAL;
		goto done;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		ret = -EINVAL;
		goto done;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id])	{
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		ret = 1;
		goto done;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

done:
	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);

	return ret;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
	uint32_t sess_priv_size)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->sess_data_sz < sess_priv_size)
		return 0;

	return 1;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
					rte_cryptodev_sym_get_private_session_size(dev_id))) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
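
/*
 * Usage sketch (descriptor count and the sess_mp pool pointer are examples):
 * each queue pair is set up after rte_cryptodev_configure() and before
 * rte_cryptodev_start().
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp, // from rte_cryptodev_sym_session_pool_create()
 *	};
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_cryptodev_socket_id(dev_id)) < 0)
 *		rte_exit(EXIT_FAILURE, "queue pair setup failed\n");
 */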

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
#ifndef RTE_CRYPTO_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
	return cb;
}
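
/*
 * Usage sketch (the callback body and names are hypothetical): count
 * operations as they are enqueued on queue pair 0. The returned handle is
 * later passed to rte_cryptodev_remove_enq_callback().
 *
 *	static uint16_t
 *	count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *		 uint16_t nb_ops, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static uint64_t total;
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_cb, &total);
 */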

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
#ifndef RTE_CRYPTO_CALLBACKS
	return -ENOTSUP;
#endif
	struct rte_cryptodev *dev;
	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
	struct rte_cryptodev_cb *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			rte_atomic_store_explicit(prev_cb, curr_cb->next,
				rte_memory_order_relaxed);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
#ifndef RTE_CRYPTO_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
#ifndef RTE_CRYPTO_CALLBACKS
	return -ENOTSUP;
#endif
	struct rte_cryptodev *dev;
	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
	struct rte_cryptodev_cb *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			rte_atomic_store_explicit(prev_cb, curr_cb->next,
				rte_memory_order_relaxed);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);

	rte_cryptodev_trace_stats_get(dev_id, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	rte_cryptodev_trace_stats_reset(dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;

	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}
	dev = &rte_crypto_devices[dev_id];

	if (qp_id >= dev->data->nb_queue_pairs)
		return -EINVAL;
	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
		return -ENOTSUP;

	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;

	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
			NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->sess_data_sz = elt_size;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
2046 
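/*
 * Illustrative usage sketch (not part of the library): creating a symmetric
 * session pool sized for a given device. "sess_pool" and the element count,
 * cache size and user data size below are arbitrary example values.
 *
 *	struct rte_mempool *pool;
 *	uint32_t sz;
 *
 *	sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	pool = rte_cryptodev_sym_session_pool_create("sess_pool", 1024, sz,
 *			32, 16, rte_socket_id());
 *
 * When one pool serves several devices, elt_size should be the maximum of
 * their private session sizes.
 */
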
2047 struct rte_mempool *
2048 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
2049 	uint32_t cache_size, uint16_t user_data_size, int socket_id)
2050 {
2051 	struct rte_mempool *mp;
2052 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2053 	uint32_t obj_sz, obj_sz_aligned;
2054 	uint8_t dev_id;
2055 	unsigned int priv_sz, max_priv_sz = 0;
2056 
2057 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2058 		if (rte_cryptodev_is_valid_dev(dev_id)) {
2059 			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
2060 			if (priv_sz > max_priv_sz)
2061 				max_priv_sz = priv_sz;
2062 		}
2063 	if (max_priv_sz == 0) {
2064 		CDEV_LOG_INFO("No valid crypto device found, could not determine max private session size");
2065 		return NULL;
2066 	}
2067 
2068 	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
2069 			user_data_size;
2070 	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2071 
2072 	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
2073 			(uint32_t)(sizeof(*pool_priv)),
2074 			NULL, NULL, NULL, NULL,
2075 			socket_id, 0);
2076 	if (mp == NULL) {
2077 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2078 			__func__, name, rte_errno);
2079 		return NULL;
2080 	}
2081 
2082 	pool_priv = rte_mempool_get_priv(mp);
2083 	if (!pool_priv) {
2084 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2085 			__func__, name);
2086 		rte_mempool_free(mp);
2087 		return NULL;
2088 	}
2089 	pool_priv->max_priv_session_sz = max_priv_sz;
2090 	pool_priv->user_data_sz = user_data_size;
2091 
2092 	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
2093 		user_data_size, cache_size, mp);
2094 	return mp;
2095 }
2096 
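/*
 * Illustrative usage sketch (not part of the library): the asymmetric pool
 * sizes its elements from the largest private session size across all
 * currently valid devices, so devices must be probed before this call.
 * "asym_sess_pool" and the numeric values are arbitrary example values.
 *
 *	struct rte_mempool *apool;
 *
 *	apool = rte_cryptodev_asym_session_pool_create("asym_sess_pool",
 *			128, 0, 16, rte_socket_id());
 */
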
2097 void *
2098 rte_cryptodev_sym_session_create(uint8_t dev_id,
2099 		struct rte_crypto_sym_xform *xforms,
2100 		struct rte_mempool *mp)
2101 {
2102 	struct rte_cryptodev *dev;
2103 	struct rte_cryptodev_sym_session *sess;
2104 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2105 	uint32_t sess_priv_sz;
2106 	int ret;
2107 
2108 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2109 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2110 		rte_errno = EINVAL;
2111 		return NULL;
2112 	}
2113 
2114 	if (xforms == NULL) {
2115 		CDEV_LOG_ERR("Invalid xform");
2116 		rte_errno = EINVAL;
2117 		return NULL;
2118 	}
2119 
2120 	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
2121 	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
2122 		CDEV_LOG_ERR("Invalid mempool");
2123 		rte_errno = EINVAL;
2124 		return NULL;
2125 	}
2126 
2127 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2128 
2129 	/* Allocate a session structure from the session pool */
2130 	if (rte_mempool_get(mp, (void **)&sess)) {
2131 		CDEV_LOG_ERR("couldn't get object from session mempool");
2132 		rte_errno = ENOMEM;
2133 		return NULL;
2134 	}
2135 
2136 	pool_priv = rte_mempool_get_priv(mp);
2137 	sess->driver_id = dev->driver_id;
2138 	sess->sess_data_sz = pool_priv->sess_data_sz;
2139 	sess->user_data_sz = pool_priv->user_data_sz;
2140 	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
2141 		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);
2142 
2143 	if (dev->dev_ops->sym_session_configure == NULL) {
2144 		rte_errno = ENOTSUP;
2145 		goto error_exit;
2146 	}
2147 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2148 
2149 	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
2150 	if (ret < 0) {
2151 		rte_errno = -ret;
2152 		goto error_exit;
2153 	}
2154 	sess->driver_id = dev->driver_id;
2155 
2156 	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);
2157 
2158 	return (void *)sess;
2159 error_exit:
2160 	rte_mempool_put(mp, (void *)sess);
2161 	return NULL;
2162 }
2163 
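/*
 * Illustrative usage sketch (not part of the library): creating an AES-CBC
 * encrypt session from a pool made by
 * rte_cryptodev_sym_session_pool_create(). key_data, IV_OFFSET and pool are
 * hypothetical; on failure NULL is returned and rte_errno is set.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key_data, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(dev_id, &xform, pool);
 *	...
 *	rte_cryptodev_sym_session_free(dev_id, sess);
 */
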
2164 int
2165 rte_cryptodev_asym_session_create(uint8_t dev_id,
2166 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
2167 		void **session)
2168 {
2169 	struct rte_cryptodev_asym_session *sess;
2170 	uint32_t session_priv_data_sz;
2171 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2172 	unsigned int session_header_size =
2173 			rte_cryptodev_asym_get_header_session_size();
2174 	struct rte_cryptodev *dev;
2175 	int ret;
2176 
2177 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2178 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2179 		return -EINVAL;
2180 	}
2181 
2182 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2183 
2184 	if (dev == NULL)
2185 		return -EINVAL;
2186 
2187 	if (!mp) {
2188 		CDEV_LOG_ERR("invalid mempool");
2189 		return -EINVAL;
2190 	}
2191 
2192 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
2193 			dev_id);
2194 	pool_priv = rte_mempool_get_priv(mp);
2195 
2196 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
2197 		CDEV_LOG_DEBUG(
2198 			"The private session data size used when creating the mempool is smaller than this device's private session data size.");
2199 		return -EINVAL;
2200 	}
2201 
2202 	/* Verify that the mempool elements are big enough to hold a session object. */
2203 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
2204 		CDEV_LOG_ERR(
2205 			"mempool elements too small to hold session objects");
2206 		return -EINVAL;
2207 	}
2208 
2209 	/* Allocate a session structure from the session pool */
2210 	if (rte_mempool_get(mp, session)) {
2211 		CDEV_LOG_ERR("couldn't get object from session mempool");
2212 		return -ENOMEM;
2213 	}
2214 
2215 	sess = *session;
2216 	sess->driver_id = dev->driver_id;
2217 	sess->user_data_sz = pool_priv->user_data_sz;
2218 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
2219 
2220 	/* Clear the device private session data and the user data. */
2221 	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
2222 
2223 	if (*dev->dev_ops->asym_session_configure == NULL)
2224 		return -ENOTSUP;
2225 
2226 	if (sess->sess_private_data[0] == 0) {
2227 		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
2228 		if (ret < 0) {
2229 			CDEV_LOG_ERR(
2230 				"dev_id %d failed to configure session details",
2231 				dev_id);
2232 			return ret;
2233 		}
2234 	}
2235 
2236 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2237 	return 0;
2238 }
2239 
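/*
 * Illustrative usage sketch (not part of the library): asym_xform is assumed
 * to be an already-populated struct rte_crypto_asym_xform (for example a
 * modular-exponentiation transform) and apool a pool created by
 * rte_cryptodev_asym_session_pool_create().
 *
 *	void *asess = NULL;
 *
 *	ret = rte_cryptodev_asym_session_create(dev_id, &asym_xform, apool,
 *			&asess);
 *	...
 *	rte_cryptodev_asym_session_free(dev_id, asess);
 */
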
2240 int
2241 rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
2242 {
2243 	struct rte_cryptodev *dev;
2244 	struct rte_mempool *sess_mp;
2245 	struct rte_cryptodev_sym_session *sess = _sess;
2246 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2247 
2248 	if (sess == NULL)
2249 		return -EINVAL;
2250 
2251 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2252 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2253 		return -EINVAL;
2254 	}
2255 
2256 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2257 
2258 	if (dev == NULL)
2259 		return -EINVAL;
2260 
2261 	sess_mp = rte_mempool_from_obj(sess);
2262 	if (!sess_mp)
2263 		return -EINVAL;
2264 	pool_priv = rte_mempool_get_priv(sess_mp);
2265 
2266 	if (sess->driver_id != dev->driver_id) {
2267 		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
2268 			sess->driver_id, dev->driver_id);
2269 		return -EINVAL;
2270 	}
2271 
2272 	if (*dev->dev_ops->sym_session_clear == NULL)
2273 		return -ENOTSUP;
2274 
2275 	dev->dev_ops->sym_session_clear(dev, sess);
2276 
2277 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2278 
2279 	/* Return session to mempool */
2280 	rte_mempool_put(sess_mp, sess);
2281 
2282 	rte_cryptodev_trace_sym_session_free(dev_id, sess);
2283 	return 0;
2284 }
2285 
2286 int
2287 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2288 {
2289 	struct rte_mempool *sess_mp;
2290 	struct rte_cryptodev *dev;
2291 
2292 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2293 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2294 		return -EINVAL;
2295 	}
2296 
2297 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2298 
2299 	if (dev == NULL || sess == NULL)
2300 		return -EINVAL;
2301 
2302 	if (*dev->dev_ops->asym_session_clear == NULL)
2303 		return -ENOTSUP;
2304 
2305 	dev->dev_ops->asym_session_clear(dev, sess);
2306 
2307 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2308 
2309 	/* Return session to mempool */
2310 	sess_mp = rte_mempool_from_obj(sess);
2311 	rte_mempool_put(sess_mp, sess);
2312 
2313 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2314 	return 0;
2315 }
2316 
2317 unsigned int
2318 rte_cryptodev_asym_get_header_session_size(void)
2319 {
2320 	return sizeof(struct rte_cryptodev_asym_session);
2321 }
2322 
2323 unsigned int
2324 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2325 {
2326 	struct rte_cryptodev *dev;
2327 	unsigned int priv_sess_size;
2328 
2329 	if (!rte_cryptodev_is_valid_dev(dev_id))
2330 		return 0;
2331 
2332 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2333 
2334 	if (*dev->dev_ops->sym_session_get_size == NULL)
2335 		return 0;
2336 
2337 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2338 
2339 	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
2340 		priv_sess_size);
2341 
2342 	return priv_sess_size;
2343 }
2344 
2345 unsigned int
2346 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2347 {
2348 	struct rte_cryptodev *dev;
2349 	unsigned int priv_sess_size;
2350 
2351 	if (!rte_cryptodev_is_valid_dev(dev_id))
2352 		return 0;
2353 
2354 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2355 
2356 	if (*dev->dev_ops->asym_session_get_size == NULL)
2357 		return 0;
2358 
2359 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2360 
2361 	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
2362 		priv_sess_size);
2363 
2364 	return priv_sess_size;
2365 }
2366 
2367 int
2368 rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
2369 		uint16_t size)
2370 {
2371 	struct rte_cryptodev_sym_session *sess = _sess;
2372 
2373 	if (sess == NULL)
2374 		return -EINVAL;
2375 
2376 	if (sess->user_data_sz < size)
2377 		return -ENOMEM;
2378 
2379 	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);
2380 
2381 	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);
2382 
2383 	return 0;
2384 }
2385 
2386 void *
2387 rte_cryptodev_sym_session_get_user_data(void *_sess)
2388 {
2389 	struct rte_cryptodev_sym_session *sess = _sess;
2390 	void *data = NULL;
2391 
2392 	if (sess == NULL || sess->user_data_sz == 0)
2393 		return NULL;
2394 
2395 	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);
2396 
2397 	rte_cryptodev_trace_sym_session_get_user_data(sess, data);
2398 
2399 	return data;
2400 }
2401 
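/*
 * Illustrative usage sketch (not part of the library): stashing a small
 * application tag in the session's user data area. This only succeeds if
 * the session pool was created with user_data_size >= sizeof(tag);
 * otherwise -ENOMEM is returned.
 *
 *	uint64_t tag = 0x1234;
 *	uint64_t *p;
 *
 *	ret = rte_cryptodev_sym_session_set_user_data(sess, &tag, sizeof(tag));
 *	p = rte_cryptodev_sym_session_get_user_data(sess);
 */
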
2402 int
2403 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2404 {
2405 	struct rte_cryptodev_asym_session *sess = session;
2406 	if (sess == NULL)
2407 		return -EINVAL;
2408 
2409 	if (sess->user_data_sz < size)
2410 		return -ENOMEM;
2411 
2412 	rte_memcpy(sess->sess_private_data +
2413 			sess->max_priv_data_sz,
2414 			data, size);
2415 
2416 	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);
2417 
2418 	return 0;
2419 }
2420 
2421 void *
2422 rte_cryptodev_asym_session_get_user_data(void *session)
2423 {
2424 	struct rte_cryptodev_asym_session *sess = session;
2425 	void *data = NULL;
2426 
2427 	if (sess == NULL || sess->user_data_sz == 0)
2428 		return NULL;
2429 
2430 	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);
2431 
2432 	rte_cryptodev_trace_asym_session_get_user_data(sess, data);
2433 
2434 	return data;
2435 }
2436 
2437 static inline void
2438 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2439 {
2440 	uint32_t i;
2441 	for (i = 0; i < vec->num; i++)
2442 		vec->status[i] = errnum;
2443 }
2444 
2445 uint32_t
2446 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2447 	void *_sess, union rte_crypto_sym_ofs ofs,
2448 	struct rte_crypto_sym_vec *vec)
2449 {
2450 	struct rte_cryptodev *dev;
2451 	struct rte_cryptodev_sym_session *sess = _sess;
2452 
2453 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2454 		sym_crypto_fill_status(vec, EINVAL);
2455 		return 0;
2456 	}
2457 
2458 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2459 
2460 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2461 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2462 		sym_crypto_fill_status(vec, ENOTSUP);
2463 		return 0;
2464 	}
2465 
2466 	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);
2467 
2468 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2469 }
2470 
2471 int
2472 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2473 {
2474 	struct rte_cryptodev *dev;
2475 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2476 	int32_t priv_size;
2477 
2478 	if (!rte_cryptodev_is_valid_dev(dev_id))
2479 		return -EINVAL;
2480 
2481 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2482 
2483 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2484 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2485 		return -ENOTSUP;
2486 	}
2487 
2488 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2489 	if (priv_size < 0)
2490 		return -ENOTSUP;
2491 
2492 	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);
2493 
2494 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2495 }
2496 
2497 int
2498 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2499 	struct rte_crypto_raw_dp_ctx *ctx,
2500 	enum rte_crypto_op_sess_type sess_type,
2501 	union rte_cryptodev_session_ctx session_ctx,
2502 	uint8_t is_update)
2503 {
2504 	struct rte_cryptodev *dev;
2505 
2506 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2507 		return -EINVAL;
2508 
2509 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2510 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2511 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2512 		return -ENOTSUP;
2513 
2514 	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);
2515 
2516 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2517 			sess_type, session_ctx, is_update);
2518 }
2519 
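/*
 * Illustrative usage sketch (not part of the library): allocating and
 * configuring a raw data-path context for a session-based flow. dev_id,
 * qp_id and sess are hypothetical, and error checks are omitted.
 *
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	int size;
 *
 *	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	ctx = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
 *	ret = rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */
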
2520 int
2521 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2522 	enum rte_crypto_op_type op_type,
2523 	enum rte_crypto_op_sess_type sess_type,
2524 	void *ev_mdata,
2525 	uint16_t size)
2526 {
2527 	struct rte_cryptodev *dev;
2528 
2529 	if (sess == NULL || ev_mdata == NULL)
2530 		return -EINVAL;
2531 
2532 	if (!rte_cryptodev_is_valid_dev(dev_id))
2533 		goto skip_pmd_op;
2534 
2535 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2536 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2537 		goto skip_pmd_op;
2538 
2539 	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
2540 		sess_type, ev_mdata, size);
2541 
2542 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2543 			sess_type, ev_mdata);
2544 
2545 skip_pmd_op:
2546 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2547 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2548 				size);
2549 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2550 		struct rte_cryptodev_asym_session *s = sess;
2551 
2552 		if (s->event_mdata == NULL) {
2553 			s->event_mdata = rte_malloc(NULL, size, 0);
2554 			if (s->event_mdata == NULL)
2555 				return -ENOMEM;
2556 		}
2557 		rte_memcpy(s->event_mdata, ev_mdata, size);
2558 
2559 		return 0;
2560 	} else
2561 		return -ENOTSUP;
2562 }
2563 
2564 uint32_t
2565 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2566 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2567 	void **user_data, int *enqueue_status)
2568 {
2569 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2570 			ofs, user_data, enqueue_status);
2571 }
2572 
2573 int
2574 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2575 		uint32_t n)
2576 {
2577 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2578 }
2579 
2580 uint32_t
2581 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2582 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2583 	uint32_t max_nb_to_dequeue,
2584 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2585 	void **out_user_data, uint8_t is_user_data_array,
2586 	uint32_t *n_success_jobs, int *status)
2587 {
2588 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2589 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2590 		out_user_data, is_user_data_array, n_success_jobs, status);
2591 }
2592 
2593 int
2594 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2595 		uint32_t n)
2596 {
2597 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2598 }
2599 
2600 /** Initialise rte_crypto_op mempool element */
2601 static void
2602 rte_crypto_op_init(struct rte_mempool *mempool,
2603 		void *opaque_arg,
2604 		void *_op_data,
2605 		__rte_unused unsigned i)
2606 {
2607 	struct rte_crypto_op *op = _op_data;
2608 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2609 
2610 	memset(_op_data, 0, mempool->elt_size);
2611 
2612 	__rte_crypto_op_reset(op, type);
2613 
2614 	op->phys_addr = rte_mempool_virt2iova(_op_data);
2615 	op->mempool = mempool;
2616 }
2617 
2618 
2619 struct rte_mempool *
2620 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2621 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2622 		int socket_id)
2623 {
2624 	struct rte_crypto_op_pool_private *priv;
2625 
2626 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2627 			priv_size;
2628 
2629 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2630 		elt_size += sizeof(struct rte_crypto_sym_op);
2631 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2632 		elt_size += sizeof(struct rte_crypto_asym_op);
2633 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2634 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2635 		                    sizeof(struct rte_crypto_asym_op));
2636 	} else {
2637 		CDEV_LOG_ERR("Invalid op_type");
2638 		return NULL;
2639 	}
2640 
2641 	/* Look up the mempool in case it has already been allocated. */
2642 	struct rte_mempool *mp = rte_mempool_lookup(name);
2643 
2644 	if (mp != NULL) {
2645 		priv = (struct rte_crypto_op_pool_private *)
2646 				rte_mempool_get_priv(mp);
2647 
2648 		if (mp->elt_size != elt_size ||
2649 				mp->cache_size < cache_size ||
2650 				mp->size < nb_elts ||
2651 				priv->priv_size <  priv_size) {
2652 			mp = NULL;
2653 			CDEV_LOG_ERR("Mempool %s already exists but with incompatible parameters",
2654 					name);
2655 			return NULL;
2656 		}
2657 		return mp;
2658 	}
2659 
2660 	mp = rte_mempool_create(
2661 			name,
2662 			nb_elts,
2663 			elt_size,
2664 			cache_size,
2665 			sizeof(struct rte_crypto_op_pool_private),
2666 			NULL,
2667 			NULL,
2668 			rte_crypto_op_init,
2669 			&type,
2670 			socket_id,
2671 			0);
2672 
2673 	if (mp == NULL) {
2674 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2675 		return NULL;
2676 	}
2677 
2678 	priv = (struct rte_crypto_op_pool_private *)
2679 			rte_mempool_get_priv(mp);
2680 
2681 	priv->priv_size = priv_size;
2682 	priv->type = type;
2683 
2684 	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
2685 	return mp;
2686 }
2687 
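/*
 * Illustrative usage sketch (not part of the library): creating a symmetric
 * op pool and drawing one operation from it. The pool name and sizes are
 * arbitrary example values.
 *
 *	struct rte_mempool *op_pool;
 *	struct rte_crypto_op *op;
 *
 *	op_pool = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */
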
2688 int
2689 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2690 {
2691 	struct rte_cryptodev *dev = NULL;
2692 	uint32_t i = 0;
2693 
2694 	if (name == NULL)
2695 		return -EINVAL;
2696 
2697 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2698 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2699 				"%s_%u", dev_name_prefix, i);
2700 
2701 		if (ret < 0)
2702 			return ret;
2703 
2704 		dev = rte_cryptodev_pmd_get_named_dev(name);
2705 		if (!dev)
2706 			return 0;
2707 	}
2708 
2709 	return -1;
2710 }
2711 
2712 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2713 
2714 static struct cryptodev_driver_list cryptodev_driver_list =
2715 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2716 
2717 int
2718 rte_cryptodev_driver_id_get(const char *name)
2719 {
2720 	struct cryptodev_driver *driver;
2721 	const char *driver_name;
2722 	int driver_id = -1;
2723 
2724 	if (name == NULL) {
2725 		CDEV_LOG_DEBUG("name pointer NULL");
2726 		return -1;
2727 	}
2728 
2729 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2730 		driver_name = driver->driver->name;
2731 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
2732 			driver_id = driver->id;
2733 			break;
2734 		}
2735 	}
2736 
2737 	rte_cryptodev_trace_driver_id_get(name, driver_id);
2738 
2739 	return driver_id;
2740 }
2741 
2742 const char *
2743 rte_cryptodev_name_get(uint8_t dev_id)
2744 {
2745 	struct rte_cryptodev *dev;
2746 
2747 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2748 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2749 		return NULL;
2750 	}
2751 
2752 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2753 	if (dev == NULL)
2754 		return NULL;
2755 
2756 	rte_cryptodev_trace_name_get(dev_id, dev->data->name);
2757 
2758 	return dev->data->name;
2759 }
2760 
2761 const char *
2762 rte_cryptodev_driver_name_get(uint8_t driver_id)
2763 {
2764 	struct cryptodev_driver *driver;
2765 
2766 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2767 		if (driver->id == driver_id) {
2768 			rte_cryptodev_trace_driver_name_get(driver_id,
2769 				driver->driver->name);
2770 			return driver->driver->name;
2771 		}
2772 	}
2773 	return NULL;
2774 }
2775 
2776 uint8_t
2777 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2778 		const struct rte_driver *drv)
2779 {
2780 	crypto_drv->driver = drv;
2781 	crypto_drv->id = nb_drivers;
2782 
2783 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2784 
2785 	rte_cryptodev_trace_allocate_driver(drv->name);
2786 
2787 	return nb_drivers++;
2788 }
2789 
2790 RTE_INIT(cryptodev_init_fp_ops)
2791 {
2792 	uint32_t i;
2793 
2794 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2795 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2796 }
2797 
2798 static int
2799 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2800 		const char *params __rte_unused,
2801 		struct rte_tel_data *d)
2802 {
2803 	int dev_id;
2804 
2805 	if (rte_cryptodev_count() < 1)
2806 		return -EINVAL;
2807 
2808 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2809 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2810 		if (rte_cryptodev_is_valid_dev(dev_id))
2811 			rte_tel_data_add_array_int(d, dev_id);
2812 
2813 	return 0;
2814 }
2815 
2816 static int
2817 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2818 		const char *params, struct rte_tel_data *d)
2819 {
2820 	struct rte_cryptodev_info cryptodev_info;
2821 	int dev_id;
2822 	char *end_param;
2823 
2824 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2825 		return -EINVAL;
2826 
2827 	dev_id = strtoul(params, &end_param, 0);
2828 	if (*end_param != '\0')
2829 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2830 	if (!rte_cryptodev_is_valid_dev(dev_id))
2831 		return -EINVAL;
2832 
2833 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2834 
2835 	rte_tel_data_start_dict(d);
2836 	rte_tel_data_add_dict_string(d, "device_name",
2837 		cryptodev_info.device->name);
2838 	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
2839 		cryptodev_info.max_nb_queue_pairs);
2840 
2841 	return 0;
2842 }
2843 
2844 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)
2845 
2846 static int
2847 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2848 		const char *params,
2849 		struct rte_tel_data *d)
2850 {
2851 	struct rte_cryptodev_stats cryptodev_stats;
2852 	int dev_id, ret;
2853 	char *end_param;
2854 
2855 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2856 		return -EINVAL;
2857 
2858 	dev_id = strtoul(params, &end_param, 0);
2859 	if (*end_param != '\0')
2860 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2861 	if (!rte_cryptodev_is_valid_dev(dev_id))
2862 		return -EINVAL;
2863 
2864 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2865 	if (ret < 0)
2866 		return ret;
2867 
2868 	rte_tel_data_start_dict(d);
2869 	ADD_DICT_STAT(enqueued_count);
2870 	ADD_DICT_STAT(dequeued_count);
2871 	ADD_DICT_STAT(enqueue_err_count);
2872 	ADD_DICT_STAT(dequeue_err_count);
2873 
2874 	return 0;
2875 }
2876 
2877 #define CRYPTO_CAPS_SZ                                             \
2878 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2879 					sizeof(uint64_t)) /        \
2880 	 sizeof(uint64_t))
2881 
2882 static int
2883 crypto_caps_array(struct rte_tel_data *d,
2884 		  const struct rte_cryptodev_capabilities *capabilities)
2885 {
2886 	const struct rte_cryptodev_capabilities *dev_caps;
2887 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2888 	unsigned int i = 0, j;
2889 
2890 	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);
2891 
2892 	while ((dev_caps = &capabilities[i++])->op !=
2893 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2894 		memset(caps_val, 0, sizeof(caps_val));
2895 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2896 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2897 			rte_tel_data_add_array_uint(d, caps_val[j]);
2898 	}
2899 
2900 	return i;
2901 }
2902 
2903 static int
2904 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2905 			  struct rte_tel_data *d)
2906 {
2907 	struct rte_cryptodev_info dev_info;
2908 	struct rte_tel_data *crypto_caps;
2909 	int crypto_caps_n;
2910 	char *end_param;
2911 	int dev_id;
2912 
2913 	if (!params || strlen(params) == 0 || !isdigit(*params))
2914 		return -EINVAL;
2915 
2916 	dev_id = strtoul(params, &end_param, 0);
2917 	if (*end_param != '\0')
2918 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2919 	if (!rte_cryptodev_is_valid_dev(dev_id))
2920 		return -EINVAL;
2921 
2922 	rte_tel_data_start_dict(d);
2923 	crypto_caps = rte_tel_data_alloc();
2924 	if (!crypto_caps)
2925 		return -ENOMEM;
2926 
2927 	rte_cryptodev_info_get(dev_id, &dev_info);
2928 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2929 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2930 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2931 
2932 	return 0;
2933 }
2934 
2935 RTE_INIT(cryptodev_init_telemetry)
2936 {
2937 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2938 			"Returns information for a cryptodev. Parameters: int dev_id");
2939 	rte_telemetry_register_cmd("/cryptodev/list",
2940 			cryptodev_handle_dev_list,
2941 			"Returns list of available crypto devices by IDs. No parameters.");
2942 	rte_telemetry_register_cmd("/cryptodev/stats",
2943 			cryptodev_handle_dev_stats,
2944 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2945 	rte_telemetry_register_cmd("/cryptodev/caps",
2946 			cryptodev_handle_dev_caps,
2947 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2948 }
2949
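/*
 * Illustrative query sketch: with an application running, these endpoints
 * can be exercised through usertools/dpdk-telemetry.py, e.g.
 * "/cryptodev/list" or "/cryptodev/stats,0" where 0 is the device id.
 */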