xref: /dpdk/lib/cryptodev/rte_cryptodev.c (revision 53c65a3ce2c6b56cf3fa71621a74b97c41432fc0)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

RTE_LOG_REGISTER_DEFAULT(rte_cryptodev_logtype, INFO);

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * Crypto cipher algorithm string identifiers.
 * Not to be used by applications directly;
 * use rte_cryptodev_get_cipher_algo_string() instead.
 */
static const char *
crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr",
	[RTE_CRYPTO_CIPHER_SM4_CFB]	= "sm4-cfb",
	[RTE_CRYPTO_CIPHER_SM4_OFB]	= "sm4-ofb"
};

/**
 * Crypto cipher operation string identifiers,
 * suitable for use on an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * Crypto auth algorithm string identifiers.
 * Not to be used by applications directly;
 * use rte_cryptodev_get_auth_algo_string() instead.
 */
static const char *
crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3",
	[RTE_CRYPTO_AUTH_SM3_HMAC]	= "sm3-hmac",

	[RTE_CRYPTO_AUTH_SHAKE_128]	= "shake-128",
	[RTE_CRYPTO_AUTH_SHAKE_256]	= "shake-256",
};

/**
 * Crypto AEAD algorithm string identifiers.
 * Not to be used by applications directly;
 * use rte_cryptodev_get_aead_algo_string() instead.
 */
static const char *
crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * Crypto AEAD operation string identifiers,
 * suitable for use on an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation string identifiers.
 * Not to be used by applications directly;
 * use rte_cryptodev_asym_get_xform_string() instead.
 */
static const char *
crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
	[RTE_CRYPTO_ASYM_XFORM_SM2]	= "sm2",
};

/**
 * Asymmetric crypto operation string identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * Asymmetric crypto key exchange operation string identifiers.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t sess_data_sz;
	/**< driver session data size */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};
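
/*
 * Note: each object in such a session mempool is laid out as the generic
 * session header, followed by the driver's private session data, followed
 * by the per-session user data area, as sized by the fields above.
 */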

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}
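
/*
 * Illustrative sketch (not part of the library): mapping a user-supplied
 * string to its enum with the helper above, e.g. when parsing a command
 * line. The string "aes-cbc" is just an example value.
 *
 *	enum rte_crypto_cipher_algorithm algo = RTE_CRYPTO_CIPHER_NULL;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") != 0)
 *		printf("unknown cipher algorithm string\n");
 */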

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);

	return ret;
}

const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
		alg_str = crypto_cipher_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
		alg_str = crypto_auth_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
		alg_str = crypto_aead_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
{
	const char *xform_str = NULL;

	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
		xform_str = crypto_asym_xform_strings[xform_enum];

	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);

	return xform_str;
}

/**
 * Crypto auth operation string identifiers,
 * suitable for use on an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead) {
			sym_capability = &capability->sym;
			break;
		}
	}

	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
		dev_info.driver_id, idx->type, sym_capability);

	return sym_capability;
}
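
/*
 * Illustrative sketch (not part of the library): querying a device for
 * AES-CBC support and validating a 16-byte key and IV against the
 * advertised ranges; dev_id is assumed to be a valid device id.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *		;	// AES-CBC with 128-bit key and 16-byte IV supported
 */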

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
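
/*
 * For example, a range of { .min = 16, .max = 32, .increment = 8 } accepts
 * the sizes 16, 24 and 32 only. An increment of 0 makes the bounds check
 * alone decisive (by convention such a range describes a single size,
 * min == max).
 */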

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type) {
			asym_cap = &capability->asym.xform_capa;
			break;
		}
	}

	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
		dev_info.driver_id, idx->type, asym_cap);

	return asym_cap;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
		iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->auth.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
		digest_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->aead.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
		digest_size, aad_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	int ret = 0;

	if (capability->op_types & (1 << op_type))
		ret = 1;

	rte_cryptodev_trace_asym_xform_capability_check_optype(
		capability->op_types, op_type, ret);

	return ret;
}
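
/*
 * op_types is a bitmask indexed by enum rte_crypto_asym_op_type, so the
 * lookup above is a single bit test. The same convention applies to
 * hash_algos and the per-operation op_capa[] masks checked below.
 */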

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	int ret = 0; /* success */

	/* no need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min) {
			ret = -1;
			goto done;
		}
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max) {
			ret = -1;
			goto done;
		}
	}

	/* in any case, check that the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			ret = -1;
	}

done:
	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
		modlen, ret);

	return ret;
}

bool
rte_cryptodev_asym_xform_capability_check_hash(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_auth_algorithm hash)
{
	bool ret = false;

	if (capability->hash_algos & (1 << hash))
		ret = true;

	rte_cryptodev_trace_asym_xform_capability_check_hash(
		capability->hash_algos, hash, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_opcap(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type, uint8_t cap)
{
	int ret = 0;

	if (!(capability->op_types & (1 << op_type)))
		return ret;

	if (capability->op_capa[op_type] & (1 << cap))
		ret = 1;

	return ret;
}

/* spinlock for crypto device enq/deq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

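/*
 * Enqueue/dequeue callbacks are kept in per-queue-pair singly linked lists
 * (dev->enq_cbs / dev->deq_cbs). Data-plane threads traverse the lists
 * without taking a lock; writers serialize on the spinlock above and rely
 * on a per-queue-pair RCU QSBR variable so that a removed callback is only
 * freed once no data-plane thread can still be executing it.
 */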
static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as only one data-plane thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	rte_cryptodev_trace_get_feature_name(flag);

	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int ret = 1;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		ret = 0;
		goto done;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		ret = 0;

done:
	rte_cryptodev_trace_is_valid_dev(dev_id, ret);

	return ret;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;
	int ret = -1;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)) {
			ret = (int)i;
			break;
		}
	}

	rte_cryptodev_trace_get_dev_id(name, ret);

	return ret;
}

uint8_t
rte_cryptodev_count(void)
{
	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);

	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	rte_cryptodev_trace_devices_get(driver_name, count);

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	void *sec_ctx = NULL;

	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		sec_ctx = rte_crypto_devices[dev_id].security_ctx;

	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);

	return sec_ctx;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
		dev->data->socket_id);
	return dev->data->socket_id;
}

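/*
 * Per-device data is kept in a named memzone so that secondary processes
 * can attach to the same device state: the primary process reserves and
 * zeroes the zone, while secondaries only look it up.
 */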
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
		dev->data->socket_id, dev->data->dev_id,
		dev->data->nb_queue_pairs);

	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_queue_pair_reset(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->queue_pair_reset == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_reset(dev_id, queue_pair_id, qp_conf, socket_id);
	return (*dev->dev_ops->queue_pair_reset)(dev, queue_pair_id, qp_conf, socket_id);
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
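
/*
 * Typical bring-up order, as a sketch (not part of the library): configure
 * the device, set up its queue pairs, then start it. qp_conf stands for a
 * caller-prepared struct rte_cryptodev_qp_conf and dev_id for a valid,
 * stopped device.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) == 0 &&
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			conf.socket_id) == 0)
 *		(void)rte_cryptodev_start(dev_id);
 */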

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;
	int ret = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		ret = -EINVAL;
		goto done;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		ret = -EINVAL;
		goto done;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		ret = 1;
		goto done;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

done:
	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);

	return ret;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
	uint32_t sess_priv_size)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->sess_data_sz < sess_priv_size)
		return 0;

	return 1;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
					rte_cryptodev_sym_get_private_session_size(dev_id))) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
#ifndef RTE_CRYPTO_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
	return cb;
}

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
#ifndef RTE_CRYPTO_CALLBACKS
	return -ENOTSUP;
#endif
	struct rte_cryptodev *dev;
	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
	struct rte_cryptodev_cb *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			rte_atomic_store_explicit(prev_cb, curr_cb->next,
				rte_memory_order_relaxed);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}
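
/*
 * Illustrative pairing of the enqueue-callback calls above (sketch, not
 * part of the library): the handle returned by add must be passed back to
 * remove. my_cb stands for a user-supplied rte_cryptodev_callback_fn.
 *
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, qp_id, my_cb, NULL);
 *
 *	// ... data path runs ...
 *
 *	if (cb != NULL)
 *		(void)rte_cryptodev_remove_enq_callback(dev_id, qp_id, cb);
 */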

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
#ifndef RTE_CRYPTO_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
#ifndef RTE_CRYPTO_CALLBACKS
	return -ENOTSUP;
#endif
	struct rte_cryptodev *dev;
	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
	struct rte_cryptodev_cb *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			rte_atomic_store_explicit(prev_cb, curr_cb->next,
				rte_memory_order_relaxed);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);

	rte_cryptodev_trace_stats_get(dev_id, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	rte_cryptodev_trace_stats_reset(dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;

	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
	return ret;
}

1984 void
1985 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1986 	enum rte_cryptodev_event_type event)
1987 {
1988 	struct rte_cryptodev_callback *cb_lst;
1989 	struct rte_cryptodev_callback dev_cb;
1990 
1991 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1992 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1993 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1994 			continue;
1995 		dev_cb = *cb_lst;
1996 		cb_lst->active = 1;
1997 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1998 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1999 						dev_cb.cb_arg);
2000 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
2001 		cb_lst->active = 0;
2002 	}
2003 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
2004 }
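
/*
 * PMD-side sketch (illustrative only): a driver that detects a fatal
 * condition notifies all registered applications with a single call;
 * the unlock/relock around the invocation above lets a callback call
 * back into this API without deadlocking.
 *
 *	rte_cryptodev_pmd_callback_process(dev, RTE_CRYPTODEV_EVENT_ERROR);
 */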
2005 
2006 int
2007 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
2008 {
2009 	struct rte_cryptodev *dev;
2010 
2011 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2012 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2013 		return -EINVAL;
2014 	}
2015 	dev = &rte_crypto_devices[dev_id];
2016 
2017 	if (qp_id >= dev->data->nb_queue_pairs)
2018 		return -EINVAL;
2019 	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
2020 		return -ENOTSUP;
2021 
2022 	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
2023 }
2024 
2025 struct rte_mempool *
2026 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
2027 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
2028 	int socket_id)
2029 {
2030 	struct rte_mempool *mp;
2031 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2032 	uint32_t obj_sz;
2033 
2034 	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;
2035 
2036 	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2037 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
2038 			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
2039 			NULL, NULL,
2040 			socket_id, 0);
2041 	if (mp == NULL) {
2042 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2043 			__func__, name, rte_errno);
2044 		return NULL;
2045 	}
2046 
2047 	pool_priv = rte_mempool_get_priv(mp);
2048 	if (!pool_priv) {
2049 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2050 			__func__, name);
2051 		rte_mempool_free(mp);
2052 		return NULL;
2053 	}
2054 
2055 	pool_priv->sess_data_sz = elt_size;
2056 	pool_priv->user_data_sz = user_data_size;
2057 
2058 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
2059 		elt_size, cache_size, user_data_size, mp);
2060 	return mp;
2061 }
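
/*
 * Usage sketch (illustrative only): the element size passed in is the
 * driver's private session size; the pool name and counts are
 * assumptions for the example.
 *
 *	uint32_t sz = rte_cryptodev_sym_get_private_session_size(0);
 *	struct rte_mempool *pool =
 *		rte_cryptodev_sym_session_pool_create("sym_sess_pool",
 *			1024, sz, 32, 0, rte_socket_id());
 */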
2062 
2063 struct rte_mempool *
2064 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
2065 	uint32_t cache_size, uint16_t user_data_size, int socket_id)
2066 {
2067 	struct rte_mempool *mp;
2068 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2069 	uint32_t obj_sz, obj_sz_aligned;
2070 	uint8_t dev_id;
2071 	unsigned int priv_sz, max_priv_sz = 0;
2072 
2073 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2074 		if (rte_cryptodev_is_valid_dev(dev_id)) {
2075 			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
2076 			if (priv_sz > max_priv_sz)
2077 				max_priv_sz = priv_sz;
2078 		}
2079 	if (max_priv_sz == 0) {
2080 		CDEV_LOG_INFO("No valid crypto device found; cannot determine max private session size");
2081 		return NULL;
2082 	}
2083 
2084 	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
2085 			user_data_size;
2086 	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2087 
2088 	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
2089 			(uint32_t)(sizeof(*pool_priv)),
2090 			NULL, NULL, NULL, NULL,
2091 			socket_id, 0);
2092 	if (mp == NULL) {
2093 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2094 			__func__, name, rte_errno);
2095 		return NULL;
2096 	}
2097 
2098 	pool_priv = rte_mempool_get_priv(mp);
2099 	if (!pool_priv) {
2100 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2101 			__func__, name);
2102 		rte_mempool_free(mp);
2103 		return NULL;
2104 	}
2105 	pool_priv->max_priv_session_sz = max_priv_sz;
2106 	pool_priv->user_data_sz = user_data_size;
2107 
2108 	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
2109 		user_data_size, cache_size, mp);
2110 	return mp;
2111 }
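
/*
 * Usage sketch (illustrative only): unlike the symmetric variant, no
 * element size is passed; it is derived above from the largest private
 * session size across all probed devices. Name and counts are
 * assumptions.
 *
 *	struct rte_mempool *apool =
 *		rte_cryptodev_asym_session_pool_create("asym_sess_pool",
 *			128, 0, 16, rte_socket_id());
 */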
2112 
2113 void *
2114 rte_cryptodev_sym_session_create(uint8_t dev_id,
2115 		struct rte_crypto_sym_xform *xforms,
2116 		struct rte_mempool *mp)
2117 {
2118 	struct rte_cryptodev *dev;
2119 	struct rte_cryptodev_sym_session *sess;
2120 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2121 	uint32_t sess_priv_sz;
2122 	int ret;
2123 
2124 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2125 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2126 		rte_errno = EINVAL;
2127 		return NULL;
2128 	}
2129 
2130 	if (xforms == NULL) {
2131 		CDEV_LOG_ERR("Invalid xform");
2132 		rte_errno = EINVAL;
2133 		return NULL;
2134 	}
2135 
2136 	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
2137 	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
2138 		CDEV_LOG_ERR("Invalid mempool");
2139 		rte_errno = EINVAL;
2140 		return NULL;
2141 	}
2142 
2143 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2144 
2145 	/* Allocate a session structure from the session pool */
2146 	if (rte_mempool_get(mp, (void **)&sess)) {
2147 		CDEV_LOG_ERR("couldn't get object from session mempool");
2148 		rte_errno = ENOMEM;
2149 		return NULL;
2150 	}
2151 
2152 	pool_priv = rte_mempool_get_priv(mp);
2153 	sess->driver_id = dev->driver_id;
2154 	sess->sess_data_sz = pool_priv->sess_data_sz;
2155 	sess->user_data_sz = pool_priv->user_data_sz;
2156 	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
2157 		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);
2158 
2159 	if (dev->dev_ops->sym_session_configure == NULL) {
2160 		rte_errno = ENOTSUP;
2161 		goto error_exit;
2162 	}
2163 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2164 
2165 	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
2166 	if (ret < 0) {
2167 		rte_errno = -ret;
2168 		goto error_exit;
2169 	}
2171 
2172 	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);
2173 
2174 	return (void *)sess;
2175 error_exit:
2176 	rte_mempool_put(mp, (void *)sess);
2177 	return NULL;
2178 }
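
/*
 * Usage sketch (illustrative only): a single AES-CBC encrypt transform
 * used to create a session on device 0. "key" and "iv_offset" are
 * placeholders the application must provide; "pool" is a mempool made
 * with rte_cryptodev_sym_session_pool_create().
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(0, &xform, pool);
 *	if (sess == NULL)
 *		rte_panic("session create failed: %d\n", rte_errno);
 */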
2179 
2180 int
2181 rte_cryptodev_asym_session_create(uint8_t dev_id,
2182 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
2183 		void **session)
2184 {
2185 	struct rte_cryptodev_asym_session *sess;
2186 	uint32_t session_priv_data_sz;
2187 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2188 	unsigned int session_header_size =
2189 			rte_cryptodev_asym_get_header_session_size();
2190 	struct rte_cryptodev *dev;
2191 	int ret;
2192 
2193 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2194 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2195 		return -EINVAL;
2196 	}
2197 
2198 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2199 
2200 	if (dev == NULL)
2201 		return -EINVAL;
2202 
2203 	if (!mp) {
2204 		CDEV_LOG_ERR("Invalid mempool");
2205 		return -EINVAL;
2206 	}
2207 
2208 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
2209 			dev_id);
2210 	pool_priv = rte_mempool_get_priv(mp);
2211 
2212 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
2213 		CDEV_LOG_DEBUG(
2214 			"The mempool's private session data size is smaller than the private session data size required by this device");
2215 		return -EINVAL;
2216 	}
2217 
2218 	/* Verify that the provided mempool elements are big enough to hold a session. */
2219 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
2220 		CDEV_LOG_ERR(
2221 			"mempool elements too small to hold session objects");
2222 		return -EINVAL;
2223 	}
2224 
2225 	/* Allocate a session structure from the session pool */
2226 	if (rte_mempool_get(mp, session)) {
2227 		CDEV_LOG_ERR("couldn't get object from session mempool");
2228 		return -ENOMEM;
2229 	}
2230 
2231 	sess = *session;
2232 	sess->driver_id = dev->driver_id;
2233 	sess->user_data_sz = pool_priv->user_data_sz;
2234 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
2235 
2236 	/* Clear the device private session data and the user data. */
2237 	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
2238 
2239 	if (*dev->dev_ops->asym_session_configure == NULL)
2240 		return -ENOTSUP;
2241 
2242 	if (sess->sess_private_data[0] == 0) {
2243 		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
2244 		if (ret < 0) {
2245 			CDEV_LOG_ERR(
2246 				"dev_id %d failed to configure session details",
2247 				dev_id);
2248 			return ret;
2249 		}
2250 	}
2251 
2252 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2253 	return 0;
2254 }
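
/*
 * Usage sketch (illustrative only): a modular-exponentiation session.
 * The "mod"/"exp" buffers and their lengths are placeholders, and
 * "apool" is a mempool made with rte_cryptodev_asym_session_pool_create().
 *
 *	struct rte_crypto_asym_xform xform = {
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *		.modex = {
 *			.modulus = { .data = mod, .length = mod_len },
 *			.exponent = { .data = exp, .length = exp_len },
 *		},
 *	};
 *	void *sess;
 *
 *	if (rte_cryptodev_asym_session_create(0, &xform, apool, &sess) < 0)
 *		rte_panic("asym session create failed\n");
 */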
2255 
2256 int
2257 rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
2258 {
2259 	struct rte_cryptodev *dev;
2260 	struct rte_mempool *sess_mp;
2261 	struct rte_cryptodev_sym_session *sess = _sess;
2262 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2263 
2264 	if (sess == NULL)
2265 		return -EINVAL;
2266 
2267 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2268 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2269 		return -EINVAL;
2270 	}
2271 
2272 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2273 
2274 	if (dev == NULL)
2275 		return -EINVAL;
2276 
2277 	sess_mp = rte_mempool_from_obj(sess);
2278 	if (!sess_mp)
2279 		return -EINVAL;
2280 	pool_priv = rte_mempool_get_priv(sess_mp);
2281 
2282 	if (sess->driver_id != dev->driver_id) {
2283 		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
2284 			sess->driver_id, dev->driver_id);
2285 		return -EINVAL;
2286 	}
2287 
2288 	if (*dev->dev_ops->sym_session_clear == NULL)
2289 		return -ENOTSUP;
2290 
2291 	dev->dev_ops->sym_session_clear(dev, sess);
2292 
2293 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2294 
2295 	/* Return session to mempool */
2296 	rte_mempool_put(sess_mp, sess);
2297 
2298 	rte_cryptodev_trace_sym_session_free(dev_id, sess);
2299 	return 0;
2300 }
2301 
2302 int
2303 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2304 {
2305 	struct rte_mempool *sess_mp;
2306 	struct rte_cryptodev *dev;
2307 
2308 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2309 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2310 		return -EINVAL;
2311 	}
2312 
2313 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2314 
2315 	if (dev == NULL || sess == NULL)
2316 		return -EINVAL;
2317 
2318 	if (*dev->dev_ops->asym_session_clear == NULL)
2319 		return -ENOTSUP;
2320 
2321 	dev->dev_ops->asym_session_clear(dev, sess);
2322 
2323 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2324 
2325 	/* Return session to mempool */
2326 	sess_mp = rte_mempool_from_obj(sess);
2327 	rte_mempool_put(sess_mp, sess);
2328 
2329 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2330 	return 0;
2331 }
2332 
2333 unsigned int
2334 rte_cryptodev_asym_get_header_session_size(void)
2335 {
2336 	return sizeof(struct rte_cryptodev_asym_session);
2337 }
2338 
2339 unsigned int
2340 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2341 {
2342 	struct rte_cryptodev *dev;
2343 	unsigned int priv_sess_size;
2344 
2345 	if (!rte_cryptodev_is_valid_dev(dev_id))
2346 		return 0;
2347 
2348 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2349 
2350 	if (*dev->dev_ops->sym_session_get_size == NULL)
2351 		return 0;
2352 
2353 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2354 
2355 	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
2356 		priv_sess_size);
2357 
2358 	return priv_sess_size;
2359 }
2360 
2361 unsigned int
2362 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2363 {
2364 	struct rte_cryptodev *dev;
2365 	unsigned int priv_sess_size;
2366 
2367 	if (!rte_cryptodev_is_valid_dev(dev_id))
2368 		return 0;
2369 
2370 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2371 
2372 	if (*dev->dev_ops->asym_session_get_size == NULL)
2373 		return 0;
2374 
2375 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2376 
2377 	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
2378 		priv_sess_size);
2379 
2380 	return priv_sess_size;
2381 }
2382 
2383 int
2384 rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
2385 		uint16_t size)
2386 {
2387 	struct rte_cryptodev_sym_session *sess = _sess;
2388 
2389 	if (sess == NULL)
2390 		return -EINVAL;
2391 
2392 	if (sess->user_data_sz < size)
2393 		return -ENOMEM;
2394 
2395 	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);
2396 
2397 	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);
2398 
2399 	return 0;
2400 }
2401 
2402 void *
2403 rte_cryptodev_sym_session_get_user_data(void *_sess)
2404 {
2405 	struct rte_cryptodev_sym_session *sess = _sess;
2406 	void *data = NULL;
2407 
2408 	if (sess == NULL || sess->user_data_sz == 0)
2409 		return NULL;
2410 
2411 	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);
2412 
2413 	rte_cryptodev_trace_sym_session_get_user_data(sess, data);
2414 
2415 	return data;
2416 }
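
/*
 * Usage sketch (illustrative only): round-tripping per-session
 * application state. The session pool must have been created with a
 * user_data_size of at least sizeof(struct app_ctx); "app_ctx" is a
 * hypothetical application type.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	struct app_ctx *p = rte_cryptodev_sym_session_get_user_data(sess);
 */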
2417 
2418 int
2419 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2420 {
2421 	struct rte_cryptodev_asym_session *sess = session;
2422 	if (sess == NULL)
2423 		return -EINVAL;
2424 
2425 	if (sess->user_data_sz < size)
2426 		return -ENOMEM;
2427 
2428 	rte_memcpy(sess->sess_private_data +
2429 			sess->max_priv_data_sz,
2430 			data, size);
2431 
2432 	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);
2433 
2434 	return 0;
2435 }
2436 
2437 void *
2438 rte_cryptodev_asym_session_get_user_data(void *session)
2439 {
2440 	struct rte_cryptodev_asym_session *sess = session;
2441 	void *data = NULL;
2442 
2443 	if (sess == NULL || sess->user_data_sz == 0)
2444 		return NULL;
2445 
2446 	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);
2447 
2448 	rte_cryptodev_trace_asym_session_get_user_data(sess, data);
2449 
2450 	return data;
2451 }
2452 
2453 static inline void
2454 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2455 {
2456 	uint32_t i;
2457 	for (i = 0; i < vec->num; i++)
2458 		vec->status[i] = errnum;
2459 }
2460 
2461 uint32_t
2462 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2463 	void *_sess, union rte_crypto_sym_ofs ofs,
2464 	struct rte_crypto_sym_vec *vec)
2465 {
2466 	struct rte_cryptodev *dev;
2467 	struct rte_cryptodev_sym_session *sess = _sess;
2468 
2469 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2470 		sym_crypto_fill_status(vec, EINVAL);
2471 		return 0;
2472 	}
2473 
2474 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2475 
2476 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2477 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2478 		sym_crypto_fill_status(vec, ENOTSUP);
2479 		return 0;
2480 	}
2481 
2482 	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);
2483 
2484 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2485 }
2486 
2487 int
2488 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2489 {
2490 	struct rte_cryptodev *dev;
2491 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2492 	int32_t priv_size;
2493 
2494 	if (!rte_cryptodev_is_valid_dev(dev_id))
2495 		return -EINVAL;
2496 
2497 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2498 
2499 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2500 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2501 		return -ENOTSUP;
2502 	}
2503 
2504 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2505 	if (priv_size < 0)
2506 		return -ENOTSUP;
2507 
2508 	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);
2509 
2510 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2511 }
2512 
2513 int
2514 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2515 	struct rte_crypto_raw_dp_ctx *ctx,
2516 	enum rte_crypto_op_sess_type sess_type,
2517 	union rte_cryptodev_session_ctx session_ctx,
2518 	uint8_t is_update)
2519 {
2520 	struct rte_cryptodev *dev;
2521 
2522 	if (rte_cryptodev_get_qp_status(dev_id, qp_id) != 1)
2523 		return -EINVAL;
2524 
2525 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2526 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2527 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2528 		return -ENOTSUP;
2529 
2530 	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);
2531 
2532 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2533 			sess_type, session_ctx, is_update);
2534 }
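
/*
 * Usage sketch (illustrative only): the raw data-path context is sized
 * by the API, allocated by the caller, then bound to a configured
 * queue pair with an existing session. Error checks are omitted.
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */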
2535 
2536 int
2537 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2538 	enum rte_crypto_op_type op_type,
2539 	enum rte_crypto_op_sess_type sess_type,
2540 	void *ev_mdata,
2541 	uint16_t size)
2542 {
2543 	struct rte_cryptodev *dev;
2544 
2545 	if (sess == NULL || ev_mdata == NULL)
2546 		return -EINVAL;
2547 
2548 	if (!rte_cryptodev_is_valid_dev(dev_id))
2549 		goto skip_pmd_op;
2550 
2551 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2552 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2553 		goto skip_pmd_op;
2554 
2555 	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
2556 		sess_type, ev_mdata, size);
2557 
2558 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2559 			sess_type, ev_mdata);
2560 
2561 skip_pmd_op:
2562 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2563 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2564 				size);
2565 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2566 		struct rte_cryptodev_asym_session *s = sess;
2567 
2568 		if (s->event_mdata == NULL) {
2569 			s->event_mdata = rte_malloc(NULL, size, 0);
2570 			if (s->event_mdata == NULL)
2571 				return -ENOMEM;
2572 		}
2573 		rte_memcpy(s->event_mdata, ev_mdata, size);
2574 
2575 		return 0;
2576 	} else
2577 		return -ENOTSUP;
2578 }
2579 
2580 uint32_t
2581 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2582 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2583 	void **user_data, int *enqueue_status)
2584 {
2585 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2586 			ofs, user_data, enqueue_status);
2587 }
2588 
2589 int
2590 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2591 		uint32_t n)
2592 {
2593 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2594 }
2595 
2596 uint32_t
2597 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2598 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2599 	uint32_t max_nb_to_dequeue,
2600 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2601 	void **out_user_data, uint8_t is_user_data_array,
2602 	uint32_t *n_success_jobs, int *status)
2603 {
2604 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2605 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2606 		out_user_data, is_user_data_array, n_success_jobs, status);
2607 }
2608 
2609 int
2610 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2611 		uint32_t n)
2612 {
2613 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2614 }
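
/*
 * Usage sketch (illustrative only): the wrappers above simply forward
 * to the driver callbacks stored in the context. This assumes the
 * enqueue_status contract documented in rte_cryptodev.h, under which a
 * burst the driver has cached must be committed with the *_done() call:
 *
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			udata, &status);
 *	if (n > 0 && status == 0)
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 */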
2615 
2616 /** Initialise rte_crypto_op mempool element */
2617 static void
2618 rte_crypto_op_init(struct rte_mempool *mempool,
2619 		void *opaque_arg,
2620 		void *_op_data,
2621 		__rte_unused unsigned i)
2622 {
2623 	struct rte_crypto_op *op = _op_data;
2624 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2625 
2626 	memset(_op_data, 0, mempool->elt_size);
2627 
2628 	__rte_crypto_op_reset(op, type);
2629 
2630 	op->phys_addr = rte_mempool_virt2iova(_op_data);
2631 	op->mempool = mempool;
2632 }
2633 
2635 struct rte_mempool *
2636 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2637 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2638 		int socket_id)
2639 {
2640 	struct rte_crypto_op_pool_private *priv;
2641 
2642 	unsigned int elt_size = sizeof(struct rte_crypto_op) + priv_size;
2644 
2645 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2646 		elt_size += sizeof(struct rte_crypto_sym_op);
2647 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2648 		elt_size += sizeof(struct rte_crypto_asym_op);
2649 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2650 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2651 		                    sizeof(struct rte_crypto_asym_op));
2652 	} else {
2653 		CDEV_LOG_ERR("Invalid op_type");
2654 		return NULL;
2655 	}
2656 
2657 	/* lookup mempool in case already allocated */
2658 	struct rte_mempool *mp = rte_mempool_lookup(name);
2659 
2660 	if (mp != NULL) {
2661 		priv = (struct rte_crypto_op_pool_private *)
2662 				rte_mempool_get_priv(mp);
2663 
2664 		if (mp->elt_size != elt_size ||
2665 				mp->cache_size < cache_size ||
2666 				mp->size < nb_elts ||
2667 				priv->priv_size < priv_size) {
2669 			CDEV_LOG_ERR("Mempool %s already exists but with "
2670 					"incompatible parameters", name);
2671 			return NULL;
2672 		}
2673 		return mp;
2674 	}
2675 
2676 	mp = rte_mempool_create(
2677 			name,
2678 			nb_elts,
2679 			elt_size,
2680 			cache_size,
2681 			sizeof(struct rte_crypto_op_pool_private),
2682 			NULL,
2683 			NULL,
2684 			rte_crypto_op_init,
2685 			&type,
2686 			socket_id,
2687 			0);
2688 
2689 	if (mp == NULL) {
2690 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2691 		return NULL;
2692 	}
2693 
2694 	priv = (struct rte_crypto_op_pool_private *)
2695 			rte_mempool_get_priv(mp);
2696 
2697 	priv->priv_size = priv_size;
2698 	priv->type = type;
2699 
2700 	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
2701 	return mp;
2702 }
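
/*
 * Usage sketch (illustrative only): creating a pool of symmetric ops
 * and drawing one from it with rte_crypto_op_alloc(). Name and counts
 * are assumptions.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 4096, 128, 0,
 *			rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */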
2703 
2704 int
2705 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2706 {
2707 	struct rte_cryptodev *dev = NULL;
2708 	uint32_t i = 0;
2709 
2710 	if (name == NULL)
2711 		return -EINVAL;
2712 
2713 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2714 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2715 				"%s_%u", dev_name_prefix, i);
2716 
2717 		if (ret < 0)
2718 			return ret;
2719 
2720 		dev = rte_cryptodev_pmd_get_named_dev(name);
2721 		if (!dev)
2722 			return 0;
2723 	}
2724 
2725 	return -1;
2726 }
2727 
2728 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2729 
2730 static struct cryptodev_driver_list cryptodev_driver_list =
2731 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2732 
2733 int
2734 rte_cryptodev_driver_id_get(const char *name)
2735 {
2736 	struct cryptodev_driver *driver;
2737 	const char *driver_name;
2738 	int driver_id = -1;
2739 
2740 	if (name == NULL) {
2741 		CDEV_LOG_DEBUG("name pointer NULL");
2742 		return -1;
2743 	}
2744 
2745 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2746 		driver_name = driver->driver->name;
2747 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
2748 			driver_id = driver->id;
2749 			break;
2750 		}
2751 	}
2752 
2753 	rte_cryptodev_trace_driver_id_get(name, driver_id);
2754 
2755 	return driver_id;
2756 }
2757 
2758 const char *
2759 rte_cryptodev_name_get(uint8_t dev_id)
2760 {
2761 	struct rte_cryptodev *dev;
2762 
2763 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2764 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2765 		return NULL;
2766 	}
2767 
2768 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2769 	if (dev == NULL)
2770 		return NULL;
2771 
2772 	rte_cryptodev_trace_name_get(dev_id, dev->data->name);
2773 
2774 	return dev->data->name;
2775 }
2776 
2777 const char *
2778 rte_cryptodev_driver_name_get(uint8_t driver_id)
2779 {
2780 	struct cryptodev_driver *driver;
2781 
2782 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2783 		if (driver->id == driver_id) {
2784 			rte_cryptodev_trace_driver_name_get(driver_id,
2785 				driver->driver->name);
2786 			return driver->driver->name;
2787 		}
2788 	}
2789 	return NULL;
2790 }
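
/*
 * Usage sketch (illustrative only): mapping a driver name to its id
 * and back. "crypto_aesni_mb" is just an example PMD name.
 *
 *	int id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *
 *	if (id >= 0)
 *		printf("driver %s has id %d\n",
 *				rte_cryptodev_driver_name_get(id), id);
 */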
2791 
2792 uint8_t
2793 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2794 		const struct rte_driver *drv)
2795 {
2796 	crypto_drv->driver = drv;
2797 	crypto_drv->id = nb_drivers;
2798 
2799 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2800 
2801 	rte_cryptodev_trace_allocate_driver(drv->name);
2802 
2803 	return nb_drivers++;
2804 }
2805 
2806 RTE_INIT(cryptodev_init_fp_ops)
2807 {
2808 	uint32_t i;
2809 
2810 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2811 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2812 }
2813 
2814 static int
2815 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2816 		const char *params __rte_unused,
2817 		struct rte_tel_data *d)
2818 {
2819 	int dev_id;
2820 
2821 	if (rte_cryptodev_count() < 1)
2822 		return -EINVAL;
2823 
2824 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2825 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2826 		if (rte_cryptodev_is_valid_dev(dev_id))
2827 			rte_tel_data_add_array_int(d, dev_id);
2828 
2829 	return 0;
2830 }
2831 
2832 static int
2833 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2834 		const char *params, struct rte_tel_data *d)
2835 {
2836 	struct rte_cryptodev_info cryptodev_info;
2837 	int dev_id;
2838 	char *end_param;
2839 
2840 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2841 		return -EINVAL;
2842 
2843 	dev_id = strtoul(params, &end_param, 0);
2844 	if (*end_param != '\0')
2845 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2846 	if (!rte_cryptodev_is_valid_dev(dev_id))
2847 		return -EINVAL;
2848 
2849 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2850 
2851 	rte_tel_data_start_dict(d);
2852 	rte_tel_data_add_dict_string(d, "device_name",
2853 		cryptodev_info.device->name);
2854 	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
2855 		cryptodev_info.max_nb_queue_pairs);
2856 
2857 	return 0;
2858 }
2859 
2860 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)
2861 
2862 static int
2863 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2864 		const char *params,
2865 		struct rte_tel_data *d)
2866 {
2867 	struct rte_cryptodev_stats cryptodev_stats;
2868 	int dev_id, ret;
2869 	char *end_param;
2870 
2871 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2872 		return -EINVAL;
2873 
2874 	dev_id = strtoul(params, &end_param, 0);
2875 	if (*end_param != '\0')
2876 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2877 	if (!rte_cryptodev_is_valid_dev(dev_id))
2878 		return -EINVAL;
2879 
2880 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2881 	if (ret < 0)
2882 		return ret;
2883 
2884 	rte_tel_data_start_dict(d);
2885 	ADD_DICT_STAT(enqueued_count);
2886 	ADD_DICT_STAT(dequeued_count);
2887 	ADD_DICT_STAT(enqueue_err_count);
2888 	ADD_DICT_STAT(dequeue_err_count);
2889 
2890 	return 0;
2891 }
2892 
2893 #define CRYPTO_CAPS_SZ                                             \
2894 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2895 					sizeof(uint64_t)) /        \
2896 	 sizeof(uint64_t))
2897 
2898 static int
2899 crypto_caps_array(struct rte_tel_data *d,
2900 		  const struct rte_cryptodev_capabilities *capabilities)
2901 {
2902 	const struct rte_cryptodev_capabilities *dev_caps;
2903 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2904 	unsigned int i = 0, j;
2905 
2906 	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);
2907 
2908 	while ((dev_caps = &capabilities[i++])->op !=
2909 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2910 		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
2911 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2912 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2913 			rte_tel_data_add_array_uint(d, caps_val[j]);
2914 	}
2915 
2916 	return i;
2917 }
2918 
2919 static int
2920 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2921 			  struct rte_tel_data *d)
2922 {
2923 	struct rte_cryptodev_info dev_info;
2924 	struct rte_tel_data *crypto_caps;
2925 	int crypto_caps_n;
2926 	char *end_param;
2927 	int dev_id;
2928 
2929 	if (!params || strlen(params) == 0 || !isdigit(*params))
2930 		return -EINVAL;
2931 
2932 	dev_id = strtoul(params, &end_param, 0);
2933 	if (*end_param != '\0')
2934 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2935 	if (!rte_cryptodev_is_valid_dev(dev_id))
2936 		return -EINVAL;
2937 
2938 	rte_tel_data_start_dict(d);
2939 	crypto_caps = rte_tel_data_alloc();
2940 	if (!crypto_caps)
2941 		return -ENOMEM;
2942 
2943 	rte_cryptodev_info_get(dev_id, &dev_info);
2944 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2945 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2946 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2947 
2948 	return 0;
2949 }
2950 
2951 RTE_INIT(cryptodev_init_telemetry)
2952 {
2953 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2954 			"Returns information for a cryptodev. Parameters: int dev_id");
2955 	rte_telemetry_register_cmd("/cryptodev/list",
2956 			cryptodev_handle_dev_list,
2957 			"Returns list of available crypto devices by IDs. No parameters.");
2958 	rte_telemetry_register_cmd("/cryptodev/stats",
2959 			cryptodev_handle_dev_stats,
2960 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2961 	rte_telemetry_register_cmd("/cryptodev/caps",
2962 			cryptodev_handle_dev_caps,
2963 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2964 }
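
/*
 * Usage sketch (illustrative only): with an application running, the
 * endpoints registered above can be exercised from the telemetry
 * client shipped in usertools, e.g.:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /cryptodev/list
 *	--> /cryptodev/info,0
 *	--> /cryptodev/stats,0
 */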
2965