/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

RTE_LOG_REGISTER_DEFAULT(rte_cryptodev_logtype, INFO);
/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * String identifiers for the crypto cipher algorithms.
 * Not to be used by applications directly.
 * Applications can use rte_cryptodev_get_cipher_algo_string() instead.
 */
static const char *
crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr",
	[RTE_CRYPTO_CIPHER_SM4_CFB]	= "sm4-cfb",
	[RTE_CRYPTO_CIPHER_SM4_OFB]	= "sm4-ofb",
	[RTE_CRYPTO_CIPHER_SM4_XTS]	= "sm4-xts"
};

/**
 * String identifiers for the crypto cipher operations.
 * They can be used on an application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto auth algorithms.
 * Not to be used by applications directly.
 * Applications can use rte_cryptodev_get_auth_algo_string() instead.
 */
static const char *
crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3",
	[RTE_CRYPTO_AUTH_SM3_HMAC]	= "sm3-hmac",

	[RTE_CRYPTO_AUTH_SHAKE_128]	 = "shake-128",
	[RTE_CRYPTO_AUTH_SHAKE_256]	 = "shake-256",
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * Not to be used by applications directly.
 * Applications can use rte_cryptodev_get_aead_algo_string() instead.
 */
static const char *
crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * They can be used on an application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transform operations.
 * Not to be used by applications directly.
 * Applications can use rte_cryptodev_asym_get_xform_string() instead.
 */
static const char *
crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
	[RTE_CRYPTO_ASYM_XFORM_SM2]	= "sm2",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * String identifiers for the asymmetric crypto key exchange operations.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

/**
 * The private data structure stored in the sym session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t sess_data_sz;
	/**< Driver session data size */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

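/*
 * Illustrative usage sketch (not part of the library): converting between
 * the algorithm strings above and their enum values, e.g. when parsing an
 * application command line. The "aes-cbc" literal is an assumed example
 * input.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") == 0)
 *		printf("parsed cipher: %s\n",
 *			rte_cryptodev_get_cipher_algo_string(algo));
 *	else
 *		printf("unknown cipher\n");
 */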
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);

	return ret;
}

const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
		alg_str = crypto_cipher_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
		alg_str = crypto_auth_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
		alg_str = crypto_aead_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
{
	const char *xform_str = NULL;

	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
		xform_str = crypto_asym_xform_strings[xform_enum];

	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);

	return xform_str;
}

/**
 * String identifiers for the crypto auth operations.
 * They can be used on an application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead) {
			sym_capability = &capability->sym;
			break;
		}
	}

	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
		dev_info.driver_id, idx->type, sym_capability);

	return sym_capability;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
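
/*
 * Example of the range semantics above (illustrative): a range with
 * min = 16, max = 32 and increment = 8 accepts the sizes 16, 24 and 32;
 * a range with increment = 0 accepts only the single value min == max.
 */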

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type) {
			asym_cap = &capability->asym.xform_capa;
			break;
		}
	}

	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
		dev_info.driver_id, idx->type, asym_cap);

	return asym_cap;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
		iv_size, ret);

	return ret;
}
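
/*
 * Illustrative sketch (not part of the library): querying a device for
 * AES-CBC support and validating key/IV sizes before creating a session.
 * The 16-byte sizes are assumed example parameters.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *		... AES-CBC with 128-bit keys and 16-byte IVs is supported ...
 */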

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->auth.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
		digest_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->aead.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
		digest_size, aad_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	int ret = 0;

	if (capability->op_types & (1 << op_type))
		ret = 1;

	rte_cryptodev_trace_asym_xform_capability_check_optype(
		capability->op_types, op_type, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	int ret = 0; /* success */

	/* No need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min) {
			ret = -1;
			goto done;
		}
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max) {
			ret = -1;
			goto done;
		}
	}

	/* In any case, check that the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			ret = -1;
	}

done:
	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
		modlen, ret);

	return ret;
}

bool
rte_cryptodev_asym_xform_capability_check_hash(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_auth_algorithm hash)
{
	bool ret = false;

	if (capability->hash_algos & (1 << hash))
		ret = true;

	rte_cryptodev_trace_asym_xform_capability_check_hash(
		capability->hash_algos, hash, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_opcap(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type, uint8_t cap)
{
	int ret = 0;

	if (!(capability->op_types & (1 << op_type)))
		return ret;

	if (capability->op_capa[op_type] & (1 << cap))
		ret = 1;

	return ret;
}
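
/*
 * Illustrative sketch (not part of the library): checking whether a
 * device can sign with 2048-bit (256-byte) RSA keys. The modulus length
 * is an assumed example parameter.
 *
 *	struct rte_cryptodev_asym_capability_idx aidx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *acap =
 *		rte_cryptodev_asym_capability_get(dev_id, &aidx);
 *
 *	if (acap != NULL &&
 *	    rte_cryptodev_asym_xform_capability_check_optype(acap,
 *			RTE_CRYPTO_ASYM_OP_SIGN) &&
 *	    rte_cryptodev_asym_xform_capability_check_modlen(acap, 256) == 0)
 *		... RSA sign with a 2048-bit modulus is supported ...
 */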

/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as only one data-plane thread
	 * accesses a given queue pair.
	 */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		dev->enq_cbs = NULL; /* avoid a dangling pointer on a later cleanup */
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}
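
/*
 * Note on the scheme above: each queue pair gets its own QSBR variable
 * sized for a single reader thread, matching the cryptodev convention
 * that a given queue pair is polled by only one data-plane thread.
 * rte_cryptodev_remove_enq_callback() and
 * rte_cryptodev_remove_deq_callback() later use these variables to wait
 * for that reader to quiesce before freeing an unlinked callback.
 */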

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	rte_cryptodev_trace_get_feature_name(flag);

	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}
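
/*
 * Illustrative sketch (not part of the library): printing the feature
 * flags of a device. dev_info is assumed to have been filled in by
 * rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((dev_info.feature_flags & flag) && name != NULL)
 *			printf("%s\n", name);
 *	}
 */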

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int ret = 1;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		ret = 0;
		goto done;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		ret = 0;

done:
	rte_cryptodev_trace_is_valid_dev(dev_id, ret);

	return ret;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;
	int ret = -1;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)) {
			ret = (int)i;
			break;
		}
	}

	rte_cryptodev_trace_get_dev_id(name, ret);

	return ret;
}

uint8_t
rte_cryptodev_count(void)
{
	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);

	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	rte_cryptodev_trace_devices_get(driver_name, count);

	return count;
}
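
/*
 * Illustrative sketch (not part of the library): collecting the IDs of
 * all devices bound to one driver. "crypto_aesni_mb" is an assumed
 * example driver name.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_DIM(ids));
 *
 *	for (uint8_t j = 0; j < nb; j++)
 *		printf("dev %u\n", ids[j]);
 */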

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	void *sec_ctx = NULL;

	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		sec_ctx = rte_crypto_devices[dev_id].security_ctx;

	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);

	return sec_ctx;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
		dev->data->socket_id);
	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}
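
/*
 * Note on the two helpers above: rte_cryptodev_data lives in a memzone
 * named "rte_cryptodev_data_<dev_id>" so that a secondary process can
 * attach to the same device state by name. Only the primary process
 * zeroes the zone on allocation and frees it on release; secondaries
 * merely look it up and drop their reference.
 */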

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
		dev->data->socket_id, dev->data->dev_id,
		dev->data->nb_queue_pairs);

	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_queue_pair_reset(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->queue_pair_reset == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_reset(dev_id, queue_pair_id, qp_conf, socket_id);
	return (*dev->dev_ops->queue_pair_reset)(dev, queue_pair_id, qp_conf, socket_id);
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
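
/*
 * Illustrative configuration sketch (assumed parameters, not part of the
 * library): a device must be configured while stopped, then have its
 * queue pairs set up, and finally be started.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 2,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		... handle error ...
 */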

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;
	int ret = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		ret = -EINVAL;
		goto done;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		ret = -EINVAL;
		goto done;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		ret = 1;
		goto done;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

done:
	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);

	return ret;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
	uint32_t sess_priv_size)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->sess_data_sz < sess_priv_size)
		return 0;

	return 1;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
					rte_cryptodev_sym_get_private_session_size(dev_id))) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
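
/*
 * Illustrative sketch (assumed parameters, not part of the library):
 * setting up queue pair 0 after rte_cryptodev_configure(). sess_mp is
 * assumed to come from rte_cryptodev_sym_session_pool_create().
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_cryptodev_socket_id(dev_id)) < 0)
 *		... handle error ...
 */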

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
#ifndef RTE_CRYPTO_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in FIFO order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
	return cb;
}
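
/*
 * Illustrative sketch (not part of the library): a pass-through enqueue
 * callback. The signature follows rte_cryptodev_callback_fn; count_cb
 * and its counter are hypothetical names.
 *
 *	static uint16_t
 *	count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *		 uint16_t nb_ops, void *user_param)
 *	{
 *		uint64_t *cnt = user_param;
 *
 *		*cnt += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_cb, &counter);
 */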

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
#ifndef RTE_CRYPTO_CALLBACKS
	return -ENOTSUP;
#endif
	struct rte_cryptodev *dev;
	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
	struct rte_cryptodev_cb *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			rte_atomic_store_explicit(prev_cb, curr_cb->next,
				rte_memory_order_relaxed);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with an invalid thread ID, as this is part of
		 * the control-plane API.
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
#ifndef RTE_CRYPTO_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in FIFO order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
#ifndef RTE_CRYPTO_CALLBACKS
	return -ENOTSUP;
#endif
	struct rte_cryptodev *dev;
	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
	struct rte_cryptodev_cb *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			rte_atomic_store_explicit(prev_cb, curr_cb->next,
				rte_memory_order_relaxed);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with an invalid thread ID, as this is part of
		 * the control-plane API.
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);

	rte_cryptodev_trace_stats_get(dev_id, stats);
	return 0;
}
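
/*
 * Illustrative sketch (not part of the library): reading and printing
 * the basic counters.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enqueued %" PRIu64 ", dequeued %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count);
 */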

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	rte_cryptodev_trace_stats_reset(dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;

	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
}
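
/*
 * Illustrative sketch (not part of the library): querying device info.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	printf("%s: up to %u queue pairs\n", info.driver_name,
 *		info.max_nb_queue_pairs);
 */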

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
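
/*
 * Illustrative sketch (not part of the library): registering an event
 * callback. ev_cb is a hypothetical name; the signature follows
 * rte_cryptodev_cb_fn.
 *
 *	static void
 *	ev_cb(uint8_t dev_id, enum rte_cryptodev_event_type event, void *arg)
 *	{
 *		printf("event %d on dev %u\n", event, dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			ev_cb, NULL);
 */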
1936 
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}
	dev = &rte_crypto_devices[dev_id];

	if (qp_id >= dev->data->nb_queue_pairs)
		return -EINVAL;
	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
		return -ENOTSUP;

	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
}

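/*
 * Editor's note: an illustrative sketch only. From an
 * RTE_CRYPTODEV_EVENT_ERROR callback, an application can scan the queue
 * pairs to locate the failing one; "nb_qps" is an assumed variable holding
 * the configured queue pair count, and the meaning of a positive return
 * value is PMD-specific.
 *
 *	uint16_t qp_id;
 *
 *	for (qp_id = 0; qp_id < nb_qps; qp_id++) {
 *		int err = rte_cryptodev_queue_pair_event_error_query(dev_id,
 *				qp_id);
 *		if (err > 0)
 *			printf("queue pair %u reported an error\n", qp_id);
 *	}
 */
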
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;

	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
			NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->sess_data_sz = elt_size;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}

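/*
 * Editor's note: a minimal sketch of creating a symmetric session pool
 * sized from a device's private session size; the pool name and sizes are
 * arbitrary example values.
 *
 *	uint32_t sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *pool = rte_cryptodev_sym_session_pool_create(
 *			"sym_sess_pool", 1024, sz, 32, 16, rte_socket_id());
 *	if (pool == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create session pool\n");
 */
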
struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	uint32_t obj_sz, obj_sz_aligned;
	uint8_t dev_id;
	unsigned int priv_sz, max_priv_sz = 0;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id)) {
			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
			if (priv_sz > max_priv_sz)
				max_priv_sz = priv_sz;
		}
	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("Could not determine max private session size: no valid device found");
		return NULL;
	}

	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);

	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}
	pool_priv->max_priv_session_sz = max_priv_sz;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
		user_data_size, cache_size, mp);
	return mp;
}

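/*
 * Editor's note: the asymmetric pool, by contrast, takes no element size
 * argument because the function above derives it from the largest private
 * session size across all probed devices. Example values are arbitrary.
 *
 *	struct rte_mempool *apool = rte_cryptodev_asym_session_pool_create(
 *			"asym_sess_pool", 1024, 32, 0, rte_socket_id());
 *	if (apool == NULL)
 *		... no valid device was found or the allocation failed ...
 */
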
void *
rte_cryptodev_sym_session_create(uint8_t dev_id,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t sess_priv_sz;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (xforms == NULL) {
		CDEV_LOG_ERR("Invalid xform");
		rte_errno = EINVAL;
		return NULL;
	}

	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
		CDEV_LOG_ERR("Invalid mempool");
		rte_errno = EINVAL;
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		rte_errno = ENOMEM;
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	sess->driver_id = dev->driver_id;
	sess->sess_data_sz = pool_priv->sess_data_sz;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);

	if (dev->dev_ops->sym_session_configure == NULL) {
		rte_errno = ENOTSUP;
		goto error_exit;
	}
	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);

	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		rte_errno = -ret;
		goto error_exit;
	}

	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);

	return (void *)sess;
error_exit:
	rte_mempool_put(mp, (void *)sess);
	return NULL;
}

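/*
 * Editor's note: a minimal sketch of creating a symmetric session from a
 * single cipher xform; "key_data", "IV_OFFSET" and "pool" are assumed to be
 * defined by the application.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key_data, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform, pool);
 *	if (sess == NULL)
 *		... inspect rte_errno (EINVAL, ENOMEM, ENOTSUP, ...) ...
 */
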
int
rte_cryptodev_asym_session_create(uint8_t dev_id,
		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
		void **session)
{
	struct rte_cryptodev_asym_session *sess;
	uint32_t session_priv_data_sz;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	unsigned int session_header_size =
			rte_cryptodev_asym_get_header_session_size();
	struct rte_cryptodev *dev;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return -EINVAL;

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool");
		return -EINVAL;
	}

	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
			dev_id);
	pool_priv = rte_mempool_get_priv(mp);

	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
		CDEV_LOG_DEBUG(
			"Mempool private session data size is smaller than this device's private session data size");
		return -EINVAL;
	}

	/* Verify if provided mempool can hold elements big enough. */
	if (mp->elt_size < session_header_size + session_priv_data_sz) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return -EINVAL;
	}

	if (*dev->dev_ops->asym_session_configure == NULL)
		return -ENOTSUP;

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, session)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return -ENOMEM;
	}

	sess = *session;
	sess->driver_id = dev->driver_id;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;

	/* Clear device private session data and user data. */
	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);

	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		CDEV_LOG_ERR(
			"dev_id %d failed to configure session details",
			dev_id);
		rte_mempool_put(mp, *session);
		*session = NULL;
		return ret;
	}

	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
	return 0;
}

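/*
 * Editor's note: unlike the symmetric variant, the asymmetric create above
 * reports errors through its return value and hands the session back via an
 * output parameter; "xform" is assumed to be a populated
 * struct rte_crypto_asym_xform.
 *
 *	void *asess;
 *	int rc = rte_cryptodev_asym_session_create(dev_id, &xform, apool,
 *			&asess);
 *	if (rc != 0)
 *		... rc is -EINVAL, -ENOMEM or -ENOTSUP ...
 */
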
int
rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
{
	struct rte_cryptodev *dev;
	struct rte_mempool *sess_mp;
	struct rte_cryptodev_sym_session *sess = _sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (sess == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return -EINVAL;

	sess_mp = rte_mempool_from_obj(sess);
	if (!sess_mp)
		return -EINVAL;
	pool_priv = rte_mempool_get_priv(sess_mp);

	if (sess->driver_id != dev->driver_id) {
		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
			sess->driver_id, dev->driver_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->sym_session_clear == NULL)
		return -ENOTSUP;

	dev->dev_ops->sym_session_clear(dev, sess);

	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);

	/* Return session to mempool */
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(dev_id, sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
{
	struct rte_mempool *sess_mp;
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	if (*dev->dev_ops->asym_session_clear == NULL)
		return -ENOTSUP;

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(dev_id, sess);
	return 0;
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	return sizeof(struct rte_cryptodev_asym_session);
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
		priv_sess_size);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);

	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
		priv_sess_size);

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
		uint16_t size)
{
	struct rte_cryptodev_sym_session *sess = _sess;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);

	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);

	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(void *_sess)
{
	struct rte_cryptodev_sym_session *sess = _sess;
	void *data = NULL;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);

	rte_cryptodev_trace_sym_session_get_user_data(sess, data);

	return data;
}

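/*
 * Editor's note: the user-data area sits directly behind the driver's
 * private session data, so a set followed by a get yields the same bytes,
 * provided the size fits what the pool reserved (user_data_size at pool
 * creation). A small sketch:
 *
 *	struct app_ctx { uint64_t seq; } ctx = { .seq = 42 };
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0) {
 *		struct app_ctx *p =
 *			rte_cryptodev_sym_session_get_user_data(sess);
 *		... p->seq is 42 here ...
 *	}
 */
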
int
rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_private_data +
			sess->max_priv_data_sz,
			data, size);

	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);

	return 0;
}

void *
rte_cryptodev_asym_session_get_user_data(void *session)
{
	struct rte_cryptodev_asym_session *sess = session;
	void *data = NULL;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);

	rte_cryptodev_trace_asym_session_get_user_data(sess, data);

	return data;
}

static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	void *_sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_sym_session *sess = _sess;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}

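/*
 * Editor's note: a sketch of a single-buffer synchronous call, assuming the
 * layout of struct rte_crypto_sym_vec as declared in rte_crypto_sym.h;
 * "buf", "buf_len", "iv_buf", "digest_buf" and "sess" are application
 * placeholders.
 *
 *	struct rte_crypto_vec data = { .base = buf, .len = buf_len };
 *	struct rte_crypto_sgl sgl = { .vec = &data, .num = 1 };
 *	struct rte_crypto_va_iova_ptr iv = { .va = iv_buf };
 *	struct rte_crypto_va_iova_ptr digest = { .va = digest_buf };
 *	int32_t status;
 *	struct rte_crypto_sym_vec vec = {
 *		.num = 1, .src_sgl = &sgl, .iv = &iv,
 *		.digest = &digest, .status = &status,
 *	};
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
 *			ofs, &vec);
 */
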
int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
	int32_t priv_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
		return -ENOTSUP;
	}

	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
	if (priv_size < 0)
		return -ENOTSUP;

	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);

	return RTE_ALIGN_CEIL((size + priv_size), 8);
}

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update)
{
	struct rte_cryptodev *dev;

	/* Treat both "not in use" and a negative error as invalid. */
	if (rte_cryptodev_get_qp_status(dev_id, qp_id) != 1)
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);

	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
			sess_type, session_ctx, is_update);
}

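/*
 * Editor's note: a sketch of the raw data-path setup sequence; the context
 * is an opaque, driver-sized blob, so its size is queried first. "qp_id"
 * and "sess" are assumed to refer to a configured queue pair and an
 * existing session.
 *
 *	int ctx_sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	if (ctx_sz < 0)
 *		... the device lacks RTE_CRYPTODEV_FF_SYM_RAW_DP ...
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, ctx_sz, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	int rc = rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */
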
int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
	enum rte_crypto_op_type op_type,
	enum rte_crypto_op_sess_type sess_type,
	void *ev_mdata,
	uint16_t size)
{
	struct rte_cryptodev *dev;

	if (sess == NULL || ev_mdata == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		goto skip_pmd_op;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->dev_ops->session_ev_mdata_set == NULL)
		goto skip_pmd_op;

	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
		sess_type, ev_mdata, size);

	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
			sess_type, ev_mdata);

skip_pmd_op:
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
				size);
	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_cryptodev_asym_session *s = sess;

		if (s->event_mdata == NULL) {
			s->event_mdata = rte_malloc(NULL, size, 0);
			if (s->event_mdata == NULL)
				return -ENOMEM;
		}
		rte_memcpy(s->event_mdata, ev_mdata, size);

		return 0;
	} else
		return -ENOTSUP;
}

uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status)
{
	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
			ofs, user_data, enqueue_status);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *status)
{
	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
		out_user_data, is_user_data_array, n_success_jobs, status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

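/*
 * Editor's note: the typical call order for the four raw data-path wrappers
 * above, as a sketch. "vec", "ofs" and "udata" are assumed to be prepared
 * by the application; passing a NULL get_dequeue_count (so that
 * max_nb_to_dequeue is used) and a NULL post_dequeue is an assumption that
 * the PMD tolerates NULL callbacks.
 *
 *	int eq_status, dq_status;
 *	uint32_t ok;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, udata,
 *			&eq_status);
 *	if (n > 0 && rte_cryptodev_raw_enqueue_done(ctx, n) == 0) {
 *		n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, n, NULL,
 *				udata, 1, &ok, &dq_status);
 *		rte_cryptodev_raw_dequeue_done(ctx, n);
 *	}
 */
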
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mempool_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) + priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				    sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with incompatible parameters",
					name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
	return mp;
}

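/*
 * Editor's note: a minimal sketch of creating a symmetric op pool and
 * drawing a burst of operations from it; counts and the pool name are
 * arbitrary example values.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 0, rte_socket_id());
 *	struct rte_crypto_op *ops[32];
 *
 *	if (rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			ops, 32) == 32) {
 *		... attach sessions and mbufs, then enqueue with
 *		rte_cryptodev_enqueue_burst() ...
 *	}
 */
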
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;
	int driver_id = -1;

	if (name == NULL) {
		CDEV_LOG_DEBUG("name pointer NULL");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
			driver_id = driver->id;
			break;
		}
	}

	rte_cryptodev_trace_driver_id_get(name, driver_id);

	return driver_id;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	rte_cryptodev_trace_name_get(dev_id, dev->data->name);

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		if (driver->id == driver_id) {
			rte_cryptodev_trace_driver_name_get(driver_id,
				driver->driver->name);
			return driver->driver->name;
		}
	}
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	rte_cryptodev_trace_allocate_driver(drv->name);

	return nb_drivers++;
}

RTE_INIT(cryptodev_init_fp_ops)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
}

static int
cryptodev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	if (rte_cryptodev_count() < 1)
		return -EINVAL;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

static int
cryptodev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_cryptodev_info cryptodev_info;
	int dev_id;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_cryptodev_info_get(dev_id, &cryptodev_info);

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "device_name",
		cryptodev_info.device->name);
	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
		cryptodev_info.max_nb_queue_pairs);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)

static int
cryptodev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_stats cryptodev_stats;
	int dev_id, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
	if (ret < 0)
		return ret;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(enqueued_count);
	ADD_DICT_STAT(dequeued_count);
	ADD_DICT_STAT(enqueue_err_count);
	ADD_DICT_STAT(dequeue_err_count);

	return 0;
}

#define CRYPTO_CAPS_SZ                                             \
	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
					sizeof(uint64_t)) /        \
	 sizeof(uint64_t))

static int
crypto_caps_array(struct rte_tel_data *d,
		  const struct rte_cryptodev_capabilities *capabilities)
{
	const struct rte_cryptodev_capabilities *dev_caps;
	uint64_t caps_val[CRYPTO_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);

	while ((dev_caps = &capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
			rte_tel_data_add_array_uint(d, caps_val[j]);
	}

	return i;
}

static int
cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
			  struct rte_tel_data *d)
{
	struct rte_cryptodev_info dev_info;
	struct rte_tel_data *crypto_caps;
	int crypto_caps_n;
	char *end_param;
	int dev_id;

	if (!params || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_tel_data_start_dict(d);
	crypto_caps = rte_tel_data_alloc();
	if (!crypto_caps)
		return -ENOMEM;

	rte_cryptodev_info_get(dev_id, &dev_info);
	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);

	return 0;
}

RTE_INIT(cryptodev_init_telemetry)
{
	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
			"Returns information for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/list",
			cryptodev_handle_dev_list,
			"Returns list of available crypto devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/cryptodev/stats",
			cryptodev_handle_dev_stats,
			"Returns the stats for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/caps",
			cryptodev_handle_dev_caps,
			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
}

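/*
 * Editor's note: the commands registered above can be exercised with the
 * stock telemetry client shipped in usertools, e.g.:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /cryptodev/list
 *	--> /cryptodev/stats,0
 *
 * where the ",0" suffix carries the "int dev_id" parameter named in the
 * help strings.
 */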