/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* Spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the parameters passed to the callback, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * The crypto cipher algorithm string identifiers.
 * Not to be used by applications directly;
 * applications should use rte_cryptodev_get_cipher_algo_string().
 */
static const char *
crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr",
	[RTE_CRYPTO_CIPHER_SM4_CFB]	= "sm4-cfb",
	[RTE_CRYPTO_CIPHER_SM4_OFB]	= "sm4-ofb"
};

/**
 * The crypto cipher operation string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * The crypto auth algorithm string identifiers.
 * Not to be used by applications directly;
 * applications should use rte_cryptodev_get_auth_algo_string().
 */
static const char *
crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3",
	[RTE_CRYPTO_AUTH_SM3_HMAC]	= "sm3-hmac",

	[RTE_CRYPTO_AUTH_SHAKE_128]	= "shake-128",
	[RTE_CRYPTO_AUTH_SHAKE_256]	= "shake-256",
};

/**
 * The crypto AEAD algorithm string identifiers.
 * Not to be used by applications directly;
 * applications should use rte_cryptodev_get_aead_algo_string().
 */
static const char *
crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * The crypto AEAD operation string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation string identifiers.
 * Not to be used by applications directly;
 * applications should use rte_cryptodev_asym_get_xform_string().
 */
static const char *
crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
	[RTE_CRYPTO_ASYM_XFORM_SM2]	= "sm2",
};

/**
 * Asymmetric crypto operation string identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * Asymmetric crypto key exchange operation string identifiers.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t sess_data_sz;
	/**< Driver session data size */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}
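
/*
 * Illustrative sketch (not part of the library): mapping a user-supplied
 * cipher name, e.g. from a command line, to its enum value. The "aes-cbc"
 * string and the error handling are assumptions of the example.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") != 0)
 *		fprintf(stderr, "unknown cipher algorithm\n");
 */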

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);

	return ret;
}

const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
		alg_str = crypto_cipher_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
		alg_str = crypto_auth_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
		alg_str = crypto_aead_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
{
	const char *xform_str = NULL;

	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
		xform_str = crypto_asym_xform_strings[xform_enum];

	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);

	return xform_str;
}
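
/*
 * Illustrative sketch of the reverse lookup: the returned pointer is NULL
 * for an out-of-range enum value, so the fallback string below is an
 * assumption of the example.
 *
 *	const char *name =
 *		rte_cryptodev_get_cipher_algo_string(RTE_CRYPTO_CIPHER_AES_CBC);
 *
 *	printf("cipher: %s\n", name != NULL ? name : "unknown");
 */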

/**
 * The crypto auth operation string identifiers.
 * These can be used in application command lines.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead) {
			sym_capability = &capability->sym;
			break;
		}
	}

	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
		dev_info.driver_id, idx->type, sym_capability);

	return sym_capability;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If the range is actually only one value, the size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if the value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
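
/*
 * Worked example of the check above, assuming a capability advertising
 * key sizes {min = 16, max = 32, increment = 8}: sizes 16, 24 and 32 are
 * accepted, all others are rejected. When increment is 0 the range is
 * expected to describe a single value (min == max), and the bounds check
 * alone decides the result.
 */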

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type) {
			asym_cap = &capability->asym.xform_capa;
			break;
		}
	}

	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
		dev_info.driver_id, idx->type, asym_cap);

	return asym_cap;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
		iv_size, ret);

	return ret;
}
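
/*
 * Illustrative sketch combining the two calls above (device id and sizes
 * are assumptions of the example): query the AES-CBC capability of device
 * 0 and validate a 16-byte key with a 16-byte IV before creating a session.
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(0, &cap_idx);
 *	if (cap == NULL ||
 *			rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		fprintf(stderr, "AES-CBC with 128-bit key not supported\n");
 */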

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->auth.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
		digest_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->aead.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
		digest_size, aad_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	int ret = 0;

	if (capability->op_types & (1 << op_type))
		ret = 1;

	rte_cryptodev_trace_asym_xform_capability_check_optype(
		capability->op_types, op_type, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	int ret = 0; /* success */

	/* No need to check a limit if the corresponding min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min) {
			ret = -1;
			goto done;
		}
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max) {
			ret = -1;
			goto done;
		}
	}

	/* In any case, check that the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			ret = -1;
	}

done:
	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
		modlen, ret);

	return ret;
}

/* Spinlock for crypto device enq/deq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as only one DP thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		dev->enq_cbs = NULL;
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			rte_free(qsbr);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			rte_free(qsbr);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	rte_cryptodev_trace_get_feature_name(flag);

	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}
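
/*
 * Illustrative sketch: walking the single-bit feature flags of a device
 * and printing their names. The bit iteration is an assumption of the
 * example, not part of this API.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *	const char *name;
 *
 *	rte_cryptodev_info_get(0, &info);
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		name = rte_cryptodev_get_feature_name(info.feature_flags & flag);
 *		if (name != NULL)
 *			printf("%s\n", name);
 *	}
 */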

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int ret = 1;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		ret = 0;
		goto done;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		ret = 0;

done:
	rte_cryptodev_trace_is_valid_dev(dev_id, ret);

	return ret;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;
	int ret = -1;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)) {
			ret = (int)i;
			break;
		}
	}

	rte_cryptodev_trace_get_dev_id(name, ret);

	return ret;
}

uint8_t
rte_cryptodev_count(void)
{
	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);

	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	rte_cryptodev_trace_devices_get(driver_name, count);

	return count;
}
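
/*
 * Illustrative sketch: enumerating all attached devices bound to one
 * driver. The driver name "crypto_aesni_mb" is an assumption of the
 * example.
 *
 *	uint8_t devs[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t i, n;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", devs, RTE_DIM(devs));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u on socket %d\n", devs[i],
 *			rte_cryptodev_socket_id(devs[i]));
 */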

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	void *sec_ctx = NULL;

	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		sec_ctx = rte_crypto_devices[dev_id].security_ctx;

	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);

	return sec_ctx;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
		dev->data->socket_id);
	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* Init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
		dev->data->socket_id, dev->data->dev_id,
		dev->data->nb_queue_pairs);

	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
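
/*
 * Illustrative sketch of the configure step (the values are assumptions
 * of the example): one queue pair on the caller's socket, with the device
 * still stopped.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = (int)rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *
 *	if (rte_cryptodev_configure(0, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure cryptodev 0\n");
 */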

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* Expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* Point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;
	int ret = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		ret = -EINVAL;
		goto done;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		ret = -EINVAL;
		goto done;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		ret = 1;
		goto done;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

done:
	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);

	return ret;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
	uint32_t sess_priv_size)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->sess_data_sz < sess_priv_size)
		return 0;

	return 1;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
					rte_cryptodev_sym_get_private_session_size(dev_id))) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
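
/*
 * Illustrative sketch (descriptor count and mempool are assumptions of
 * the example): set up queue pair 0 with a session mempool previously
 * created by rte_cryptodev_sym_session_pool_create(). `sess_mp` is a
 * hypothetical pool owned by the caller.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(0, 0, &qp_conf,
 *			(int)rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot set up queue pair\n");
 */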

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in FIFO order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
	return cb;
}
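
/*
 * Illustrative sketch of an enqueue callback (the counting logic is an
 * assumption of the example). The callback runs in the data path for
 * every enqueue burst on the chosen queue pair.
 *
 *	static uint16_t
 *	count_enq(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *		uint16_t nb_ops, void *user_param)
 *	{
 *		uint64_t *cnt = user_param;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*cnt += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static uint64_t enq_count;
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(0, 0, count_enq, &enq_count);
 */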

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in FIFO order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->arg should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);

	rte_cryptodev_trace_stats_get(dev_id, stats);
	return 0;
}
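
/*
 * Illustrative sketch: reading the aggregate counters of device 0.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(0, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64
 *			" enq_err %" PRIu64 " deq_err %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count,
 *			stats.enqueue_err_count, stats.dequeue_err_count);
 */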

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	rte_cryptodev_trace_stats_reset(dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;

	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* Create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
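
/*
 * Illustrative sketch of an event callback (the handler body is an
 * assumption of the example); it is invoked from
 * rte_cryptodev_pmd_callback_process() when the PMD raises the event.
 *
 *	static void
 *	on_crypto_error(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *		void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		printf("cryptodev %u: event %d\n", dev_id, (int)event);
 *	}
 *
 *	rte_cryptodev_callback_register(0, RTE_CRYPTODEV_EVENT_ERROR,
 *		on_crypto_error, NULL);
 */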

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * If this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);

	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}
	dev = &rte_crypto_devices[dev_id];

	if (qp_id >= dev->data->nb_queue_pairs)
		return -EINVAL;
	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
		return -ENOTSUP;

	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;

	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
			NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->sess_data_sz = elt_size;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
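
/*
 * Illustrative sketch (pool name and sizing are assumptions of the
 * example; `xform` stands for a previously built rte_crypto_sym_xform
 * chain, not shown): size the element for this device's session data,
 * then create a session from the pool with
 * rte_cryptodev_sym_session_create() below.
 *
 *	uint32_t sz = rte_cryptodev_sym_get_private_session_size(0);
 *	struct rte_mempool *sess_mp = rte_cryptodev_sym_session_pool_create(
 *		"sess_mp", 1024, sz, 128, 0, (int)rte_socket_id());
 *	void *sess = rte_cryptodev_sym_session_create(0, &xform, sess_mp);
 */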

struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	uint32_t obj_sz, obj_sz_aligned;
	uint8_t dev_id;
	unsigned int priv_sz, max_priv_sz = 0;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id)) {
			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
			if (priv_sz > max_priv_sz)
				max_priv_sz = priv_sz;
		}
	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("Could not determine max private session size (no valid crypto devices)");
		return NULL;
	}

	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);

	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}
	pool_priv->max_priv_session_sz = max_priv_sz;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
		user_data_size, cache_size, mp);
	return mp;
}
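
/*
 * Illustrative sketch: unlike the symmetric variant, no element size is
 * passed here; it is derived above from the largest private session size of
 * the probed devices. Pool name and sizes are hypothetical.
 *
 *	mp = rte_cryptodev_asym_session_pool_create("asym_sess_pool",
 *			256, 64, 0, rte_socket_id());
 */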

void *
rte_cryptodev_sym_session_create(uint8_t dev_id,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t sess_priv_sz;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (xforms == NULL) {
		CDEV_LOG_ERR("Invalid xform");
		rte_errno = EINVAL;
		return NULL;
	}

	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
		CDEV_LOG_ERR("Invalid mempool");
		rte_errno = EINVAL;
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		rte_errno = ENOMEM;
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	sess->driver_id = dev->driver_id;
	sess->sess_data_sz = pool_priv->sess_data_sz;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);

	if (dev->dev_ops->sym_session_configure == NULL) {
		rte_errno = ENOTSUP;
		goto error_exit;
	}
	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);

	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		rte_errno = -ret;
		goto error_exit;
	}

	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);

	return (void *)sess;
error_exit:
	rte_mempool_put(mp, (void *)sess);
	return NULL;
}
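
/*
 * Illustrative sketch: creating (and later releasing) an AES-CBC encrypt
 * session from a pool made by rte_cryptodev_sym_session_pool_create();
 * key, IV_OFFSET and mp are assumed to be set up by the application.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform, mp);
 *
 *	...
 *	rte_cryptodev_sym_session_free(dev_id, sess);
 */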

int
rte_cryptodev_asym_session_create(uint8_t dev_id,
		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
		void **session)
{
	struct rte_cryptodev_asym_session *sess;
	uint32_t session_priv_data_sz;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	unsigned int session_header_size =
			rte_cryptodev_asym_get_header_session_size();
	struct rte_cryptodev *dev;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return -EINVAL;

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool");
		return -EINVAL;
	}

	/* Check support before taking an object from the pool. */
	if (*dev->dev_ops->asym_session_configure == NULL)
		return -ENOTSUP;

	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
			dev_id);
	pool_priv = rte_mempool_get_priv(mp);

	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
		CDEV_LOG_DEBUG(
			"The mempool private session data size is smaller than this device requires.");
		return -EINVAL;
	}

	/* Verify if provided mempool can hold elements big enough. */
	if (mp->elt_size < session_header_size + session_priv_data_sz) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return -EINVAL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, session)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return -ENOMEM;
	}

	sess = *session;
	sess->driver_id = dev->driver_id;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;

	/* Clear device private session data and user data. */
	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);

	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		CDEV_LOG_ERR(
			"dev_id %d failed to configure session details",
			dev_id);
		/* Return the object so the pool does not leak it. */
		rte_mempool_put(mp, *session);
		*session = NULL;
		return ret;
	}

	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
	return 0;
}
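
/*
 * Illustrative sketch: a modular-exponentiation session; the mod/exp
 * buffers and their lengths are assumed to be provided by the application.
 *
 *	struct rte_crypto_asym_xform xform = {
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *		.modex = {
 *			.modulus = { .data = mod, .length = mod_len },
 *			.exponent = { .data = exp, .length = exp_len },
 *		},
 *	};
 *	void *sess;
 *
 *	ret = rte_cryptodev_asym_session_create(dev_id, &xform, mp, &sess);
 *	if (ret == 0) {
 *		...
 *		rte_cryptodev_asym_session_free(dev_id, sess);
 *	}
 */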

int
rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
{
	struct rte_cryptodev *dev;
	struct rte_mempool *sess_mp;
	struct rte_cryptodev_sym_session *sess = _sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (sess == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return -EINVAL;

	sess_mp = rte_mempool_from_obj(sess);
	if (!sess_mp)
		return -EINVAL;
	pool_priv = rte_mempool_get_priv(sess_mp);

	if (sess->driver_id != dev->driver_id) {
		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
			sess->driver_id, dev->driver_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->sym_session_clear == NULL)
		return -ENOTSUP;

	dev->dev_ops->sym_session_clear(dev, sess);

	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);

	/* Return session to mempool */
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(dev_id, sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
{
	struct rte_mempool *sess_mp;
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	if (*dev->dev_ops->asym_session_clear == NULL)
		return -ENOTSUP;

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(dev_id, sess);
	return 0;
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	return sizeof(struct rte_cryptodev_asym_session);
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
		priv_sess_size);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);

	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
		priv_sess_size);

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
		uint16_t size)
{
	struct rte_cryptodev_sym_session *sess = _sess;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);

	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);

	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(void *_sess)
{
	struct rte_cryptodev_sym_session *sess = _sess;
	void *data = NULL;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);

	rte_cryptodev_trace_sym_session_get_user_data(sess, data);

	return data;
}
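
/*
 * Illustrative sketch: stashing an application pointer in the session's
 * user data area; this works only if the user_data_size passed at pool
 * creation was at least sizeof(ctx). "struct app_ctx" is hypothetical.
 *
 *	struct app_ctx *ctx = ...;
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	...
 *	struct app_ctx **p = rte_cryptodev_sym_session_get_user_data(sess);
 */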

int
rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_private_data +
			sess->max_priv_data_sz,
			data, size);

	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);

	return 0;
}

void *
rte_cryptodev_asym_session_get_user_data(void *session)
{
	struct rte_cryptodev_asym_session *sess = session;
	void *data = NULL;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);

	rte_cryptodev_trace_asym_session_get_user_data(sess, data);

	return data;
}

static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	void *_sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_sym_session *sess = _sess;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		sym_crypto_fill_status(vec, EINVAL);
		return 0;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_cpu_process == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
		sym_crypto_fill_status(vec, ENOTSUP);
		return 0;
	}

	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);

	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}

int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
	int32_t priv_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
		return -ENOTSUP;
	}

	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
	if (priv_size < 0)
		return -ENOTSUP;

	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);

	return RTE_ALIGN_CEIL((size + priv_size), 8);
}

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);

	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
			sess_type, session_ctx, is_update);
}
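
/*
 * Illustrative sketch: the raw data-path context is sized by the query
 * above and then bound to a configured queue pair and an existing session;
 * allocation flags and error handling are simplified.
 *
 *	int32_t sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 8);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	ret = rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */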

int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
	enum rte_crypto_op_type op_type,
	enum rte_crypto_op_sess_type sess_type,
	void *ev_mdata,
	uint16_t size)
{
	struct rte_cryptodev *dev;

	if (sess == NULL || ev_mdata == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		goto skip_pmd_op;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->dev_ops->session_ev_mdata_set == NULL)
		goto skip_pmd_op;

	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
		sess_type, ev_mdata, size);

	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
			sess_type, ev_mdata);

skip_pmd_op:
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
				size);
	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_cryptodev_asym_session *s = sess;

		if (s->event_mdata == NULL) {
			s->event_mdata = rte_malloc(NULL, size, 0);
			if (s->event_mdata == NULL)
				return -ENOMEM;
		}
		rte_memcpy(s->event_mdata, ev_mdata, size);

		return 0;
	} else
		return -ENOTSUP;
}

uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status)
{
	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
			ofs, user_data, enqueue_status);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *status)
{
	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
		out_user_data, is_user_data_array, n_success_jobs, status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}
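
/*
 * Illustrative sketch of the raw data-path cycle: enqueue a prepared
 * rte_crypto_sym_vec, commit it, then dequeue with a fixed count (a NULL
 * get_dequeue_count callback is assumed to make max_nb_to_dequeue the burst
 * size). vec, ofs, udata, post_deq and the status variables are assumed to
 * be set up by the application.
 *
 *	n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, udata, &enq_st);
 *	if (n > 0 && rte_cryptodev_raw_enqueue_done(ctx, n) == 0) {
 *		...
 *		n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, n, post_deq,
 *				udata, 1, &n_ok, &deq_st);
 *		rte_cryptodev_raw_dequeue_done(ctx, n);
 *	}
 */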

/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				    sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with incompatible parameters",
					name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
	return mp;
}
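
/*
 * Illustrative sketch: an op pool whose per-op private area is used for the
 * IV, a common pattern in the DPDK crypto examples; MAX_IV_LEN and the pool
 * sizing are hypothetical.
 *
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, MAX_IV_LEN, rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_mp,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */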

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;
	int driver_id = -1;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
			driver_id = driver->id;
			break;
		}
	}

	rte_cryptodev_trace_driver_id_get(name, driver_id);

	return driver_id;
}

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	rte_cryptodev_trace_name_get(dev_id, dev->data->name);

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		if (driver->id == driver_id) {
			rte_cryptodev_trace_driver_name_get(driver_id,
				driver->driver->name);
			return driver->driver->name;
		}
	}
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	rte_cryptodev_trace_allocate_driver(drv->name);

	return nb_drivers++;
}

RTE_INIT(cryptodev_init_fp_ops)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
}

static int
cryptodev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	if (rte_cryptodev_count() < 1)
		return -EINVAL;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

static int
cryptodev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_cryptodev_info cryptodev_info;
	int dev_id;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_cryptodev_info_get(dev_id, &cryptodev_info);

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "device_name",
		cryptodev_info.device->name);
	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
		cryptodev_info.max_nb_queue_pairs);

	return 0;
}

#define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)

static int
cryptodev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_stats cryptodev_stats;
	int dev_id, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
	if (ret < 0)
		return ret;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(enqueued_count);
	ADD_DICT_STAT(dequeued_count);
	ADD_DICT_STAT(enqueue_err_count);
	ADD_DICT_STAT(dequeue_err_count);

	return 0;
}

#define CRYPTO_CAPS_SZ                                             \
	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
					sizeof(uint64_t)) /        \
	 sizeof(uint64_t))

static int
crypto_caps_array(struct rte_tel_data *d,
		  const struct rte_cryptodev_capabilities *capabilities)
{
	const struct rte_cryptodev_capabilities *dev_caps;
	uint64_t caps_val[CRYPTO_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);

	while ((dev_caps = &capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
			rte_tel_data_add_array_uint(d, caps_val[j]);
	}

	return i;
}

static int
cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
			  struct rte_tel_data *d)
{
	struct rte_cryptodev_info dev_info;
	struct rte_tel_data *crypto_caps;
	int crypto_caps_n;
	char *end_param;
	int dev_id;

	if (!params || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_tel_data_start_dict(d);
	crypto_caps = rte_tel_data_alloc();
	if (!crypto_caps)
		return -ENOMEM;

	rte_cryptodev_info_get(dev_id, &dev_info);
	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);

	return 0;
}

RTE_INIT(cryptodev_init_telemetry)
{
	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
			"Returns information for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/list",
			cryptodev_handle_dev_list,
			"Returns list of available crypto devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/cryptodev/stats",
			cryptodev_handle_dev_stats,
			"Returns the stats for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/caps",
			cryptodev_handle_dev_caps,
			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
}
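
/*
 * The commands registered above can be exercised from the telemetry client
 * shipped with DPDK, for example:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /cryptodev/list
 *	--> /cryptodev/stats,0
 */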