xref: /dpdk/lib/cryptodev/rte_cryptodev.c (revision d46b9fa83f136beb0e6feedd0a7b3a228b0d8cd3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <ctype.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <dev_driver.h>
17 #include <rte_memory.h>
18 #include <rte_memcpy.h>
19 #include <rte_memzone.h>
20 #include <rte_eal.h>
21 #include <rte_common.h>
22 #include <rte_mempool.h>
23 #include <rte_malloc.h>
24 #include <rte_errno.h>
25 #include <rte_spinlock.h>
26 #include <rte_string_fns.h>
27 #include <rte_telemetry.h>
28 
29 #include "rte_crypto.h"
30 #include "rte_cryptodev.h"
31 #include "cryptodev_pmd.h"
32 #include "cryptodev_trace.h"
33 
34 static uint8_t nb_drivers;
35 
36 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
37 
38 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
39 
40 static struct rte_cryptodev_global cryptodev_globals = {
41 		.devs			= rte_crypto_devices,
42 		.data			= { NULL },
43 		.nb_devs		= 0
44 };
45 
46 /* Public fastpath APIs. */
47 struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
48 
49 /* spinlock for crypto device callbacks */
50 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
51 
52 RTE_LOG_REGISTER_DEFAULT(rte_cryptodev_logtype, INFO);
53 
54 /**
55  * The user application callback description.
56  *
57  * It contains the callback address registered by the user application,
58  * a pointer to the parameters for the callback, and the event type.
59  */
60 struct rte_cryptodev_callback {
61 	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
62 	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
63 	void *cb_arg;				/**< Parameter for callback */
64 	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
65 	uint32_t active;			/**< Callback is executing */
66 };
67 
68 /**
69  * The crypto cipher algorithm string identifiers.
70  * Not to be used by applications directly.
71  * Applications can use rte_cryptodev_get_cipher_algo_string() instead.
72  */
73 static const char *
74 crypto_cipher_algorithm_strings[] = {
75 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
76 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
77 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
78 
79 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
80 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
81 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
82 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
83 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
84 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
85 
86 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
87 
88 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
89 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
90 
91 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
92 
93 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
94 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
95 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
96 	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
97 	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
98 	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr",
99 	[RTE_CRYPTO_CIPHER_SM4_CFB]	= "sm4-cfb",
100 	[RTE_CRYPTO_CIPHER_SM4_OFB]	= "sm4-ofb"
101 };
102 
103 /**
104  * The crypto cipher operation string identifiers.
105  * These can be used on application command lines.
106  */
107 const char *
108 rte_crypto_cipher_operation_strings[] = {
109 		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
110 		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
111 };
112 
113 /**
114  * The crypto auth algorithm string identifiers.
115  * Not to be used by applications directly.
116  * Applications can use rte_cryptodev_get_auth_algo_string() instead.
117  */
118 static const char *
119 crypto_auth_algorithm_strings[] = {
120 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
121 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
122 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
123 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
124 
125 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
126 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
127 
128 	[RTE_CRYPTO_AUTH_NULL]		= "null",
129 
130 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
131 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
132 
133 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
134 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
135 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
136 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
137 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
138 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
139 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
140 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
141 
142 	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
143 	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
144 	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
145 	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
146 	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
147 	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
148 	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
149 	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",
150 
151 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
152 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
153 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
154 	[RTE_CRYPTO_AUTH_SM3]		= "sm3",
155 	[RTE_CRYPTO_AUTH_SM3_HMAC]	= "sm3-hmac",
156 
157 	[RTE_CRYPTO_AUTH_SHAKE_128]	 = "shake-128",
158 	[RTE_CRYPTO_AUTH_SHAKE_256]	 = "shake-256",
159 };
160 
161 /**
162  * The crypto AEAD algorithm string identifiers.
163  * Not to be used by applications directly.
164  * Applications can use rte_cryptodev_get_aead_algo_string() instead.
165  */
166 static const char *
167 crypto_aead_algorithm_strings[] = {
168 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
169 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
170 	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
171 };
172 
173 
174 /**
175  * The crypto AEAD operation string identifiers.
176  * These can be used on application command lines.
177  */
178 const char *
179 rte_crypto_aead_operation_strings[] = {
180 	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
181 	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
182 };
183 
184 /**
185  * Asymmetric crypto transform operation string identifiers.
186  * Not to be used by applications directly.
187  * Applications can use rte_cryptodev_asym_get_xform_string() instead.
188  */
189 static const char *
190 crypto_asym_xform_strings[] = {
191 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
192 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
193 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
194 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
195 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
196 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
197 	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
198 	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
199 	[RTE_CRYPTO_ASYM_XFORM_SM2]	= "sm2",
200 };
201 
202 /**
203  * Asymmetric crypto operation strings identifiers.
204  */
205 const char *rte_crypto_asym_op_strings[] = {
206 	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
207 	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
208 	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
209 	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
210 };
211 
212 /**
213  * Asymmetric crypto key exchange operation strings identifiers.
214  */
215 const char *rte_crypto_asym_ke_strings[] = {
216 	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
217 	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
218 	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
219 	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
220 };
221 
222 struct rte_cryptodev_sym_session_pool_private_data {
223 	uint16_t sess_data_sz;
224 	/**< driver session data size */
225 	uint16_t user_data_sz;
226 	/**< session user data will be placed after sess_data */
227 };
228 
229 /**
230  * The private data structure stored in the asym session mempool's private data area.
231  */
232 struct rte_cryptodev_asym_session_pool_private_data {
233 	uint16_t max_priv_session_sz;
234 	/**< Size of private session data used when creating mempool */
235 	uint16_t user_data_sz;
236 	/**< Session user data will be placed after sess_private_data */
237 };
238 
239 int
240 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
241 		const char *algo_string)
242 {
243 	unsigned int i;
244 	int ret = -1;	/* Invalid string */
245 
246 	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
247 		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
248 			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
249 			ret = 0;
250 			break;
251 		}
252 	}
253 
254 	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);
255 
256 	return ret;
257 }
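
/**
 * Usage sketch (illustrative only, not part of this file): parse a
 * user-supplied algorithm name into its enum value. The search above starts
 * at index 1 because the algorithm enums start at 1, leaving slot 0 of the
 * string table unused. Note that the trace call reads *algo_enum even when
 * the lookup fails, so passing an initialized variable avoids reading
 * indeterminate memory.
 *
 * @code
 * enum rte_crypto_cipher_algorithm algo = RTE_CRYPTO_CIPHER_NULL;
 *
 * if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") == 0)
 *         printf("parsed cipher algo %d\n", (int)algo);
 * else
 *         printf("unknown cipher algorithm\n");
 * @endcode
 */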
258 
259 int
260 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
261 		const char *algo_string)
262 {
263 	unsigned int i;
264 	int ret = -1;	/* Invalid string */
265 
266 	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
267 		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
268 			*algo_enum = (enum rte_crypto_auth_algorithm) i;
269 			ret = 0;
270 			break;
271 		}
272 	}
273 
274 	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);
275 
276 	return ret;
277 }
278 
279 int
280 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
281 		const char *algo_string)
282 {
283 	unsigned int i;
284 	int ret = -1;	/* Invalid string */
285 
286 	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
287 		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
288 			*algo_enum = (enum rte_crypto_aead_algorithm) i;
289 			ret = 0;
290 			break;
291 		}
292 	}
293 
294 	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);
295 
296 	return ret;
297 }
298 
299 int
300 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
301 		const char *xform_string)
302 {
303 	unsigned int i;
304 	int ret = -1;	/* Invalid string */
305 
306 	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
307 		if (strcmp(xform_string,
308 			crypto_asym_xform_strings[i]) == 0) {
309 			*xform_enum = (enum rte_crypto_asym_xform_type) i;
310 			ret = 0;
311 			break;
312 		}
313 	}
314 
315 	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);
316 
317 	return ret;
318 }
319 
320 const char *
321 rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
322 {
323 	const char *alg_str = NULL;
324 
325 	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
326 		alg_str = crypto_cipher_algorithm_strings[algo_enum];
327 
328 	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);
329 
330 	return alg_str;
331 }
332 
333 const char *
334 rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
335 {
336 	const char *alg_str = NULL;
337 
338 	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
339 		alg_str = crypto_auth_algorithm_strings[algo_enum];
340 
341 	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);
342 
343 	return alg_str;
344 }
345 
346 const char *
347 rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
348 {
349 	const char *alg_str = NULL;
350 
351 	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
352 		alg_str = crypto_aead_algorithm_strings[algo_enum];
353 
354 	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);
355 
356 	return alg_str;
357 }
358 
359 const char *
360 rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
361 {
362 	const char *xform_str = NULL;
363 
364 	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
365 		xform_str = crypto_asym_xform_strings[xform_enum];
366 
367 	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);
368 
369 	return xform_str;
370 }
371 
372 /**
373  * The crypto auth operation string identifiers.
374  * These can be used on application command lines.
375  */
376 const char *
377 rte_crypto_auth_operation_strings[] = {
378 		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
379 		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
380 };
381 
382 const struct rte_cryptodev_symmetric_capability *
383 rte_cryptodev_sym_capability_get(uint8_t dev_id,
384 		const struct rte_cryptodev_sym_capability_idx *idx)
385 {
386 	const struct rte_cryptodev_capabilities *capability;
387 	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
388 	struct rte_cryptodev_info dev_info;
389 	int i = 0;
390 
391 	rte_cryptodev_info_get(dev_id, &dev_info);
392 
393 	while ((capability = &dev_info.capabilities[i++])->op !=
394 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
395 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
396 			continue;
397 
398 		if (capability->sym.xform_type != idx->type)
399 			continue;
400 
401 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
402 			capability->sym.auth.algo == idx->algo.auth) {
403 			sym_capability = &capability->sym;
404 			break;
405 		}
406 
407 		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
408 			capability->sym.cipher.algo == idx->algo.cipher) {
409 			sym_capability = &capability->sym;
410 			break;
411 		}
412 
413 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
414 				capability->sym.aead.algo == idx->algo.aead) {
415 			sym_capability = &capability->sym;
416 			break;
417 		}
418 	}
419 
420 	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
421 		dev_info.driver_id, idx->type, sym_capability);
422 
423 	return sym_capability;
424 }
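
/**
 * Usage sketch (illustrative only, not part of this file): check whether a
 * device supports a given symmetric algorithm by filling the capability
 * index with the xform type and algorithm before the lookup.
 *
 * @code
 * struct rte_cryptodev_sym_capability_idx cap_idx = {
 *         .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *         .algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 * };
 * const struct rte_cryptodev_symmetric_capability *cap;
 *
 * cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 * if (cap == NULL)
 *         printf("AES-CBC not supported on dev %u\n", dev_id);
 * @endcode
 */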
425 
426 static int
427 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
428 {
429 	unsigned int next_size;
430 
431 	/* Check lower/upper bounds */
432 	if (size < range->min)
433 		return -1;
434 
435 	if (size > range->max)
436 		return -1;
437 
438 	/* If range is actually only one value, size is correct */
439 	if (range->increment == 0)
440 		return 0;
441 
442 	/* Check if value is one of the supported sizes */
443 	for (next_size = range->min; next_size <= range->max;
444 			next_size += range->increment)
445 		if (size == next_size)
446 			return 0;
447 
448 	return -1;
449 }
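
/*
 * Worked example (illustrative): a range of {min = 16, max = 32,
 * increment = 8} accepts only the sizes 16, 24 and 32, while a range with
 * increment 0 accepts exactly the single value min (== max).
 */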
450 
451 const struct rte_cryptodev_asymmetric_xform_capability *
452 rte_cryptodev_asym_capability_get(uint8_t dev_id,
453 		const struct rte_cryptodev_asym_capability_idx *idx)
454 {
455 	const struct rte_cryptodev_capabilities *capability;
456 	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
457 	struct rte_cryptodev_info dev_info;
458 	unsigned int i = 0;
459 
460 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
461 	rte_cryptodev_info_get(dev_id, &dev_info);
462 
463 	while ((capability = &dev_info.capabilities[i++])->op !=
464 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
465 		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
466 			continue;
467 
468 		if (capability->asym.xform_capa.xform_type == idx->type) {
469 			asym_cap = &capability->asym.xform_capa;
470 			break;
471 		}
472 	}
473 
474 	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
475 		dev_info.driver_id, idx->type, asym_cap);
476 
477 	return asym_cap;
478 }
479 
480 int
481 rte_cryptodev_sym_capability_check_cipher(
482 		const struct rte_cryptodev_symmetric_capability *capability,
483 		uint16_t key_size, uint16_t iv_size)
484 {
485 	int ret = 0; /* success */
486 
487 	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
488 		ret = -1;
489 		goto done;
490 	}
491 
492 	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
493 		ret = -1;
494 
495 done:
496 	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
497 		iv_size, ret);
498 
499 	return ret;
500 }
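
/**
 * Usage sketch (illustrative only, not part of this file): validate the key
 * and IV sizes an application intends to use against the capability returned
 * by rte_cryptodev_sym_capability_get() before building a session.
 *
 * @code
 * if (cap != NULL &&
 *     rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *         printf("16-byte key with 16-byte IV is supported\n");
 * @endcode
 */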
501 
502 int
503 rte_cryptodev_sym_capability_check_auth(
504 		const struct rte_cryptodev_symmetric_capability *capability,
505 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
506 {
507 	int ret = 0; /* success */
508 
509 	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
510 		ret = -1;
511 		goto done;
512 	}
513 
514 	if (param_range_check(digest_size,
515 		&capability->auth.digest_size) != 0) {
516 		ret = -1;
517 		goto done;
518 	}
519 
520 	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
521 		ret = -1;
522 
523 done:
524 	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
525 		digest_size, iv_size, ret);
526 
527 	return ret;
528 }
529 
530 int
531 rte_cryptodev_sym_capability_check_aead(
532 		const struct rte_cryptodev_symmetric_capability *capability,
533 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
534 		uint16_t iv_size)
535 {
536 	int ret = 0; /* success */
537 
538 	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
539 		ret = -1;
540 		goto done;
541 	}
542 
543 	if (param_range_check(digest_size,
544 		&capability->aead.digest_size) != 0) {
545 		ret = -1;
546 		goto done;
547 	}
548 
549 	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
550 		ret = -1;
551 		goto done;
552 	}
553 
554 	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
555 		ret = -1;
556 
557 done:
558 	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
559 		digest_size, aad_size, iv_size, ret);
560 
561 	return ret;
562 }
563 
564 int
565 rte_cryptodev_asym_xform_capability_check_optype(
566 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
567 	enum rte_crypto_asym_op_type op_type)
568 {
569 	int ret = 0;
570 
571 	if (capability->op_types & (1 << op_type))
572 		ret = 1;
573 
574 	rte_cryptodev_trace_asym_xform_capability_check_optype(
575 		capability->op_types, op_type, ret);
576 
577 	return ret;
578 }
579 
580 int
581 rte_cryptodev_asym_xform_capability_check_modlen(
582 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
583 	uint16_t modlen)
584 {
585 	int ret = 0; /* success */
586 
587 	/* no need to check limits if min or max is 0 */
588 	if (capability->modlen.min != 0) {
589 		if (modlen < capability->modlen.min) {
590 			ret = -1;
591 			goto done;
592 		}
593 	}
594 
595 	if (capability->modlen.max != 0) {
596 		if (modlen > capability->modlen.max) {
597 			ret = -1;
598 			goto done;
599 		}
600 	}
601 
602 	/* in any case, check if the given modlen is a multiple of the increment */
603 	if (capability->modlen.increment != 0) {
604 		if (modlen % (capability->modlen.increment))
605 			ret = -1;
606 	}
607 
608 done:
609 	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
610 		modlen, ret);
611 
612 	return ret;
613 }
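
/*
 * Worked example (illustrative): with modlen limits {min = 64, max = 512,
 * increment = 8}, a 128-byte (1024-bit) modulus passes because
 * 64 <= 128 <= 512 and 128 % 8 == 0, while a 129-byte modulus fails the
 * increment check.
 */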
614 
615 bool
616 rte_cryptodev_asym_xform_capability_check_hash(
617 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
618 	enum rte_crypto_auth_algorithm hash)
619 {
620 	bool ret = false;
621 
622 	if (capability->hash_algos & (1 << hash))
623 		ret = true;
624 
625 	rte_cryptodev_trace_asym_xform_capability_check_hash(
626 		capability->hash_algos, hash, ret);
627 
628 	return ret;
629 }
630 
631 /* spinlock for crypto device enq/deq callbacks */
632 static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
633 
634 static void
635 cryptodev_cb_cleanup(struct rte_cryptodev *dev)
636 {
637 	struct rte_cryptodev_cb_rcu *list;
638 	struct rte_cryptodev_cb *cb, *next;
639 	uint16_t qp_id;
640 
641 	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
642 		return;
643 
644 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
645 		list = &dev->enq_cbs[qp_id];
646 		cb = list->next;
647 		while (cb != NULL) {
648 			next = cb->next;
649 			rte_free(cb);
650 			cb = next;
651 		}
652 
653 		rte_free(list->qsbr);
654 	}
655 
656 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
657 		list = &dev->deq_cbs[qp_id];
658 		cb = list->next;
659 		while (cb != NULL) {
660 			next = cb->next;
661 			rte_free(cb);
662 			cb = next;
663 		}
664 
665 		rte_free(list->qsbr);
666 	}
667 
668 	rte_free(dev->enq_cbs);
669 	dev->enq_cbs = NULL;
670 	rte_free(dev->deq_cbs);
671 	dev->deq_cbs = NULL;
672 }
673 
674 static int
675 cryptodev_cb_init(struct rte_cryptodev *dev)
676 {
677 	struct rte_cryptodev_cb_rcu *list;
678 	struct rte_rcu_qsbr *qsbr;
679 	uint16_t qp_id;
680 	size_t size;
681 
682 	/* Max threads set to 1, as only one DP thread accesses a queue pair */
683 	const uint32_t max_threads = 1;
684 
685 	dev->enq_cbs = rte_zmalloc(NULL,
686 				   sizeof(struct rte_cryptodev_cb_rcu) *
687 				   dev->data->nb_queue_pairs, 0);
688 	if (dev->enq_cbs == NULL) {
689 		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
690 		return -ENOMEM;
691 	}
692 
693 	dev->deq_cbs = rte_zmalloc(NULL,
694 				   sizeof(struct rte_cryptodev_cb_rcu) *
695 				   dev->data->nb_queue_pairs, 0);
696 	if (dev->deq_cbs == NULL) {
697 		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
698 		rte_free(dev->enq_cbs);
699 		return -ENOMEM;
700 	}
701 
702 	/* Create RCU QSBR variable */
703 	size = rte_rcu_qsbr_get_memsize(max_threads);
704 
705 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
706 		list = &dev->enq_cbs[qp_id];
707 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
708 		if (qsbr == NULL) {
709 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
710 				"queue_pair_id=%d", qp_id);
711 			goto cb_init_err;
712 		}
713 
714 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
715 			CDEV_LOG_ERR("Failed to initialize RCU on "
716 				"queue_pair_id=%d", qp_id);
717 			goto cb_init_err;
718 		}
719 
720 		list->qsbr = qsbr;
721 	}
722 
723 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
724 		list = &dev->deq_cbs[qp_id];
725 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
726 		if (qsbr == NULL) {
727 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
728 				"queue_pair_id=%d", qp_id);
729 			goto cb_init_err;
730 		}
731 
732 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
733 			CDEV_LOG_ERR("Failed to initialize RCU on "
734 				"queue_pair_id=%d", qp_id);
735 			goto cb_init_err;
736 		}
737 
738 		list->qsbr = qsbr;
739 	}
740 
741 	return 0;
742 
743 cb_init_err:
744 	cryptodev_cb_cleanup(dev);
745 	return -ENOMEM;
746 }
747 
748 const char *
749 rte_cryptodev_get_feature_name(uint64_t flag)
750 {
751 	rte_cryptodev_trace_get_feature_name(flag);
752 
753 	switch (flag) {
754 	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
755 		return "SYMMETRIC_CRYPTO";
756 	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
757 		return "ASYMMETRIC_CRYPTO";
758 	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
759 		return "SYM_OPERATION_CHAINING";
760 	case RTE_CRYPTODEV_FF_CPU_SSE:
761 		return "CPU_SSE";
762 	case RTE_CRYPTODEV_FF_CPU_AVX:
763 		return "CPU_AVX";
764 	case RTE_CRYPTODEV_FF_CPU_AVX2:
765 		return "CPU_AVX2";
766 	case RTE_CRYPTODEV_FF_CPU_AVX512:
767 		return "CPU_AVX512";
768 	case RTE_CRYPTODEV_FF_CPU_AESNI:
769 		return "CPU_AESNI";
770 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
771 		return "HW_ACCELERATED";
772 	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
773 		return "IN_PLACE_SGL";
774 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
775 		return "OOP_SGL_IN_SGL_OUT";
776 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
777 		return "OOP_SGL_IN_LB_OUT";
778 	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
779 		return "OOP_LB_IN_SGL_OUT";
780 	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
781 		return "OOP_LB_IN_LB_OUT";
782 	case RTE_CRYPTODEV_FF_CPU_NEON:
783 		return "CPU_NEON";
784 	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
785 		return "CPU_ARM_CE";
786 	case RTE_CRYPTODEV_FF_SECURITY:
787 		return "SECURITY_PROTOCOL";
788 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
789 		return "RSA_PRIV_OP_KEY_EXP";
790 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
791 		return "RSA_PRIV_OP_KEY_QT";
792 	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
793 		return "DIGEST_ENCRYPTED";
794 	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
795 		return "SYM_CPU_CRYPTO";
796 	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
797 		return "ASYM_SESSIONLESS";
798 	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
799 		return "SYM_SESSIONLESS";
800 	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
801 		return "NON_BYTE_ALIGNED_DATA";
802 	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
803 		return "CIPHER_MULTIPLE_DATA_UNITS";
804 	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
805 		return "CIPHER_WRAPPED_KEY";
806 	default:
807 		return NULL;
808 	}
809 }
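
/**
 * Usage sketch (illustrative only, not part of this file): print every
 * feature a device advertises by walking its 64-bit feature_flags field one
 * bit at a time.
 *
 * @code
 * struct rte_cryptodev_info info;
 * uint64_t flag;
 *
 * rte_cryptodev_info_get(dev_id, &info);
 * for (flag = 1; flag != 0; flag <<= 1) {
 *         const char *name;
 *
 *         if ((info.feature_flags & flag) == 0)
 *                 continue;
 *         name = rte_cryptodev_get_feature_name(flag);
 *         if (name != NULL)
 *                 printf("  %s\n", name);
 * }
 * @endcode
 */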
810 
811 struct rte_cryptodev *
812 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
813 {
814 	return &cryptodev_globals.devs[dev_id];
815 }
816 
817 struct rte_cryptodev *
818 rte_cryptodev_pmd_get_named_dev(const char *name)
819 {
820 	struct rte_cryptodev *dev;
821 	unsigned int i;
822 
823 	if (name == NULL)
824 		return NULL;
825 
826 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
827 		dev = &cryptodev_globals.devs[i];
828 
829 		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
830 				(strcmp(dev->data->name, name) == 0))
831 			return dev;
832 	}
833 
834 	return NULL;
835 }
836 
837 static inline uint8_t
838 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
839 {
840 	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
841 			rte_crypto_devices[dev_id].data == NULL)
842 		return 0;
843 
844 	return 1;
845 }
846 
847 unsigned int
848 rte_cryptodev_is_valid_dev(uint8_t dev_id)
849 {
850 	struct rte_cryptodev *dev = NULL;
851 	unsigned int ret = 1;
852 
853 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
854 		ret = 0;
855 		goto done;
856 	}
857 
858 	dev = rte_cryptodev_pmd_get_dev(dev_id);
859 	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
860 		ret = 0;
861 
862 done:
863 	rte_cryptodev_trace_is_valid_dev(dev_id, ret);
864 
865 	return ret;
866 }
867 
868 int
869 rte_cryptodev_get_dev_id(const char *name)
870 {
871 	unsigned i;
872 	int ret = -1;
873 
874 	if (name == NULL)
875 		return -1;
876 
877 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
878 		if (!rte_cryptodev_is_valid_device_data(i))
879 			continue;
880 		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
881 				== 0) &&
882 				(cryptodev_globals.devs[i].attached ==
883 						RTE_CRYPTODEV_ATTACHED)) {
884 			ret = (int)i;
885 			break;
886 		}
887 	}
888 
889 	rte_cryptodev_trace_get_dev_id(name, ret);
890 
891 	return ret;
892 }
893 
894 uint8_t
895 rte_cryptodev_count(void)
896 {
897 	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);
898 
899 	return cryptodev_globals.nb_devs;
900 }
901 
902 uint8_t
903 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
904 {
905 	uint8_t i, dev_count = 0;
906 
907 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
908 		if (cryptodev_globals.devs[i].driver_id == driver_id &&
909 			cryptodev_globals.devs[i].attached ==
910 					RTE_CRYPTODEV_ATTACHED)
911 			dev_count++;
912 
913 	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);
914 
915 	return dev_count;
916 }
917 
918 uint8_t
919 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
920 	uint8_t nb_devices)
921 {
922 	uint8_t i, count = 0;
923 	struct rte_cryptodev *devs = cryptodev_globals.devs;
924 
925 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
926 		if (!rte_cryptodev_is_valid_device_data(i))
927 			continue;
928 
929 		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
930 			int cmp;
931 
932 			cmp = strncmp(devs[i].device->driver->name,
933 					driver_name,
934 					strlen(driver_name) + 1);
935 
936 			if (cmp == 0)
937 				devices[count++] = devs[i].data->dev_id;
938 		}
939 	}
940 
941 	rte_cryptodev_trace_devices_get(driver_name, count);
942 
943 	return count;
944 }
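
/**
 * Usage sketch (illustrative only, not part of this file): collect the ids
 * of all attached devices created by one driver; the driver name
 * "crypto_aesni_mb" below is just an example.
 *
 * @code
 * uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 * uint8_t n, i;
 *
 * n = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *                 RTE_CRYPTO_MAX_DEVS);
 * for (i = 0; i < n; i++)
 *         printf("dev %u uses this driver\n", ids[i]);
 * @endcode
 */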
945 
946 void *
947 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
948 {
949 	void *sec_ctx = NULL;
950 
951 	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
952 			(rte_crypto_devices[dev_id].feature_flags &
953 			RTE_CRYPTODEV_FF_SECURITY))
954 		sec_ctx = rte_crypto_devices[dev_id].security_ctx;
955 
956 	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);
957 
958 	return sec_ctx;
959 }
960 
961 int
962 rte_cryptodev_socket_id(uint8_t dev_id)
963 {
964 	struct rte_cryptodev *dev;
965 
966 	if (!rte_cryptodev_is_valid_dev(dev_id))
967 		return -1;
968 
969 	dev = rte_cryptodev_pmd_get_dev(dev_id);
970 
971 	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
972 		dev->data->socket_id);
973 	return dev->data->socket_id;
974 }
975 
976 static inline int
977 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
978 		int socket_id)
979 {
980 	char mz_name[RTE_MEMZONE_NAMESIZE];
981 	const struct rte_memzone *mz;
982 	int n;
983 
984 	/* generate memzone name */
985 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
986 	if (n >= (int)sizeof(mz_name))
987 		return -EINVAL;
988 
989 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
990 		mz = rte_memzone_reserve(mz_name,
991 				sizeof(struct rte_cryptodev_data),
992 				socket_id, 0);
993 		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
994 				mz_name, mz);
995 	} else {
996 		mz = rte_memzone_lookup(mz_name);
997 		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
998 				mz_name, mz);
999 	}
1000 
1001 	if (mz == NULL)
1002 		return -ENOMEM;
1003 
1004 	*data = mz->addr;
1005 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1006 		memset(*data, 0, sizeof(struct rte_cryptodev_data));
1007 
1008 	return 0;
1009 }
1010 
1011 static inline int
1012 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
1013 {
1014 	char mz_name[RTE_MEMZONE_NAMESIZE];
1015 	const struct rte_memzone *mz;
1016 	int n;
1017 
1018 	/* generate memzone name */
1019 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
1020 	if (n >= (int)sizeof(mz_name))
1021 		return -EINVAL;
1022 
1023 	mz = rte_memzone_lookup(mz_name);
1024 	if (mz == NULL)
1025 		return -ENOMEM;
1026 
1027 	RTE_ASSERT(*data == mz->addr);
1028 	*data = NULL;
1029 
1030 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1031 		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
1032 				mz_name, mz);
1033 		return rte_memzone_free(mz);
1034 	} else {
1035 		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
1036 				mz_name, mz);
1037 	}
1038 
1039 	return 0;
1040 }
1041 
1042 static uint8_t
1043 rte_cryptodev_find_free_device_index(void)
1044 {
1045 	uint8_t dev_id;
1046 
1047 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
1048 		if (rte_crypto_devices[dev_id].attached ==
1049 				RTE_CRYPTODEV_DETACHED)
1050 			return dev_id;
1051 	}
1052 	return RTE_CRYPTO_MAX_DEVS;
1053 }
1054 
1055 struct rte_cryptodev *
1056 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
1057 {
1058 	struct rte_cryptodev *cryptodev;
1059 	uint8_t dev_id;
1060 
1061 	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
1062 		CDEV_LOG_ERR("Crypto device with name %s already "
1063 				"allocated!", name);
1064 		return NULL;
1065 	}
1066 
1067 	dev_id = rte_cryptodev_find_free_device_index();
1068 	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
1069 		CDEV_LOG_ERR("Reached maximum number of crypto devices");
1070 		return NULL;
1071 	}
1072 
1073 	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
1074 
1075 	if (cryptodev->data == NULL) {
1076 		struct rte_cryptodev_data **cryptodev_data =
1077 				&cryptodev_globals.data[dev_id];
1078 
1079 		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
1080 				socket_id);
1081 
1082 		if (retval < 0 || *cryptodev_data == NULL)
1083 			return NULL;
1084 
1085 		cryptodev->data = *cryptodev_data;
1086 
1087 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1088 			strlcpy(cryptodev->data->name, name,
1089 				RTE_CRYPTODEV_NAME_MAX_LEN);
1090 
1091 			cryptodev->data->dev_id = dev_id;
1092 			cryptodev->data->socket_id = socket_id;
1093 			cryptodev->data->dev_started = 0;
1094 			CDEV_LOG_DEBUG("PRIMARY:init data");
1095 		}
1096 
1097 		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
1098 				cryptodev->data->name,
1099 				cryptodev->data->dev_id,
1100 				cryptodev->data->socket_id,
1101 				cryptodev->data->dev_started);
1102 
1103 		/* init user callbacks */
1104 		TAILQ_INIT(&(cryptodev->link_intr_cbs));
1105 
1106 		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
1107 
1108 		cryptodev_globals.nb_devs++;
1109 	}
1110 
1111 	return cryptodev;
1112 }
1113 
1114 int
1115 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
1116 {
1117 	int ret;
1118 	uint8_t dev_id;
1119 
1120 	if (cryptodev == NULL)
1121 		return -EINVAL;
1122 
1123 	dev_id = cryptodev->data->dev_id;
1124 
1125 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
1126 
1127 	/* Close device only if device operations have been set */
1128 	if (cryptodev->dev_ops) {
1129 		ret = rte_cryptodev_close(dev_id);
1130 		if (ret < 0)
1131 			return ret;
1132 	}
1133 
1134 	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
1135 	if (ret < 0)
1136 		return ret;
1137 
1138 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
1139 	cryptodev_globals.nb_devs--;
1140 	return 0;
1141 }
1142 
1143 uint16_t
1144 rte_cryptodev_queue_pair_count(uint8_t dev_id)
1145 {
1146 	struct rte_cryptodev *dev;
1147 
1148 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
1149 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1150 		return 0;
1151 	}
1152 
1153 	dev = &rte_crypto_devices[dev_id];
1154 	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
1155 		dev->data->socket_id, dev->data->dev_id,
1156 		dev->data->nb_queue_pairs);
1157 
1158 	return dev->data->nb_queue_pairs;
1159 }
1160 
1161 static int
1162 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
1163 		int socket_id)
1164 {
1165 	struct rte_cryptodev_info dev_info;
1166 	void **qp;
1167 	unsigned int i;
1168 
1169 	if ((dev == NULL) || (nb_qpairs < 1)) {
1170 		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
1171 							dev, nb_qpairs);
1172 		return -EINVAL;
1173 	}
1174 
1175 	CDEV_LOG_DEBUG("Setup %u queue pairs on device %u",
1176 			nb_qpairs, dev->data->dev_id);
1177 
1178 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
1179 
1180 	if (*dev->dev_ops->dev_infos_get == NULL)
1181 		return -ENOTSUP;
1182 	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1183 
1184 	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
1185 		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
1186 				nb_qpairs, dev->data->dev_id);
1187 		return -EINVAL;
1188 	}
1189 
1190 	if (dev->data->queue_pairs == NULL) { /* first time configuration */
1191 		dev->data->queue_pairs = rte_zmalloc_socket(
1192 				"cryptodev->queue_pairs",
1193 				sizeof(dev->data->queue_pairs[0]) *
1194 				dev_info.max_nb_queue_pairs,
1195 				RTE_CACHE_LINE_SIZE, socket_id);
1196 
1197 		if (dev->data->queue_pairs == NULL) {
1198 			dev->data->nb_queue_pairs = 0;
1199 			CDEV_LOG_ERR("failed to get memory for qp meta data, "
1200 							"nb_queues %u",
1201 							nb_qpairs);
1202 			return -ENOMEM;
1203 		}
1204 	} else { /* re-configure */
1205 		int ret;
1206 		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
1207 
1208 		qp = dev->data->queue_pairs;
1209 
1210 		if (*dev->dev_ops->queue_pair_release == NULL)
1211 			return -ENOTSUP;
1212 
1213 		for (i = nb_qpairs; i < old_nb_queues; i++) {
1214 			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
1215 			if (ret < 0)
1216 				return ret;
1217 			qp[i] = NULL;
1218 		}
1219 
1220 	}
1221 	dev->data->nb_queue_pairs = nb_qpairs;
1222 	return 0;
1223 }
1224 
1225 int
1226 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
1227 {
1228 	struct rte_cryptodev *dev;
1229 	int diag;
1230 
1231 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1232 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1233 		return -EINVAL;
1234 	}
1235 
1236 	dev = &rte_crypto_devices[dev_id];
1237 
1238 	if (dev->data->dev_started) {
1239 		CDEV_LOG_ERR(
1240 		    "device %d must be stopped to allow configuration", dev_id);
1241 		return -EBUSY;
1242 	}
1243 
1244 	if (*dev->dev_ops->dev_configure == NULL)
1245 		return -ENOTSUP;
1246 
1247 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1248 	cryptodev_cb_cleanup(dev);
1249 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1250 
1251 	/* Setup new number of queue pairs and reconfigure device. */
1252 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
1253 			config->socket_id);
1254 	if (diag != 0) {
1255 		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
1256 				dev_id, diag);
1257 		return diag;
1258 	}
1259 
1260 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1261 	diag = cryptodev_cb_init(dev);
1262 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1263 	if (diag) {
1264 		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
1265 		return diag;
1266 	}
1267 
1268 	rte_cryptodev_trace_configure(dev_id, config);
1269 	return (*dev->dev_ops->dev_configure)(dev, config);
1270 }
1271 
1272 int
1273 rte_cryptodev_start(uint8_t dev_id)
1274 {
1275 	struct rte_cryptodev *dev;
1276 	int diag;
1277 
1278 	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1279 
1280 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1281 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1282 		return -EINVAL;
1283 	}
1284 
1285 	dev = &rte_crypto_devices[dev_id];
1286 
1287 	if (*dev->dev_ops->dev_start == NULL)
1288 		return -ENOTSUP;
1289 
1290 	if (dev->data->dev_started != 0) {
1291 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1292 			dev_id);
1293 		return 0;
1294 	}
1295 
1296 	diag = (*dev->dev_ops->dev_start)(dev);
1297 	/* expose selection of PMD fast-path functions */
1298 	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
1299 
1300 	rte_cryptodev_trace_start(dev_id, diag);
1301 	if (diag == 0)
1302 		dev->data->dev_started = 1;
1303 	else
1304 		return diag;
1305 
1306 	return 0;
1307 }
1308 
1309 void
1310 rte_cryptodev_stop(uint8_t dev_id)
1311 {
1312 	struct rte_cryptodev *dev;
1313 
1314 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1315 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1316 		return;
1317 	}
1318 
1319 	dev = &rte_crypto_devices[dev_id];
1320 
1321 	if (*dev->dev_ops->dev_stop == NULL)
1322 		return;
1323 
1324 	if (dev->data->dev_started == 0) {
1325 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1326 			dev_id);
1327 		return;
1328 	}
1329 
1330 	/* point fast-path functions to dummy ones */
1331 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
1332 
1333 	(*dev->dev_ops->dev_stop)(dev);
1334 	rte_cryptodev_trace_stop(dev_id);
1335 	dev->data->dev_started = 0;
1336 }
1337 
1338 int
1339 rte_cryptodev_close(uint8_t dev_id)
1340 {
1341 	struct rte_cryptodev *dev;
1342 	int retval;
1343 
1344 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1345 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1346 		return -1;
1347 	}
1348 
1349 	dev = &rte_crypto_devices[dev_id];
1350 
1351 	/* Device must be stopped before it can be closed */
1352 	if (dev->data->dev_started == 1) {
1353 		CDEV_LOG_ERR("Device %u must be stopped before closing",
1354 				dev_id);
1355 		return -EBUSY;
1356 	}
1357 
1358 	/* We can't close the device if there are outstanding sessions in use */
1359 	if (dev->data->session_pool != NULL) {
1360 		if (!rte_mempool_full(dev->data->session_pool)) {
1361 			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1362 					"has sessions still in use, free "
1363 					"all sessions before calling close",
1364 					(unsigned)dev_id);
1365 			return -EBUSY;
1366 		}
1367 	}
1368 
1369 	if (*dev->dev_ops->dev_close == NULL)
1370 		return -ENOTSUP;
1371 	retval = (*dev->dev_ops->dev_close)(dev);
1372 	rte_cryptodev_trace_close(dev_id, retval);
1373 
1374 	if (retval < 0)
1375 		return retval;
1376 
1377 	return 0;
1378 }
1379 
1380 int
1381 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1382 {
1383 	struct rte_cryptodev *dev;
1384 	int ret = 0;
1385 
1386 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1387 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1388 		ret = -EINVAL;
1389 		goto done;
1390 	}
1391 
1392 	dev = &rte_crypto_devices[dev_id];
1393 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1394 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1395 		ret = -EINVAL;
1396 		goto done;
1397 	}
1398 	void **qps = dev->data->queue_pairs;
1399 
1400 	if (qps[queue_pair_id]) {
1401 		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1402 			queue_pair_id, dev_id);
1403 		ret = 1;
1404 		goto done;
1405 	}
1406 
1407 	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1408 		queue_pair_id, dev_id);
1409 
1410 done:
1411 	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);
1412 
1413 	return ret;
1414 }
1415 
1416 static uint8_t
1417 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
1418 	uint32_t sess_priv_size)
1419 {
1420 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1421 
1422 	if (!mp)
1423 		return 0;
1424 
1425 	pool_priv = rte_mempool_get_priv(mp);
1426 
1427 	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1428 			pool_priv->sess_data_sz < sess_priv_size)
1429 		return 0;
1430 
1431 	return 1;
1432 }
1433 
1434 int
1435 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1436 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1437 
1438 {
1439 	struct rte_cryptodev *dev;
1440 
1441 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1442 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1443 		return -EINVAL;
1444 	}
1445 
1446 	dev = &rte_crypto_devices[dev_id];
1447 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1448 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1449 		return -EINVAL;
1450 	}
1451 
1452 	if (!qp_conf) {
1453 		CDEV_LOG_ERR("qp_conf cannot be NULL");
1454 		return -EINVAL;
1455 	}
1456 
1457 	if (qp_conf->mp_session) {
1458 		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1459 
1460 		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1461 		if (!pool_priv || qp_conf->mp_session->private_data_size <
1462 				sizeof(*pool_priv)) {
1463 			CDEV_LOG_ERR("Invalid mempool");
1464 			return -EINVAL;
1465 		}
1466 
1467 		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
1468 					rte_cryptodev_sym_get_private_session_size(dev_id))) {
1469 			CDEV_LOG_ERR("Invalid mempool");
1470 			return -EINVAL;
1471 		}
1472 	}
1473 
1474 	if (dev->data->dev_started) {
1475 		CDEV_LOG_ERR(
1476 		    "device %d must be stopped to allow configuration", dev_id);
1477 		return -EBUSY;
1478 	}
1479 
1480 	if (*dev->dev_ops->queue_pair_setup == NULL)
1481 		return -ENOTSUP;
1482 
1483 	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1484 	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1485 			socket_id);
1486 }
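
/**
 * Usage sketch (illustrative only, not part of this file): the usual
 * bring-up sequence is configure, then set up each queue pair, then start.
 * The device must be stopped during both configure and queue pair setup.
 * Here sess_mp stands for a session mempool created earlier with
 * rte_cryptodev_sym_session_pool_create().
 *
 * @code
 * struct rte_cryptodev_config conf = {
 *         .socket_id = rte_cryptodev_socket_id(dev_id),
 *         .nb_queue_pairs = 2,
 * };
 * struct rte_cryptodev_qp_conf qp_conf = {
 *         .nb_descriptors = 2048,
 *         .mp_session = sess_mp,
 * };
 * uint16_t qp;
 *
 * if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *         rte_exit(EXIT_FAILURE, "configure failed\n");
 * for (qp = 0; qp < conf.nb_queue_pairs; qp++)
 *         if (rte_cryptodev_queue_pair_setup(dev_id, qp, &qp_conf,
 *                         conf.socket_id) < 0)
 *                 rte_exit(EXIT_FAILURE, "qp setup failed\n");
 * if (rte_cryptodev_start(dev_id) < 0)
 *         rte_exit(EXIT_FAILURE, "start failed\n");
 * @endcode
 */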
1487 
1488 struct rte_cryptodev_cb *
1489 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1490 			       uint16_t qp_id,
1491 			       rte_cryptodev_callback_fn cb_fn,
1492 			       void *cb_arg)
1493 {
1494 #ifndef RTE_CRYPTO_CALLBACKS
1495 	rte_errno = ENOTSUP;
1496 	return NULL;
1497 #endif
1498 	struct rte_cryptodev *dev;
1499 	struct rte_cryptodev_cb_rcu *list;
1500 	struct rte_cryptodev_cb *cb, *tail;
1501 
1502 	if (!cb_fn) {
1503 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1504 		rte_errno = EINVAL;
1505 		return NULL;
1506 	}
1507 
1508 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1509 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1510 		rte_errno = ENODEV;
1511 		return NULL;
1512 	}
1513 
1514 	dev = &rte_crypto_devices[dev_id];
1515 	if (qp_id >= dev->data->nb_queue_pairs) {
1516 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1517 		rte_errno = ENODEV;
1518 		return NULL;
1519 	}
1520 
1521 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1522 	if (cb == NULL) {
1523 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1524 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1525 		rte_errno = ENOMEM;
1526 		return NULL;
1527 	}
1528 
1529 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1530 
1531 	cb->fn = cb_fn;
1532 	cb->arg = cb_arg;
1533 
1534 	/* Add the callbacks in FIFO order. */
1535 	list = &dev->enq_cbs[qp_id];
1536 	tail = list->next;
1537 
1538 	if (tail) {
1539 		while (tail->next)
1540 			tail = tail->next;
1541 		/* Stores to cb->fn and cb->arg should complete before
1542 		 * cb is visible to data plane.
1543 		 */
1544 		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
1545 	} else {
1546 		/* Stores to cb->fn and cb->arg should complete before
1547 		 * cb is visible to data plane.
1548 		 */
1549 		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
1550 	}
1551 
1552 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1553 
1554 	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
1555 	return cb;
1556 }
1557 
1558 int
1559 rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1560 				  uint16_t qp_id,
1561 				  struct rte_cryptodev_cb *cb)
1562 {
1563 #ifndef RTE_CRYPTO_CALLBACKS
1564 	return -ENOTSUP;
1565 #endif
1566 	struct rte_cryptodev *dev;
1567 	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
1568 	struct rte_cryptodev_cb *curr_cb;
1569 	struct rte_cryptodev_cb_rcu *list;
1570 	int ret;
1571 
1572 	ret = -EINVAL;
1573 
1574 	if (!cb) {
1575 		CDEV_LOG_ERR("Callback is NULL");
1576 		return -EINVAL;
1577 	}
1578 
1579 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1580 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1581 		return -ENODEV;
1582 	}
1583 
1584 	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);
1585 
1586 	dev = &rte_crypto_devices[dev_id];
1587 	if (qp_id >= dev->data->nb_queue_pairs) {
1588 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1589 		return -ENODEV;
1590 	}
1591 
1592 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1593 	if (dev->enq_cbs == NULL) {
1594 		CDEV_LOG_ERR("Callback not initialized");
1595 		goto cb_err;
1596 	}
1597 
1598 	list = &dev->enq_cbs[qp_id];
1599 	if (list == NULL) {
1600 		CDEV_LOG_ERR("Callback list is NULL");
1601 		goto cb_err;
1602 	}
1603 
1604 	if (list->qsbr == NULL) {
1605 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1606 		goto cb_err;
1607 	}
1608 
1609 	prev_cb = &list->next;
1610 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1611 		curr_cb = *prev_cb;
1612 		if (curr_cb == cb) {
1613 			/* Remove the user cb from the callback list. */
1614 			rte_atomic_store_explicit(prev_cb, curr_cb->next,
1615 				rte_memory_order_relaxed);
1616 			ret = 0;
1617 			break;
1618 		}
1619 	}
1620 
1621 	if (!ret) {
1622 		/* Call sync with an invalid thread id, as this is part of
1623 		 * the control plane API.
1624 		 */
1625 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1626 		rte_free(cb);
1627 	}
1628 
1629 cb_err:
1630 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1631 	return ret;
1632 }
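
/**
 * Usage sketch (illustrative only, not part of this file): install a
 * callback on one queue pair's enqueue path and later remove it. The
 * prototype follows rte_cryptodev_callback_fn from rte_cryptodev.h; a
 * callback may inspect or trim the burst and returns the number of
 * operations left to enqueue.
 *
 * @code
 * static uint16_t
 * count_ops_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *              uint16_t nb_ops, void *user_param)
 * {
 *         uint64_t *counter = user_param;
 *
 *         RTE_SET_USED(dev_id);
 *         RTE_SET_USED(qp_id);
 *         RTE_SET_USED(ops);
 *         *counter += nb_ops;
 *         return nb_ops;
 * }
 *
 * // in setup code, after the queue pairs are configured:
 * static uint64_t total;
 * struct rte_cryptodev_cb *cb;
 *
 * cb = rte_cryptodev_add_enq_callback(dev_id, 0, count_ops_cb, &total);
 * // ... datapath runs ...
 * rte_cryptodev_remove_enq_callback(dev_id, 0, cb);
 * @endcode
 */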
1633 
1634 struct rte_cryptodev_cb *
1635 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1636 			       uint16_t qp_id,
1637 			       rte_cryptodev_callback_fn cb_fn,
1638 			       void *cb_arg)
1639 {
1640 #ifndef RTE_CRYPTO_CALLBACKS
1641 	rte_errno = ENOTSUP;
1642 	return NULL;
1643 #endif
1644 	struct rte_cryptodev *dev;
1645 	struct rte_cryptodev_cb_rcu *list;
1646 	struct rte_cryptodev_cb *cb, *tail;
1647 
1648 	if (!cb_fn) {
1649 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1650 		rte_errno = EINVAL;
1651 		return NULL;
1652 	}
1653 
1654 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1655 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1656 		rte_errno = ENODEV;
1657 		return NULL;
1658 	}
1659 
1660 	dev = &rte_crypto_devices[dev_id];
1661 	if (qp_id >= dev->data->nb_queue_pairs) {
1662 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1663 		rte_errno = ENODEV;
1664 		return NULL;
1665 	}
1666 
1667 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1668 	if (cb == NULL) {
1669 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1670 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1671 		rte_errno = ENOMEM;
1672 		return NULL;
1673 	}
1674 
1675 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1676 
1677 	cb->fn = cb_fn;
1678 	cb->arg = cb_arg;
1679 
1680 	/* Add the callbacks in FIFO order. */
1681 	list = &dev->deq_cbs[qp_id];
1682 	tail = list->next;
1683 
1684 	if (tail) {
1685 		while (tail->next)
1686 			tail = tail->next;
1687 		/* Stores to cb->fn and cb->arg should complete before
1688 		 * cb is visible to data plane.
1689 		 */
1690 		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
1691 	} else {
1692 		/* Stores to cb->fn and cb->arg should complete before
1693 		 * cb is visible to data plane.
1694 		 */
1695 		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
1696 	}
1697 
1698 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1699 
1700 	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);
1701 
1702 	return cb;
1703 }
1704 
1705 int
1706 rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1707 				  uint16_t qp_id,
1708 				  struct rte_cryptodev_cb *cb)
1709 {
1710 #ifndef RTE_CRYPTO_CALLBACKS
1711 	return -ENOTSUP;
1712 #endif
1713 	struct rte_cryptodev *dev;
1714 	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
1715 	struct rte_cryptodev_cb *curr_cb;
1716 	struct rte_cryptodev_cb_rcu *list;
1717 	int ret;
1718 
1719 	ret = -EINVAL;
1720 
1721 	if (!cb) {
1722 		CDEV_LOG_ERR("Callback is NULL");
1723 		return -EINVAL;
1724 	}
1725 
1726 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1727 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1728 		return -ENODEV;
1729 	}
1730 
1731 	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);
1732 
1733 	dev = &rte_crypto_devices[dev_id];
1734 	if (qp_id >= dev->data->nb_queue_pairs) {
1735 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1736 		return -ENODEV;
1737 	}
1738 
1739 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1740 	if (dev->deq_cbs == NULL) {
1741 		CDEV_LOG_ERR("Callback not initialized");
1742 		goto cb_err;
1743 	}
1744 
1745 	list = &dev->deq_cbs[qp_id];
1746 	if (list == NULL) {
1747 		CDEV_LOG_ERR("Callback list is NULL");
1748 		goto cb_err;
1749 	}
1750 
1751 	if (list->qsbr == NULL) {
1752 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1753 		goto cb_err;
1754 	}
1755 
1756 	prev_cb = &list->next;
1757 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1758 		curr_cb = *prev_cb;
1759 		if (curr_cb == cb) {
1760 			/* Remove the user cb from the callback list. */
1761 			rte_atomic_store_explicit(prev_cb, curr_cb->next,
1762 				rte_memory_order_relaxed);
1763 			ret = 0;
1764 			break;
1765 		}
1766 	}
1767 
1768 	if (!ret) {
1769 		/* Call sync with an invalid thread id, as this is part of
1770 		 * the control plane API.
1771 		 */
1772 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1773 		rte_free(cb);
1774 	}
1775 
1776 cb_err:
1777 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1778 	return ret;
1779 }
1780 
1781 int
1782 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1783 {
1784 	struct rte_cryptodev *dev;
1785 
1786 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1787 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1788 		return -ENODEV;
1789 	}
1790 
1791 	if (stats == NULL) {
1792 		CDEV_LOG_ERR("Invalid stats ptr");
1793 		return -EINVAL;
1794 	}
1795 
1796 	dev = &rte_crypto_devices[dev_id];
1797 	memset(stats, 0, sizeof(*stats));
1798 
1799 	if (*dev->dev_ops->stats_get == NULL)
1800 		return -ENOTSUP;
1801 	(*dev->dev_ops->stats_get)(dev, stats);
1802 
1803 	rte_cryptodev_trace_stats_get(dev_id, stats);
1804 	return 0;
1805 }
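
/**
 * Usage sketch (illustrative only, not part of this file): read and print
 * the basic enqueue/dequeue counters.
 *
 * @code
 * struct rte_cryptodev_stats stats;
 *
 * if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *         printf("enq %" PRIu64 " (err %" PRIu64 "), deq %" PRIu64
 *                " (err %" PRIu64 ")\n",
 *                stats.enqueued_count, stats.enqueue_err_count,
 *                stats.dequeued_count, stats.dequeue_err_count);
 * @endcode
 */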
1806 
1807 void
1808 rte_cryptodev_stats_reset(uint8_t dev_id)
1809 {
1810 	struct rte_cryptodev *dev;
1811 
1812 	rte_cryptodev_trace_stats_reset(dev_id);
1813 
1814 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1815 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1816 		return;
1817 	}
1818 
1819 	dev = &rte_crypto_devices[dev_id];
1820 
1821 	if (*dev->dev_ops->stats_reset == NULL)
1822 		return;
1823 	(*dev->dev_ops->stats_reset)(dev);
1824 }
1825 
1826 void
1827 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1828 {
1829 	struct rte_cryptodev *dev;
1830 
1831 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1832 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1833 		return;
1834 	}
1835 
1836 	dev = &rte_crypto_devices[dev_id];
1837 
1838 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1839 
1840 	if (*dev->dev_ops->dev_infos_get == NULL)
1841 		return;
1842 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1843 
1844 	dev_info->driver_name = dev->device->driver->name;
1845 	dev_info->device = dev->device;
1846 
1847 	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
1848 
1849 }
1850 
1851 int
1852 rte_cryptodev_callback_register(uint8_t dev_id,
1853 			enum rte_cryptodev_event_type event,
1854 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1855 {
1856 	struct rte_cryptodev *dev;
1857 	struct rte_cryptodev_callback *user_cb;
1858 
1859 	if (!cb_fn)
1860 		return -EINVAL;
1861 
1862 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1863 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1864 		return -EINVAL;
1865 	}
1866 
1867 	dev = &rte_crypto_devices[dev_id];
1868 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1869 
1870 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1871 		if (user_cb->cb_fn == cb_fn &&
1872 			user_cb->cb_arg == cb_arg &&
1873 			user_cb->event == event) {
1874 			break;
1875 		}
1876 	}
1877 
1878 	/* create a new callback. */
1879 	if (user_cb == NULL) {
1880 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1881 				sizeof(struct rte_cryptodev_callback), 0);
1882 		if (user_cb != NULL) {
1883 			user_cb->cb_fn = cb_fn;
1884 			user_cb->cb_arg = cb_arg;
1885 			user_cb->event = event;
1886 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1887 		}
1888 	}
1889 
1890 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1891 
1892 	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
1893 	return (user_cb == NULL) ? -ENOMEM : 0;
1894 }
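
/**
 * Usage sketch (illustrative only, not part of this file): register for
 * device error events; the prototype follows rte_cryptodev_cb_fn from
 * rte_cryptodev.h. PMDs deliver events through
 * rte_cryptodev_pmd_callback_process() further below.
 *
 * @code
 * static void
 * on_dev_event(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *              void *cb_arg)
 * {
 *         RTE_SET_USED(cb_arg);
 *         printf("dev %u raised event %d\n", dev_id, (int)event);
 * }
 *
 * rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *                 on_dev_event, NULL);
 * @endcode
 */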
1895 
1896 int
1897 rte_cryptodev_callback_unregister(uint8_t dev_id,
1898 			enum rte_cryptodev_event_type event,
1899 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1900 {
1901 	int ret;
1902 	struct rte_cryptodev *dev;
1903 	struct rte_cryptodev_callback *cb, *next;
1904 
1905 	if (!cb_fn)
1906 		return -EINVAL;
1907 
1908 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1909 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1910 		return -EINVAL;
1911 	}
1912 
1913 	dev = &rte_crypto_devices[dev_id];
1914 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1915 
1916 	ret = 0;
1917 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1918 
1919 		next = TAILQ_NEXT(cb, next);
1920 
1921 		if (cb->cb_fn != cb_fn || cb->event != event ||
1922 				(cb->cb_arg != (void *)-1 &&
1923 				cb->cb_arg != cb_arg))
1924 			continue;
1925 
1926 		/*
1927 		 * If this callback is not executing right now,
1928 		 * then remove it.
1929 		 */
1930 		if (cb->active == 0) {
1931 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1932 			rte_free(cb);
1933 		} else {
1934 			ret = -EAGAIN;
1935 		}
1936 	}
1937 
1938 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1939 
1940 	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
1941 	return ret;
1942 }
1943 
1944 void
1945 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1946 	enum rte_cryptodev_event_type event)
1947 {
1948 	struct rte_cryptodev_callback *cb_lst;
1949 	struct rte_cryptodev_callback dev_cb;
1950 
1951 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1952 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1953 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1954 			continue;
1955 		dev_cb = *cb_lst;
1956 		cb_lst->active = 1;
1957 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1958 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1959 						dev_cb.cb_arg);
1960 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
1961 		cb_lst->active = 0;
1962 	}
1963 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1964 }
1965 
1966 int
1967 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
1968 {
1969 	struct rte_cryptodev *dev;
1970 
1971 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1972 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1973 		return -EINVAL;
1974 	}
1975 	dev = &rte_crypto_devices[dev_id];
1976 
1977 	if (qp_id >= dev->data->nb_queue_pairs)
1978 		return -EINVAL;
1979 	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
1980 		return -ENOTSUP;
1981 
1982 	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
1983 }
1984 
1985 struct rte_mempool *
1986 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1987 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1988 	int socket_id)
1989 {
1990 	struct rte_mempool *mp;
1991 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1992 	uint32_t obj_sz;
1993 
1994 	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;
1995 
1996 	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
1997 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1998 			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
1999 			NULL, NULL,
2000 			socket_id, 0);
2001 	if (mp == NULL) {
2002 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2003 			__func__, name, rte_errno);
2004 		return NULL;
2005 	}
2006 
2007 	pool_priv = rte_mempool_get_priv(mp);
2008 	if (!pool_priv) {
2009 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2010 			__func__, name);
2011 		rte_mempool_free(mp);
2012 		return NULL;
2013 	}
2014 
2015 	pool_priv->sess_data_sz = elt_size;
2016 	pool_priv->user_data_sz = user_data_size;
2017 
2018 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
2019 		elt_size, cache_size, user_data_size, mp);
2020 	return mp;
2021 }
2022 
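/*
 * Usage sketch (editorial): size each element for the device's private
 * session data plus optional per-session user data. "sess_pool" and the
 * pool dimensions are hypothetical.
 *
 *	uint32_t sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *sess_pool =
 *		rte_cryptodev_sym_session_pool_create("sess_pool", 1024,
 *			sz, 32, 0, rte_socket_id());
 *	if (sess_pool == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create session pool\n");
 */
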
2023 struct rte_mempool *
2024 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
2025 	uint32_t cache_size, uint16_t user_data_size, int socket_id)
2026 {
2027 	struct rte_mempool *mp;
2028 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2029 	uint32_t obj_sz, obj_sz_aligned;
2030 	uint8_t dev_id;
2031 	unsigned int priv_sz, max_priv_sz = 0;
2032 
2033 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2034 		if (rte_cryptodev_is_valid_dev(dev_id)) {
2035 			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
2036 			if (priv_sz > max_priv_sz)
2037 				max_priv_sz = priv_sz;
2038 		}
2039 	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("No valid device found; cannot determine max private session size");
2041 		return NULL;
2042 	}
2043 
2044 	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
2045 			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2047 
2048 	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
2049 			(uint32_t)(sizeof(*pool_priv)),
2050 			NULL, NULL, NULL, NULL,
2051 			socket_id, 0);
2052 	if (mp == NULL) {
2053 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2054 			__func__, name, rte_errno);
2055 		return NULL;
2056 	}
2057 
2058 	pool_priv = rte_mempool_get_priv(mp);
2059 	if (!pool_priv) {
2060 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2061 			__func__, name);
2062 		rte_mempool_free(mp);
2063 		return NULL;
2064 	}
2065 	pool_priv->max_priv_session_sz = max_priv_sz;
2066 	pool_priv->user_data_sz = user_data_size;
2067 
2068 	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
2069 		user_data_size, cache_size, mp);
2070 	return mp;
2071 }
2072 
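/*
 * Usage sketch (editorial): unlike the symmetric variant, the element size
 * here is derived internally from the largest private session size across
 * all probed devices, so no element size is passed. "asym_pool" is a
 * hypothetical name.
 *
 *	struct rte_mempool *asym_pool =
 *		rte_cryptodev_asym_session_pool_create("asym_pool", 1024,
 *			32, 0, rte_socket_id());
 */
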
2073 void *
2074 rte_cryptodev_sym_session_create(uint8_t dev_id,
2075 		struct rte_crypto_sym_xform *xforms,
2076 		struct rte_mempool *mp)
2077 {
2078 	struct rte_cryptodev *dev;
2079 	struct rte_cryptodev_sym_session *sess;
2080 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2081 	uint32_t sess_priv_sz;
2082 	int ret;
2083 
2084 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2085 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2086 		rte_errno = EINVAL;
2087 		return NULL;
2088 	}
2089 
2090 	if (xforms == NULL) {
2091 		CDEV_LOG_ERR("Invalid xform");
2092 		rte_errno = EINVAL;
2093 		return NULL;
2094 	}
2095 
2096 	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
2097 	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
2098 		CDEV_LOG_ERR("Invalid mempool");
2099 		rte_errno = EINVAL;
2100 		return NULL;
2101 	}
2102 
2103 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2104 
2105 	/* Allocate a session structure from the session pool */
2106 	if (rte_mempool_get(mp, (void **)&sess)) {
2107 		CDEV_LOG_ERR("couldn't get object from session mempool");
2108 		rte_errno = ENOMEM;
2109 		return NULL;
2110 	}
2111 
2112 	pool_priv = rte_mempool_get_priv(mp);
2113 	sess->driver_id = dev->driver_id;
2114 	sess->sess_data_sz = pool_priv->sess_data_sz;
2115 	sess->user_data_sz = pool_priv->user_data_sz;
2116 	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
2117 		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);
2118 
2119 	if (dev->dev_ops->sym_session_configure == NULL) {
2120 		rte_errno = ENOTSUP;
2121 		goto error_exit;
2122 	}
2123 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2124 
2125 	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
2126 	if (ret < 0) {
2127 		rte_errno = -ret;
2128 		goto error_exit;
2129 	}
2131 
2132 	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);
2133 
2134 	return (void *)sess;
2135 error_exit:
2136 	rte_mempool_put(mp, (void *)sess);
2137 	return NULL;
2138 }
2139 
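/*
 * Usage sketch (editorial): creating an AES-CBC encrypt session from a
 * pool made with rte_cryptodev_sym_session_pool_create(). "cipher_key"
 * and "IV_OFFSET" are hypothetical.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform,
 *			sess_pool);
 *	if (sess == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create session\n");
 */
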
2140 int
2141 rte_cryptodev_asym_session_create(uint8_t dev_id,
2142 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
2143 		void **session)
2144 {
2145 	struct rte_cryptodev_asym_session *sess;
2146 	uint32_t session_priv_data_sz;
2147 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2148 	unsigned int session_header_size =
2149 			rte_cryptodev_asym_get_header_session_size();
2150 	struct rte_cryptodev *dev;
2151 	int ret;
2152 
2153 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2154 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2155 		return -EINVAL;
2156 	}
2157 
2158 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2159 
2160 	if (dev == NULL)
2161 		return -EINVAL;
2162 
2163 	if (!mp) {
2164 		CDEV_LOG_ERR("invalid mempool");
2165 		return -EINVAL;
2166 	}
2167 
2168 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
2169 			dev_id);
	pool_priv = rte_mempool_get_priv(mp);
	if (pool_priv == NULL || mp->private_data_size < sizeof(*pool_priv)) {
		CDEV_LOG_ERR("Invalid mempool");
		return -EINVAL;
	}

2172 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
2173 		CDEV_LOG_DEBUG(
			"Mempool was created with a private session data size smaller than this device requires");
2175 		return -EINVAL;
2176 	}
2177 
2178 	/* Verify if provided mempool can hold elements big enough. */
2179 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
2180 		CDEV_LOG_ERR(
2181 			"mempool elements too small to hold session objects");
2182 		return -EINVAL;
2183 	}
2184 
2185 	/* Allocate a session structure from the session pool */
2186 	if (rte_mempool_get(mp, session)) {
2187 		CDEV_LOG_ERR("couldn't get object from session mempool");
2188 		return -ENOMEM;
2189 	}
2190 
2191 	sess = *session;
2192 	sess->driver_id = dev->driver_id;
2193 	sess->user_data_sz = pool_priv->user_data_sz;
2194 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
2195 
2196 	/* Clear device session pointer.*/
	/* Clear the driver private session data and the user data area. */
2198 
	if (*dev->dev_ops->asym_session_configure == NULL) {
		/* Return the unused session to its mempool before failing. */
		rte_mempool_put(mp, *session);
		return -ENOTSUP;
	}

	/* The memset above zeroed the private data, so configure directly. */
	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		CDEV_LOG_ERR(
			"dev_id %d failed to configure session details",
			dev_id);
		rte_mempool_put(mp, *session);
		return ret;
	}
2211 
2212 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2213 	return 0;
2214 }
2215 
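/*
 * Usage sketch (editorial): creating a modular-exponentiation session.
 * "mod_n" and "mod_e" are hypothetical buffers holding the modulus and
 * public exponent.
 *
 *	void *asym_sess = NULL;
 *	struct rte_crypto_asym_xform xform = {
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *		.modex = {
 *			.modulus = { .data = mod_n, .length = sizeof(mod_n) },
 *			.exponent = { .data = mod_e, .length = sizeof(mod_e) },
 *		},
 *	};
 *	if (rte_cryptodev_asym_session_create(dev_id, &xform, asym_pool,
 *			&asym_sess) < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot create asym session\n");
 */
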
2216 int
2217 rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
2218 {
2219 	struct rte_cryptodev *dev;
2220 	struct rte_mempool *sess_mp;
2221 	struct rte_cryptodev_sym_session *sess = _sess;
2222 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2223 
2224 	if (sess == NULL)
2225 		return -EINVAL;
2226 
2227 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2228 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2229 		return -EINVAL;
2230 	}
2231 
2232 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2233 
	if (dev == NULL)
2235 		return -EINVAL;
2236 
2237 	sess_mp = rte_mempool_from_obj(sess);
2238 	if (!sess_mp)
2239 		return -EINVAL;
2240 	pool_priv = rte_mempool_get_priv(sess_mp);
2241 
2242 	if (sess->driver_id != dev->driver_id) {
2243 		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
2244 			sess->driver_id, dev->driver_id);
2245 		return -EINVAL;
2246 	}
2247 
2248 	if (*dev->dev_ops->sym_session_clear == NULL)
2249 		return -ENOTSUP;
2250 
2251 	dev->dev_ops->sym_session_clear(dev, sess);
2252 
2253 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2254 
2255 	/* Return session to mempool */
2256 	rte_mempool_put(sess_mp, sess);
2257 
2258 	rte_cryptodev_trace_sym_session_free(dev_id, sess);
2259 	return 0;
2260 }
2261 
2262 int
2263 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2264 {
2265 	struct rte_mempool *sess_mp;
2266 	struct rte_cryptodev *dev;
2267 
2268 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2269 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2270 		return -EINVAL;
2271 	}
2272 
2273 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2274 
2275 	if (dev == NULL || sess == NULL)
2276 		return -EINVAL;
2277 
2278 	if (*dev->dev_ops->asym_session_clear == NULL)
2279 		return -ENOTSUP;
2280 
2281 	dev->dev_ops->asym_session_clear(dev, sess);
2282 
2283 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2284 
2285 	/* Return session to mempool */
2286 	sess_mp = rte_mempool_from_obj(sess);
2287 	rte_mempool_put(sess_mp, sess);
2288 
2289 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2290 	return 0;
2291 }
2292 
2293 unsigned int
2294 rte_cryptodev_asym_get_header_session_size(void)
2295 {
2296 	return sizeof(struct rte_cryptodev_asym_session);
2297 }
2298 
2299 unsigned int
2300 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2301 {
2302 	struct rte_cryptodev *dev;
2303 	unsigned int priv_sess_size;
2304 
2305 	if (!rte_cryptodev_is_valid_dev(dev_id))
2306 		return 0;
2307 
2308 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2309 
2310 	if (*dev->dev_ops->sym_session_get_size == NULL)
2311 		return 0;
2312 
2313 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2314 
2315 	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
2316 		priv_sess_size);
2317 
2318 	return priv_sess_size;
2319 }
2320 
2321 unsigned int
2322 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2323 {
2324 	struct rte_cryptodev *dev;
2325 	unsigned int priv_sess_size;
2326 
2327 	if (!rte_cryptodev_is_valid_dev(dev_id))
2328 		return 0;
2329 
2330 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2331 
2332 	if (*dev->dev_ops->asym_session_get_size == NULL)
2333 		return 0;
2334 
2335 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2336 
2337 	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
2338 		priv_sess_size);
2339 
2340 	return priv_sess_size;
2341 }
2342 
2343 int
2344 rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
2345 		uint16_t size)
2346 {
2347 	struct rte_cryptodev_sym_session *sess = _sess;
2348 
2349 	if (sess == NULL)
2350 		return -EINVAL;
2351 
2352 	if (sess->user_data_sz < size)
2353 		return -ENOMEM;
2354 
2355 	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);
2356 
2357 	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);
2358 
2359 	return 0;
2360 }
2361 
2362 void *
2363 rte_cryptodev_sym_session_get_user_data(void *_sess)
2364 {
2365 	struct rte_cryptodev_sym_session *sess = _sess;
2366 	void *data = NULL;
2367 
2368 	if (sess == NULL || sess->user_data_sz == 0)
2369 		return NULL;
2370 
2371 	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);
2372 
2373 	rte_cryptodev_trace_sym_session_get_user_data(sess, data);
2374 
2375 	return data;
2376 }
2377 
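/*
 * Usage sketch (editorial): the user data area sits after the driver's
 * private data and can hold at most the user_data_size given when the
 * session pool was created. "app_ctx" is a hypothetical application type.
 *
 *	struct app_ctx ctx = { 0 };
 *	struct app_ctx *p;
 *
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	p = rte_cryptodev_sym_session_get_user_data(sess);
 */
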
2378 int
2379 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2380 {
2381 	struct rte_cryptodev_asym_session *sess = session;
2382 	if (sess == NULL)
2383 		return -EINVAL;
2384 
2385 	if (sess->user_data_sz < size)
2386 		return -ENOMEM;
2387 
2388 	rte_memcpy(sess->sess_private_data +
2389 			sess->max_priv_data_sz,
2390 			data, size);
2391 
2392 	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);
2393 
2394 	return 0;
2395 }
2396 
2397 void *
2398 rte_cryptodev_asym_session_get_user_data(void *session)
2399 {
2400 	struct rte_cryptodev_asym_session *sess = session;
2401 	void *data = NULL;
2402 
2403 	if (sess == NULL || sess->user_data_sz == 0)
2404 		return NULL;
2405 
2406 	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);
2407 
2408 	rte_cryptodev_trace_asym_session_get_user_data(sess, data);
2409 
2410 	return data;
2411 }
2412 
2413 static inline void
2414 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2415 {
2416 	uint32_t i;
2417 	for (i = 0; i < vec->num; i++)
2418 		vec->status[i] = errnum;
2419 }
2420 
2421 uint32_t
2422 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2423 	void *_sess, union rte_crypto_sym_ofs ofs,
2424 	struct rte_crypto_sym_vec *vec)
2425 {
2426 	struct rte_cryptodev *dev;
2427 	struct rte_cryptodev_sym_session *sess = _sess;
2428 
2429 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2430 		sym_crypto_fill_status(vec, EINVAL);
2431 		return 0;
2432 	}
2433 
2434 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2435 
2436 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2437 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2438 		sym_crypto_fill_status(vec, ENOTSUP);
2439 		return 0;
2440 	}
2441 
2442 	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);
2443 
2444 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2445 }
2446 
2447 int
2448 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2449 {
2450 	struct rte_cryptodev *dev;
2451 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2452 	int32_t priv_size;
2453 
2454 	if (!rte_cryptodev_is_valid_dev(dev_id))
2455 		return -EINVAL;
2456 
2457 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2458 
2459 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2460 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2461 		return -ENOTSUP;
2462 	}
2463 
2464 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2465 	if (priv_size < 0)
2466 		return -ENOTSUP;
2467 
2468 	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);
2469 
2470 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2471 }
2472 
2473 int
2474 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2475 	struct rte_crypto_raw_dp_ctx *ctx,
2476 	enum rte_crypto_op_sess_type sess_type,
2477 	union rte_cryptodev_session_ctx session_ctx,
2478 	uint8_t is_update)
2479 {
2480 	struct rte_cryptodev *dev;
2481 
2482 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2483 		return -EINVAL;
2484 
2485 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2486 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2487 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2488 		return -ENOTSUP;
2489 
2490 	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);
2491 
2492 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2493 			sess_type, session_ctx, is_update);
2494 }
2495 
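/*
 * Usage sketch (editorial): setting up a raw data-path context on an
 * already-configured queue pair for a symmetric session. "qp_id" and
 * "sess" are hypothetical.
 *
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	int ctx_sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *
 *	if (ctx_sz < 0)
 *		rte_exit(EXIT_FAILURE, "Raw data-path not supported\n");
 *	ctx = rte_zmalloc(NULL, ctx_sz, RTE_CACHE_LINE_SIZE);
 *	if (ctx == NULL || rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id,
 *			ctx, RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot configure raw DP context\n");
 */
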
2496 int
2497 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2498 	enum rte_crypto_op_type op_type,
2499 	enum rte_crypto_op_sess_type sess_type,
2500 	void *ev_mdata,
2501 	uint16_t size)
2502 {
2503 	struct rte_cryptodev *dev;
2504 
2505 	if (sess == NULL || ev_mdata == NULL)
2506 		return -EINVAL;
2507 
2508 	if (!rte_cryptodev_is_valid_dev(dev_id))
2509 		goto skip_pmd_op;
2510 
2511 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2512 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2513 		goto skip_pmd_op;
2514 
2515 	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
2516 		sess_type, ev_mdata, size);
2517 
2518 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2519 			sess_type, ev_mdata);
2520 
2521 skip_pmd_op:
2522 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2523 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2524 				size);
2525 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2526 		struct rte_cryptodev_asym_session *s = sess;
2527 
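		/*
		 * The metadata buffer is sized by the first call; later
		 * calls reuse it, so they must not pass a larger size.
		 */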
2528 		if (s->event_mdata == NULL) {
2529 			s->event_mdata = rte_malloc(NULL, size, 0);
2530 			if (s->event_mdata == NULL)
2531 				return -ENOMEM;
2532 		}
2533 		rte_memcpy(s->event_mdata, ev_mdata, size);
2534 
2535 		return 0;
2536 	} else
2537 		return -ENOTSUP;
2538 }
2539 
2540 uint32_t
2541 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2542 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2543 	void **user_data, int *enqueue_status)
2544 {
2545 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2546 			ofs, user_data, enqueue_status);
2547 }
2548 
2549 int
2550 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2551 		uint32_t n)
2552 {
2553 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2554 }
2555 
2556 uint32_t
2557 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2558 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2559 	uint32_t max_nb_to_dequeue,
2560 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2561 	void **out_user_data, uint8_t is_user_data_array,
2562 	uint32_t *n_success_jobs, int *status)
2563 {
2564 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2565 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2566 		out_user_data, is_user_data_array, n_success_jobs, status);
2567 }
2568 
2569 int
2570 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2571 		uint32_t n)
2572 {
2573 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2574 }
2575 
2576 /** Initialise rte_crypto_op mempool element */
2577 static void
2578 rte_crypto_op_init(struct rte_mempool *mempool,
2579 		void *opaque_arg,
2580 		void *_op_data,
2581 		__rte_unused unsigned i)
2582 {
2583 	struct rte_crypto_op *op = _op_data;
2584 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2585 
2586 	memset(_op_data, 0, mempool->elt_size);
2587 
2588 	__rte_crypto_op_reset(op, type);
2589 
2590 	op->phys_addr = rte_mempool_virt2iova(_op_data);
2591 	op->mempool = mempool;
2592 }
2595 struct rte_mempool *
2596 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2597 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2598 		int socket_id)
2599 {
2600 	struct rte_crypto_op_pool_private *priv;
2601 
2602 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2603 			priv_size;
2604 
2605 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2606 		elt_size += sizeof(struct rte_crypto_sym_op);
2607 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2608 		elt_size += sizeof(struct rte_crypto_asym_op);
2609 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2610 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2611 		                    sizeof(struct rte_crypto_asym_op));
2612 	} else {
2613 		CDEV_LOG_ERR("Invalid op_type");
2614 		return NULL;
2615 	}
2616 
2617 	/* lookup mempool in case already allocated */
2618 	struct rte_mempool *mp = rte_mempool_lookup(name);
2619 
2620 	if (mp != NULL) {
2621 		priv = (struct rte_crypto_op_pool_private *)
2622 				rte_mempool_get_priv(mp);
2623 
2624 		if (mp->elt_size != elt_size ||
2625 				mp->cache_size < cache_size ||
2626 				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
2629 			CDEV_LOG_ERR("Mempool %s already exists but with "
2630 					"incompatible parameters", name);
2631 			return NULL;
2632 		}
2633 		return mp;
2634 	}
2635 
2636 	mp = rte_mempool_create(
2637 			name,
2638 			nb_elts,
2639 			elt_size,
2640 			cache_size,
2641 			sizeof(struct rte_crypto_op_pool_private),
2642 			NULL,
2643 			NULL,
2644 			rte_crypto_op_init,
2645 			&type,
2646 			socket_id,
2647 			0);
2648 
2649 	if (mp == NULL) {
2650 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2651 		return NULL;
2652 	}
2653 
2654 	priv = (struct rte_crypto_op_pool_private *)
2655 			rte_mempool_get_priv(mp);
2656 
2657 	priv->priv_size = priv_size;
2658 	priv->type = type;
2659 
2660 	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
2661 	return mp;
2662 }
2663 
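/*
 * Usage sketch (editorial): a symmetric op pool with 16 bytes of per-op
 * private space (e.g. room for a cipher IV), and one op drawn from it.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 16,
 *			rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */
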
2664 int
2665 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2666 {
2667 	struct rte_cryptodev *dev = NULL;
2668 	uint32_t i = 0;
2669 
2670 	if (name == NULL)
2671 		return -EINVAL;
2672 
2673 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2674 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2675 				"%s_%u", dev_name_prefix, i);
2676 
2677 		if (ret < 0)
2678 			return ret;
2679 
2680 		dev = rte_cryptodev_pmd_get_named_dev(name);
2681 		if (!dev)
2682 			return 0;
2683 	}
2684 
2685 	return -1;
2686 }
2687 
2688 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2689 
2690 static struct cryptodev_driver_list cryptodev_driver_list =
2691 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2692 
2693 int
2694 rte_cryptodev_driver_id_get(const char *name)
2695 {
2696 	struct cryptodev_driver *driver;
2697 	const char *driver_name;
2698 	int driver_id = -1;
2699 
2700 	if (name == NULL) {
2701 		CDEV_LOG_DEBUG("name pointer NULL");
2702 		return -1;
2703 	}
2704 
2705 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2706 		driver_name = driver->driver->name;
2707 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
2708 			driver_id = driver->id;
2709 			break;
2710 		}
2711 	}
2712 
2713 	rte_cryptodev_trace_driver_id_get(name, driver_id);
2714 
2715 	return driver_id;
2716 }
2717 
2718 const char *
2719 rte_cryptodev_name_get(uint8_t dev_id)
2720 {
2721 	struct rte_cryptodev *dev;
2722 
2723 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2724 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2725 		return NULL;
2726 	}
2727 
2728 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2729 	if (dev == NULL)
2730 		return NULL;
2731 
2732 	rte_cryptodev_trace_name_get(dev_id, dev->data->name);
2733 
2734 	return dev->data->name;
2735 }
2736 
2737 const char *
2738 rte_cryptodev_driver_name_get(uint8_t driver_id)
2739 {
2740 	struct cryptodev_driver *driver;
2741 
2742 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2743 		if (driver->id == driver_id) {
2744 			rte_cryptodev_trace_driver_name_get(driver_id,
2745 				driver->driver->name);
2746 			return driver->driver->name;
2747 		}
2748 	}
2749 	return NULL;
2750 }
2751 
2752 uint8_t
2753 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2754 		const struct rte_driver *drv)
2755 {
2756 	crypto_drv->driver = drv;
2757 	crypto_drv->id = nb_drivers;
2758 
2759 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2760 
2761 	rte_cryptodev_trace_allocate_driver(drv->name);
2762 
2763 	return nb_drivers++;
2764 }
2765 
2766 RTE_INIT(cryptodev_init_fp_ops)
2767 {
2768 	uint32_t i;
2769 
2770 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2771 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2772 }
2773 
2774 static int
2775 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2776 		const char *params __rte_unused,
2777 		struct rte_tel_data *d)
2778 {
2779 	int dev_id;
2780 
2781 	if (rte_cryptodev_count() < 1)
2782 		return -EINVAL;
2783 
2784 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2785 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2786 		if (rte_cryptodev_is_valid_dev(dev_id))
2787 			rte_tel_data_add_array_int(d, dev_id);
2788 
2789 	return 0;
2790 }
2791 
2792 static int
2793 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2794 		const char *params, struct rte_tel_data *d)
2795 {
2796 	struct rte_cryptodev_info cryptodev_info;
2797 	int dev_id;
2798 	char *end_param;
2799 
2800 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2801 		return -EINVAL;
2802 
2803 	dev_id = strtoul(params, &end_param, 0);
2804 	if (*end_param != '\0')
2805 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2806 	if (!rte_cryptodev_is_valid_dev(dev_id))
2807 		return -EINVAL;
2808 
2809 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2810 
2811 	rte_tel_data_start_dict(d);
2812 	rte_tel_data_add_dict_string(d, "device_name",
2813 		cryptodev_info.device->name);
2814 	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
2815 		cryptodev_info.max_nb_queue_pairs);
2816 
2817 	return 0;
2818 }
2819 
2820 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)
2821 
2822 static int
2823 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2824 		const char *params,
2825 		struct rte_tel_data *d)
2826 {
2827 	struct rte_cryptodev_stats cryptodev_stats;
2828 	int dev_id, ret;
2829 	char *end_param;
2830 
2831 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2832 		return -EINVAL;
2833 
2834 	dev_id = strtoul(params, &end_param, 0);
2835 	if (*end_param != '\0')
2836 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2837 	if (!rte_cryptodev_is_valid_dev(dev_id))
2838 		return -EINVAL;
2839 
2840 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2841 	if (ret < 0)
2842 		return ret;
2843 
2844 	rte_tel_data_start_dict(d);
2845 	ADD_DICT_STAT(enqueued_count);
2846 	ADD_DICT_STAT(dequeued_count);
2847 	ADD_DICT_STAT(enqueue_err_count);
2848 	ADD_DICT_STAT(dequeue_err_count);
2849 
2850 	return 0;
2851 }
2852 
2853 #define CRYPTO_CAPS_SZ                                             \
2854 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2855 					sizeof(uint64_t)) /        \
2856 	 sizeof(uint64_t))
2857 
2858 static int
2859 crypto_caps_array(struct rte_tel_data *d,
2860 		  const struct rte_cryptodev_capabilities *capabilities)
2861 {
2862 	const struct rte_cryptodev_capabilities *dev_caps;
2863 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2864 	unsigned int i = 0, j;
2865 
2866 	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);
2867 
2868 	while ((dev_caps = &capabilities[i++])->op !=
2869 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2870 		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
2871 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2872 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2873 			rte_tel_data_add_array_uint(d, caps_val[j]);
2874 	}
2875 
	/* i has advanced one past the terminating entry; report real count. */
	return i - 1;
2877 }
2878 
2879 static int
2880 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2881 			  struct rte_tel_data *d)
2882 {
2883 	struct rte_cryptodev_info dev_info;
2884 	struct rte_tel_data *crypto_caps;
2885 	int crypto_caps_n;
2886 	char *end_param;
2887 	int dev_id;
2888 
2889 	if (!params || strlen(params) == 0 || !isdigit(*params))
2890 		return -EINVAL;
2891 
2892 	dev_id = strtoul(params, &end_param, 0);
2893 	if (*end_param != '\0')
2894 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2895 	if (!rte_cryptodev_is_valid_dev(dev_id))
2896 		return -EINVAL;
2897 
2898 	rte_tel_data_start_dict(d);
2899 	crypto_caps = rte_tel_data_alloc();
2900 	if (!crypto_caps)
2901 		return -ENOMEM;
2902 
2903 	rte_cryptodev_info_get(dev_id, &dev_info);
2904 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2905 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2906 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2907 
2908 	return 0;
2909 }
2910 
2911 RTE_INIT(cryptodev_init_telemetry)
2912 {
2913 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2914 			"Returns information for a cryptodev. Parameters: int dev_id");
2915 	rte_telemetry_register_cmd("/cryptodev/list",
2916 			cryptodev_handle_dev_list,
2917 			"Returns list of available crypto devices by IDs. No parameters.");
2918 	rte_telemetry_register_cmd("/cryptodev/stats",
2919 			cryptodev_handle_dev_stats,
2920 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2921 	rte_telemetry_register_cmd("/cryptodev/caps",
2922 			cryptodev_handle_dev_caps,
2923 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2924 }
2925
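/*
 * Usage sketch (editorial): the endpoints registered above can be queried
 * with usertools/dpdk-telemetry.py; output shown is illustrative.
 *
 *	--> /cryptodev/list
 *	{"/cryptodev/list": [0]}
 *	--> /cryptodev/stats,0
 *	{"/cryptodev/stats": {"enqueued_count": 0, "dequeued_count": 0,
 *	    "enqueue_err_count": 0, "dequeue_err_count": 0}}
 */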