xref: /dpdk/lib/cryptodev/rte_cryptodev.c (revision 0f1dc8cb671203d52488fd66936f2fe6dcca03cc)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

RTE_LOG_REGISTER_DEFAULT(rte_cryptodev_logtype, INFO);

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};
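
/*
 * Illustrative usage sketch (not part of the library; app_event_cb is a
 * made-up name): an application attaches such a callback through
 * rte_cryptodev_callback_register(), defined later in this file, e.g.:
 *
 *	static void
 *	app_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			printf("device %u reported an error\n", dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			app_event_cb, NULL);
 */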

/**
 * String identifiers for the crypto cipher algorithms.
 * Not to be used by applications directly;
 * applications can use rte_cryptodev_get_cipher_algo_string().
 */
static const char *
crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr",
	[RTE_CRYPTO_CIPHER_SM4_CFB]	= "sm4-cfb",
	[RTE_CRYPTO_CIPHER_SM4_OFB]	= "sm4-ofb"
};

/**
 * String identifiers for the crypto cipher operations.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto auth algorithms.
 * Not to be used by applications directly;
 * applications can use rte_cryptodev_get_auth_algo_string().
 */
static const char *
crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
	[RTE_CRYPTO_AUTH_SM3]		= "sm3",
	[RTE_CRYPTO_AUTH_SM3_HMAC]	= "sm3-hmac",

	[RTE_CRYPTO_AUTH_SHAKE_128]	= "shake-128",
	[RTE_CRYPTO_AUTH_SHAKE_256]	= "shake-256",
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * Not to be used by applications directly;
 * applications can use rte_cryptodev_get_aead_algo_string().
 */
static const char *
crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transform operations.
 * Not to be used by applications directly;
 * applications can use rte_cryptodev_asym_get_xform_string().
 */
static const char *
crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
	[RTE_CRYPTO_ASYM_XFORM_SM2]	= "sm2",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * String identifiers for the asymmetric crypto key exchange operations.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

/**
 * The private data structure stored in the sym session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t sess_data_sz;
	/**< driver session data size */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_cipher_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}
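
/*
 * Usage sketch (illustrative only): parsing a user-supplied algorithm
 * name, e.g. taken from a command line:
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") == 0)
 *		printf("parsed algo %d\n", algo);
 *
 * On success algo holds RTE_CRYPTO_CIPHER_AES_CBC; a return of -1 means
 * the string matched no known algorithm. The auth/AEAD/xform variants
 * below follow the same pattern.
 */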

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_auth_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_get_aead_algo_enum(algo_string, *algo_enum, ret);

	return ret;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;
	int ret = -1;	/* Invalid string */

	for (i = 1; i < RTE_DIM(crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			ret = 0;
			break;
		}
	}

	rte_cryptodev_trace_asym_get_xform_enum(xform_string, *xform_enum, ret);

	return ret;
}

const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_cipher_algorithm_strings))
		alg_str = crypto_cipher_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_cipher_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_auth_algorithm_strings))
		alg_str = crypto_auth_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_auth_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
{
	const char *alg_str = NULL;

	if ((unsigned int)algo_enum < RTE_DIM(crypto_aead_algorithm_strings))
		alg_str = crypto_aead_algorithm_strings[algo_enum];

	rte_cryptodev_trace_get_aead_algo_string(algo_enum, alg_str);

	return alg_str;
}

const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
{
	const char *xform_str = NULL;

	if ((unsigned int)xform_enum < RTE_DIM(crypto_asym_xform_strings))
		xform_str = crypto_asym_xform_strings[xform_enum];

	rte_cryptodev_trace_asym_get_xform_string(xform_enum, xform_str);

	return xform_str;
}

/**
 * String identifiers for the crypto auth operations.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_symmetric_capability *sym_capability = NULL;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher) {
			sym_capability = &capability->sym;
			break;
		}

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead) {
			sym_capability = &capability->sym;
			break;
		}
	}

	rte_cryptodev_trace_sym_capability_get(dev_id, dev_info.driver_name,
		dev_info.driver_id, idx->type, sym_capability);

	return sym_capability;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
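
/*
 * Worked example for the check above (values are illustrative): a range
 * of { .min = 16, .max = 32, .increment = 8 } accepts 16, 24 and 32 and
 * rejects 20; with .increment = 0 any size inside [min, max] passes,
 * which PMDs typically use for single-value ranges where min == max.
 */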

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap = NULL;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type) {
			asym_cap = &capability->asym.xform_capa;
			break;
		}
	}

	rte_cryptodev_trace_asym_capability_get(dev_info.driver_name,
		dev_info.driver_id, idx->type, asym_cap);

	return asym_cap;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->cipher.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_cipher(capability, key_size,
		iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->auth.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->auth.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_auth(capability, key_size,
		digest_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	int ret = 0; /* success */

	if (param_range_check(key_size, &capability->aead.key_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(digest_size,
		&capability->aead.digest_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0) {
		ret = -1;
		goto done;
	}

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		ret = -1;

done:
	rte_cryptodev_trace_sym_capability_check_aead(capability, key_size,
		digest_size, aad_size, iv_size, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	int ret = 0;

	if (capability->op_types & (1 << op_type))
		ret = 1;

	rte_cryptodev_trace_asym_xform_capability_check_optype(
		capability->op_types, op_type, ret);

	return ret;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	int ret = 0; /* success */

	/* No need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min) {
			ret = -1;
			goto done;
		}
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max) {
			ret = -1;
			goto done;
		}
	}

	/* In any case, check whether the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			ret = -1;
	}

done:
	rte_cryptodev_trace_asym_xform_capability_check_modlen(capability,
		modlen, ret);

	return ret;
}
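
/*
 * Illustrative example: for a capability advertising
 * modlen = { .min = 128, .max = 512, .increment = 8 } (values made up
 * for this sketch, lengths in bytes), a 256-byte (2048-bit) modulus
 * passes, while a 257-byte one fails the increment check above.
 */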

bool
rte_cryptodev_asym_xform_capability_check_hash(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_auth_algorithm hash)
{
	bool ret = false;

	if (capability->hash_algos & (1 << hash))
		ret = true;

	rte_cryptodev_trace_asym_xform_capability_check_hash(
		capability->hash_algos, hash, ret);

	return ret;
}

/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as only one data-plane thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	rte_cryptodev_trace_get_feature_name(flag);

	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;
	unsigned int ret = 1;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		ret = 0;
		goto done;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		ret = 0;

done:
	rte_cryptodev_trace_is_valid_dev(dev_id, ret);

	return ret;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;
	int ret = -1;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)) {
			ret = (int)i;
			break;
		}
	}

	rte_cryptodev_trace_get_dev_id(name, ret);

	return ret;
}

uint8_t
rte_cryptodev_count(void)
{
	rte_cryptodev_trace_count(cryptodev_globals.nb_devs);

	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	rte_cryptodev_trace_device_count_by_driver(driver_id, dev_count);

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	rte_cryptodev_trace_devices_get(driver_name, count);

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	void *sec_ctx = NULL;

	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		sec_ctx = rte_crypto_devices[dev_id].security_ctx;

	rte_cryptodev_trace_get_sec_ctx(dev_id, sec_ctx);

	return sec_ctx;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	rte_cryptodev_trace_socket_id(dev_id, dev->data->name,
		dev->data->socket_id);
	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_cryptodev_trace_queue_pair_count(dev, dev->data->name,
		dev->data->socket_id, dev->data->dev_id,
		dev->data->nb_queue_pairs);

	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
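
/*
 * Typical control-path sequence (illustrative sketch, error handling
 * omitted; sess_mp is assumed to be a session mempool created
 * beforehand): configure the device, set up its queue pairs, then
 * start it.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *	};
 *	uint16_t qp_id;
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++)
 *		rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *				rte_socket_id());
 *	rte_cryptodev_start(dev_id);
 */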

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;
	int ret = 0;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		ret = -EINVAL;
		goto done;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		ret = -EINVAL;
		goto done;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		ret = 1;
		goto done;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

done:
	rte_cryptodev_trace_get_qp_status(dev_id, queue_pair_id, ret);

	return ret;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp,
	uint32_t sess_priv_size)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->sess_data_sz < sess_priv_size)
		return 0;

	return 1;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		if (!rte_cryptodev_sym_is_valid_session_pool(qp_conf->mp_session,
					rte_cryptodev_sym_get_private_session_size(dev_id))) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in FIFO order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_enq_callback(dev_id, qp_id, cb_fn);
	return cb;
}
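
/*
 * Usage sketch (illustrative; count_cb and ctr are made-up names, and
 * the callbacks only take effect when DPDK is built with
 * RTE_CRYPTO_CALLBACKS): observe every burst enqueued on queue pair 0,
 * then remove the callback when done.
 *
 *	static uint16_t
 *	count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*(uint64_t *)arg += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_cb, &ctr);
 *	...
 *	rte_cryptodev_remove_enq_callback(dev_id, 0, cb);
 */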

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
	struct rte_cryptodev_cb *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_enq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			rte_atomic_store_explicit(prev_cb, curr_cb->next,
				rte_memory_order_relaxed);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in FIFO order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		rte_atomic_store_explicit(&list->next, cb, rte_memory_order_release);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	rte_cryptodev_trace_add_deq_callback(dev_id, qp_id, cb_fn);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	RTE_ATOMIC(struct rte_cryptodev_cb *) *prev_cb;
	struct rte_cryptodev_cb *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	rte_cryptodev_trace_remove_deq_callback(dev_id, qp_id, cb->fn);

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	/* Check the dequeue callback list here, not the enqueue one. */
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			rte_atomic_store_explicit(prev_cb, curr_cb->next,
				rte_memory_order_relaxed);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}
1766 
1767 int
1768 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1769 {
1770 	struct rte_cryptodev *dev;
1771 
1772 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1773 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1774 		return -ENODEV;
1775 	}
1776 
1777 	if (stats == NULL) {
1778 		CDEV_LOG_ERR("Invalid stats ptr");
1779 		return -EINVAL;
1780 	}
1781 
1782 	dev = &rte_crypto_devices[dev_id];
1783 	memset(stats, 0, sizeof(*stats));
1784 
1785 	if (*dev->dev_ops->stats_get == NULL)
1786 		return -ENOTSUP;
1787 	(*dev->dev_ops->stats_get)(dev, stats);
1788 
1789 	rte_cryptodev_trace_stats_get(dev_id, stats);
1790 	return 0;
1791 }
1792 
1793 void
1794 rte_cryptodev_stats_reset(uint8_t dev_id)
1795 {
1796 	struct rte_cryptodev *dev;
1797 
1798 	rte_cryptodev_trace_stats_reset(dev_id);
1799 
1800 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1801 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1802 		return;
1803 	}
1804 
1805 	dev = &rte_crypto_devices[dev_id];
1806 
1807 	if (*dev->dev_ops->stats_reset == NULL)
1808 		return;
1809 	(*dev->dev_ops->stats_reset)(dev);
1810 }
1811 
1812 void
1813 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1814 {
1815 	struct rte_cryptodev *dev;
1816 
1817 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1818 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1819 		return;
1820 	}
1821 
1822 	dev = &rte_crypto_devices[dev_id];
1823 
1824 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1825 
1826 	if (*dev->dev_ops->dev_infos_get == NULL)
1827 		return;
1828 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1829 
1830 	dev_info->driver_name = dev->device->driver->name;
1831 	dev_info->device = dev->device;
1832 
1833 	rte_cryptodev_trace_info_get(dev_id, dev_info->driver_name);
1834 
1835 }
1836 
1837 int
1838 rte_cryptodev_callback_register(uint8_t dev_id,
1839 			enum rte_cryptodev_event_type event,
1840 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1841 {
1842 	struct rte_cryptodev *dev;
1843 	struct rte_cryptodev_callback *user_cb;
1844 
1845 	if (!cb_fn)
1846 		return -EINVAL;
1847 
1848 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1849 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1850 		return -EINVAL;
1851 	}
1852 
1853 	dev = &rte_crypto_devices[dev_id];
1854 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1855 
1856 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1857 		if (user_cb->cb_fn == cb_fn &&
1858 			user_cb->cb_arg == cb_arg &&
1859 			user_cb->event == event) {
1860 			break;
1861 		}
1862 	}
1863 
1864 	/* create a new callback. */
1865 	if (user_cb == NULL) {
1866 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1867 				sizeof(struct rte_cryptodev_callback), 0);
1868 		if (user_cb != NULL) {
1869 			user_cb->cb_fn = cb_fn;
1870 			user_cb->cb_arg = cb_arg;
1871 			user_cb->event = event;
1872 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1873 		}
1874 	}
1875 
1876 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1877 
1878 	rte_cryptodev_trace_callback_register(dev_id, event, cb_fn);
1879 	return (user_cb == NULL) ? -ENOMEM : 0;
1880 }
1881 
1882 int
1883 rte_cryptodev_callback_unregister(uint8_t dev_id,
1884 			enum rte_cryptodev_event_type event,
1885 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1886 {
1887 	int ret;
1888 	struct rte_cryptodev *dev;
1889 	struct rte_cryptodev_callback *cb, *next;
1890 
1891 	if (!cb_fn)
1892 		return -EINVAL;
1893 
1894 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1895 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1896 		return -EINVAL;
1897 	}
1898 
1899 	dev = &rte_crypto_devices[dev_id];
1900 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1901 
1902 	ret = 0;
1903 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1904 
1905 		next = TAILQ_NEXT(cb, next);
1906 
1907 		if (cb->cb_fn != cb_fn || cb->event != event ||
1908 				(cb->cb_arg != (void *)-1 &&
1909 				cb->cb_arg != cb_arg))
1910 			continue;
1911 
1912 		/*
1913 		 * if this callback is not executing right now,
1914 		 * then remove it.
1915 		 */
1916 		if (cb->active == 0) {
1917 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1918 			rte_free(cb);
1919 		} else {
1920 			ret = -EAGAIN;
1921 		}
1922 	}
1923 
1924 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1925 
1926 	rte_cryptodev_trace_callback_unregister(dev_id, event, cb_fn);
1927 	return ret;
1928 }
1929 
1930 void
1931 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1932 	enum rte_cryptodev_event_type event)
1933 {
1934 	struct rte_cryptodev_callback *cb_lst;
1935 	struct rte_cryptodev_callback dev_cb;
1936 
1937 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1938 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1939 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1940 			continue;
1941 		dev_cb = *cb_lst;
1942 		cb_lst->active = 1;
1943 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1944 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1945 						dev_cb.cb_arg);
1946 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
1947 		cb_lst->active = 0;
1948 	}
1949 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1950 }
1951 
1952 int
1953 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
1954 {
1955 	struct rte_cryptodev *dev;
1956 
1957 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1958 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1959 		return -EINVAL;
1960 	}
1961 	dev = &rte_crypto_devices[dev_id];
1962 
1963 	if (qp_id >= dev->data->nb_queue_pairs)
1964 		return -EINVAL;
1965 	if (*dev->dev_ops->queue_pair_event_error_query == NULL)
1966 		return -ENOTSUP;
1967 
1968 	return dev->dev_ops->queue_pair_event_error_query(dev, qp_id);
1969 }
1970 
1971 struct rte_mempool *
1972 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1973 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1974 	int socket_id)
1975 {
1976 	struct rte_mempool *mp;
1977 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1978 	uint32_t obj_sz;
1979 
1980 	obj_sz = sizeof(struct rte_cryptodev_sym_session) + elt_size + user_data_size;
1981 
1982 	obj_sz = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
1983 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1984 			(uint32_t)(sizeof(*pool_priv)), NULL, NULL,
1985 			NULL, NULL,
1986 			socket_id, 0);
1987 	if (mp == NULL) {
1988 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
1989 			__func__, name, rte_errno);
1990 		return NULL;
1991 	}
1992 
1993 	pool_priv = rte_mempool_get_priv(mp);
1994 	if (!pool_priv) {
1995 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
1996 			__func__, name);
1997 		rte_mempool_free(mp);
1998 		return NULL;
1999 	}
2000 
2001 	pool_priv->sess_data_sz = elt_size;
2002 	pool_priv->user_data_sz = user_data_size;
2003 
2004 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
2005 		elt_size, cache_size, user_data_size, mp);
2006 	return mp;
2007 }
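
/*
 * Illustrative usage sketch; the pool name and sizing values are
 * placeholders. The element private-data size comes from the device;
 * when sessions may be shared across devices, use the maximum private
 * session size over all of them.
 *
 *	uint32_t sz = rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *sess_pool =
 *		rte_cryptodev_sym_session_pool_create("sym_sess_pool",
 *			1024, sz, 32, 0, rte_socket_id());
 *	if (sess_pool == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create session pool\n");
 */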
2008 
2009 struct rte_mempool *
2010 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
2011 	uint32_t cache_size, uint16_t user_data_size, int socket_id)
2012 {
2013 	struct rte_mempool *mp;
2014 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2015 	uint32_t obj_sz, obj_sz_aligned;
2016 	uint8_t dev_id;
2017 	unsigned int priv_sz, max_priv_sz = 0;
2018 
2019 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2020 		if (rte_cryptodev_is_valid_dev(dev_id)) {
2021 			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
2022 			if (priv_sz > max_priv_sz)
2023 				max_priv_sz = priv_sz;
2024 		}
2025 	if (max_priv_sz == 0) {
2026 		CDEV_LOG_INFO("Could not determine max private session size: no valid crypto device found");
2027 		return NULL;
2028 	}
2029 
2030 	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
2031 			user_data_size;
2032 	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
2033 
2034 	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
2035 			(uint32_t)(sizeof(*pool_priv)),
2036 			NULL, NULL, NULL, NULL,
2037 			socket_id, 0);
2038 	if (mp == NULL) {
2039 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
2040 			__func__, name, rte_errno);
2041 		return NULL;
2042 	}
2043 
2044 	pool_priv = rte_mempool_get_priv(mp);
2045 	if (!pool_priv) {
2046 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
2047 			__func__, name);
2048 		rte_mempool_free(mp);
2049 		return NULL;
2050 	}
2051 	pool_priv->max_priv_session_sz = max_priv_sz;
2052 	pool_priv->user_data_sz = user_data_size;
2053 
2054 	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
2055 		user_data_size, cache_size, mp);
2056 	return mp;
2057 }
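
/*
 * Illustrative usage sketch with placeholder sizes. Unlike the symmetric
 * variant, no element size is passed: as the code above shows, the
 * function itself scans all valid devices for the largest private
 * session size.
 *
 *	struct rte_mempool *asym_pool =
 *		rte_cryptodev_asym_session_pool_create("asym_sess_pool",
 *			128, 0, 0, rte_socket_id());
 */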
2058 
2059 void *
2060 rte_cryptodev_sym_session_create(uint8_t dev_id,
2061 		struct rte_crypto_sym_xform *xforms,
2062 		struct rte_mempool *mp)
2063 {
2064 	struct rte_cryptodev *dev;
2065 	struct rte_cryptodev_sym_session *sess;
2066 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2067 	uint32_t sess_priv_sz;
2068 	int ret;
2069 
2070 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2071 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2072 		rte_errno = EINVAL;
2073 		return NULL;
2074 	}
2075 
2076 	if (xforms == NULL) {
2077 		CDEV_LOG_ERR("Invalid xform");
2078 		rte_errno = EINVAL;
2079 		return NULL;
2080 	}
2081 
2082 	sess_priv_sz = rte_cryptodev_sym_get_private_session_size(dev_id);
2083 	if (!rte_cryptodev_sym_is_valid_session_pool(mp, sess_priv_sz)) {
2084 		CDEV_LOG_ERR("Invalid mempool");
2085 		rte_errno = EINVAL;
2086 		return NULL;
2087 	}
2088 
2089 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2090 
2091 	/* Allocate a session structure from the session pool */
2092 	if (rte_mempool_get(mp, (void **)&sess)) {
2093 		CDEV_LOG_ERR("couldn't get object from session mempool");
2094 		rte_errno = ENOMEM;
2095 		return NULL;
2096 	}
2097 
2098 	pool_priv = rte_mempool_get_priv(mp);
2099 	sess->driver_id = dev->driver_id;
2100 	sess->sess_data_sz = pool_priv->sess_data_sz;
2101 	sess->user_data_sz = pool_priv->user_data_sz;
2102 	sess->driver_priv_data_iova = rte_mempool_virt2iova(sess) +
2103 		offsetof(struct rte_cryptodev_sym_session, driver_priv_data);
2104 
2105 	if (dev->dev_ops->sym_session_configure == NULL) {
2106 		rte_errno = ENOTSUP;
2107 		goto error_exit;
2108 	}
2109 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2110 
2111 	ret = dev->dev_ops->sym_session_configure(dev, xforms, sess);
2112 	if (ret < 0) {
2113 		rte_errno = -ret;
2114 		goto error_exit;
2115 	}
2117 
2118 	rte_cryptodev_trace_sym_session_create(dev_id, sess, xforms, mp);
2119 
2120 	return (void *)sess;
2121 error_exit:
2122 	rte_mempool_put(mp, (void *)sess);
2123 	return NULL;
2124 }
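
/*
 * Illustrative usage sketch; dev_id, key, iv_offset and sess_pool are
 * application-side placeholders and error handling is trimmed.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 *	void *sess = rte_cryptodev_sym_session_create(dev_id, &xform,
 *			sess_pool);
 *	if (sess == NULL)
 *		printf("session creation failed: %d\n", rte_errno);
 */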
2125 
2126 int
2127 rte_cryptodev_asym_session_create(uint8_t dev_id,
2128 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
2129 		void **session)
2130 {
2131 	struct rte_cryptodev_asym_session *sess;
2132 	uint32_t session_priv_data_sz;
2133 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
2134 	unsigned int session_header_size =
2135 			rte_cryptodev_asym_get_header_session_size();
2136 	struct rte_cryptodev *dev;
2137 	int ret;
2138 
2139 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2140 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2141 		return -EINVAL;
2142 	}
2143 
2144 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2145 
2146 	if (dev == NULL)
2147 		return -EINVAL;
2148 
2149 	if (!mp) {
2150 		CDEV_LOG_ERR("invalid mempool");
2151 		return -EINVAL;
2152 	}
2153 
2154 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
2155 			dev_id);
2156 	pool_priv = rte_mempool_get_priv(mp);
2157 
2158 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
2159 		CDEV_LOG_ERR("Mempool private session size %u is smaller than this device's private session size %u",
2160 			pool_priv->max_priv_session_sz, session_priv_data_sz);
2161 		return -EINVAL;
2162 	}
2163 
2164 	/* Verify if provided mempool can hold elements big enough. */
2165 	/* Verify that the mempool's elements are large enough to hold a session. */
2166 		CDEV_LOG_ERR(
2167 			"mempool elements too small to hold session objects");
2168 		return -EINVAL;
2169 	}
2170 
2171 	/* Allocate a session structure from the session pool */
2172 	if (rte_mempool_get(mp, session)) {
2173 		CDEV_LOG_ERR("couldn't get object from session mempool");
2174 		return -ENOMEM;
2175 	}
2176 
2177 	sess = *session;
2178 	sess->driver_id = dev->driver_id;
2179 	sess->user_data_sz = pool_priv->user_data_sz;
2180 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
2181 
2182 	/* Clear the session private data and user data area. */
2183 	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
2184 
2185 	if (*dev->dev_ops->asym_session_configure == NULL) {
2186 		rte_mempool_put(mp, *session);
2187 		return -ENOTSUP;
2188 	}
2189 	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
2190 	if (ret < 0) {
2191 		CDEV_LOG_ERR(
2192 			"dev_id %d failed to configure session details",
2193 			dev_id);
2194 		rte_mempool_put(mp, *session);
2195 		return ret;
2196 	}
2197 
2198 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2199 	return 0;
2200 }
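
/*
 * Illustrative usage sketch; the modulus/exponent buffers and asym_pool
 * are placeholders. Unlike the symmetric API, the session is returned
 * through an output parameter and errors come back as a return value.
 *
 *	struct rte_crypto_asym_xform xform = {
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *		.modex = {
 *			.modulus = { .data = mod, .length = mod_len },
 *			.exponent = { .data = exp, .length = exp_len },
 *		},
 *	};
 *	void *asym_sess;
 *	int rc = rte_cryptodev_asym_session_create(dev_id, &xform,
 *			asym_pool, &asym_sess);
 */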
2201 
2202 int
2203 rte_cryptodev_sym_session_free(uint8_t dev_id, void *_sess)
2204 {
2205 	struct rte_cryptodev *dev;
2206 	struct rte_mempool *sess_mp;
2207 	struct rte_cryptodev_sym_session *sess = _sess;
2208 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
2209 
2210 	if (sess == NULL)
2211 		return -EINVAL;
2212 
2213 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2214 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2215 		return -EINVAL;
2216 	}
2217 
2218 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2219 
2220 	if (dev == NULL)
2221 		return -EINVAL;
2222 
2223 	sess_mp = rte_mempool_from_obj(sess);
2224 	if (!sess_mp)
2225 		return -EINVAL;
2226 	pool_priv = rte_mempool_get_priv(sess_mp);
2227 
2228 	if (sess->driver_id != dev->driver_id) {
2229 		CDEV_LOG_ERR("Session created by driver %u but freed by %u",
2230 			sess->driver_id, dev->driver_id);
2231 		return -EINVAL;
2232 	}
2233 
2234 	if (*dev->dev_ops->sym_session_clear == NULL)
2235 		return -ENOTSUP;
2236 
2237 	dev->dev_ops->sym_session_clear(dev, sess);
2238 
2239 	memset(sess->driver_priv_data, 0, pool_priv->sess_data_sz + pool_priv->user_data_sz);
2240 
2241 	/* Return session to mempool */
2242 	rte_mempool_put(sess_mp, sess);
2243 
2244 	rte_cryptodev_trace_sym_session_free(dev_id, sess);
2245 	return 0;
2246 }
2247 
2248 int
2249 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2250 {
2251 	struct rte_mempool *sess_mp;
2252 	struct rte_cryptodev *dev;
2253 
2254 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2255 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2256 		return -EINVAL;
2257 	}
2258 
2259 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2260 
2261 	if (dev == NULL || sess == NULL)
2262 		return -EINVAL;
2263 
2264 	if (*dev->dev_ops->asym_session_clear == NULL)
2265 		return -ENOTSUP;
2266 
2267 	dev->dev_ops->asym_session_clear(dev, sess);
2268 
2269 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2270 
2271 	/* Return session to mempool */
2272 	sess_mp = rte_mempool_from_obj(sess);
2273 	rte_mempool_put(sess_mp, sess);
2274 
2275 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2276 	return 0;
2277 }
2278 
2279 unsigned int
2280 rte_cryptodev_asym_get_header_session_size(void)
2281 {
2282 	return sizeof(struct rte_cryptodev_asym_session);
2283 }
2284 
2285 unsigned int
2286 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2287 {
2288 	struct rte_cryptodev *dev;
2289 	unsigned int priv_sess_size;
2290 
2291 	if (!rte_cryptodev_is_valid_dev(dev_id))
2292 		return 0;
2293 
2294 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2295 
2296 	if (*dev->dev_ops->sym_session_get_size == NULL)
2297 		return 0;
2298 
2299 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2300 
2301 	rte_cryptodev_trace_sym_get_private_session_size(dev_id,
2302 		priv_sess_size);
2303 
2304 	return priv_sess_size;
2305 }
2306 
2307 unsigned int
2308 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2309 {
2310 	struct rte_cryptodev *dev;
2311 	unsigned int priv_sess_size;
2312 
2313 	if (!rte_cryptodev_is_valid_dev(dev_id))
2314 		return 0;
2315 
2316 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2317 
2318 	if (*dev->dev_ops->asym_session_get_size == NULL)
2319 		return 0;
2320 
2321 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2322 
2323 	rte_cryptodev_trace_asym_get_private_session_size(dev_id,
2324 		priv_sess_size);
2325 
2326 	return priv_sess_size;
2327 }
2328 
2329 int
2330 rte_cryptodev_sym_session_set_user_data(void *_sess, void *data,
2331 		uint16_t size)
2332 {
2333 	struct rte_cryptodev_sym_session *sess = _sess;
2334 
2335 	if (sess == NULL)
2336 		return -EINVAL;
2337 
2338 	if (sess->user_data_sz < size)
2339 		return -ENOMEM;
2340 
2341 	rte_memcpy(sess->driver_priv_data + sess->sess_data_sz, data, size);
2342 
2343 	rte_cryptodev_trace_sym_session_set_user_data(sess, data, size);
2344 
2345 	return 0;
2346 }
2347 
2348 void *
2349 rte_cryptodev_sym_session_get_user_data(void *_sess)
2350 {
2351 	struct rte_cryptodev_sym_session *sess = _sess;
2352 	void *data = NULL;
2353 
2354 	if (sess == NULL || sess->user_data_sz == 0)
2355 		return NULL;
2356 
2357 	data = (void *)(sess->driver_priv_data + sess->sess_data_sz);
2358 
2359 	rte_cryptodev_trace_sym_session_get_user_data(sess, data);
2360 
2361 	return data;
2362 }
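
/*
 * Illustrative round trip; the context struct is a placeholder. The
 * user-data area exists only if the session pool was created with a
 * user_data_size of at least sizeof(struct app_sess_ctx).
 *
 *	struct app_sess_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	struct app_sess_ctx *p =
 *		rte_cryptodev_sym_session_get_user_data(sess);
 */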
2363 
2364 int
2365 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2366 {
2367 	struct rte_cryptodev_asym_session *sess = session;
2368 	if (sess == NULL)
2369 		return -EINVAL;
2370 
2371 	if (sess->user_data_sz < size)
2372 		return -ENOMEM;
2373 
2374 	rte_memcpy(sess->sess_private_data +
2375 			sess->max_priv_data_sz,
2376 			data, size);
2377 
2378 	rte_cryptodev_trace_asym_session_set_user_data(sess, data, size);
2379 
2380 	return 0;
2381 }
2382 
2383 void *
2384 rte_cryptodev_asym_session_get_user_data(void *session)
2385 {
2386 	struct rte_cryptodev_asym_session *sess = session;
2387 	void *data = NULL;
2388 
2389 	if (sess == NULL || sess->user_data_sz == 0)
2390 		return NULL;
2391 
2392 	data = (void *)(sess->sess_private_data + sess->max_priv_data_sz);
2393 
2394 	rte_cryptodev_trace_asym_session_get_user_data(sess, data);
2395 
2396 	return data;
2397 }
2398 
2399 static inline void
2400 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2401 {
2402 	uint32_t i;
2403 	for (i = 0; i < vec->num; i++)
2404 		vec->status[i] = errnum;
2405 }
2406 
2407 uint32_t
2408 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2409 	void *_sess, union rte_crypto_sym_ofs ofs,
2410 	struct rte_crypto_sym_vec *vec)
2411 {
2412 	struct rte_cryptodev *dev;
2413 	struct rte_cryptodev_sym_session *sess = _sess;
2414 
2415 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2416 		sym_crypto_fill_status(vec, EINVAL);
2417 		return 0;
2418 	}
2419 
2420 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2421 
2422 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2423 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2424 		sym_crypto_fill_status(vec, ENOTSUP);
2425 		return 0;
2426 	}
2427 
2428 	rte_cryptodev_trace_sym_cpu_crypto_process(dev_id, sess);
2429 
2430 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2431 }
2432 
2433 int
2434 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2435 {
2436 	struct rte_cryptodev *dev;
2437 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2438 	int32_t priv_size;
2439 
2440 	if (!rte_cryptodev_is_valid_dev(dev_id))
2441 		return -EINVAL;
2442 
2443 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2444 
2445 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2446 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2447 		return -ENOTSUP;
2448 	}
2449 
2450 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2451 	if (priv_size < 0)
2452 		return -ENOTSUP;
2453 
2454 	rte_cryptodev_trace_get_raw_dp_ctx_size(dev_id);
2455 
2456 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2457 }
2458 
2459 int
2460 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2461 	struct rte_crypto_raw_dp_ctx *ctx,
2462 	enum rte_crypto_op_sess_type sess_type,
2463 	union rte_cryptodev_session_ctx session_ctx,
2464 	uint8_t is_update)
2465 {
2466 	struct rte_cryptodev *dev;
2467 
2468 	if (rte_cryptodev_get_qp_status(dev_id, qp_id) != 1)
2469 		return -EINVAL;
2470 
2471 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2472 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2473 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2474 		return -ENOTSUP;
2475 
2476 	rte_cryptodev_trace_configure_raw_dp_ctx(dev_id, qp_id, sess_type);
2477 
2478 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2479 			sess_type, session_ctx, is_update);
2480 }
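
/*
 * Illustrative setup sketch; qp_id and sess are placeholders and the
 * negative-size error path is trimmed. The context is sized by the
 * driver, allocated by the application, then bound to a queue pair and
 * a session.
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 8);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */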
2481 
2482 int
2483 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2484 	enum rte_crypto_op_type op_type,
2485 	enum rte_crypto_op_sess_type sess_type,
2486 	void *ev_mdata,
2487 	uint16_t size)
2488 {
2489 	struct rte_cryptodev *dev;
2490 
2491 	if (sess == NULL || ev_mdata == NULL)
2492 		return -EINVAL;
2493 
2494 	if (!rte_cryptodev_is_valid_dev(dev_id))
2495 		goto skip_pmd_op;
2496 
2497 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2498 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2499 		goto skip_pmd_op;
2500 
2501 	rte_cryptodev_trace_session_event_mdata_set(dev_id, sess, op_type,
2502 		sess_type, ev_mdata, size);
2503 
2504 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2505 			sess_type, ev_mdata);
2506 
2507 skip_pmd_op:
2508 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2509 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2510 				size);
2511 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2512 		struct rte_cryptodev_asym_session *s = sess;
2513 
2514 		if (s->event_mdata == NULL) {
2515 			s->event_mdata = rte_malloc(NULL, size, 0);
2516 			if (s->event_mdata == NULL)
2517 				return -ENOMEM;
2518 		}
2519 		rte_memcpy(s->event_mdata, ev_mdata, size);
2520 
2521 		return 0;
2522 	} else
2523 		return -ENOTSUP;
2524 }
2525 
2526 uint32_t
2527 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2528 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2529 	void **user_data, int *enqueue_status)
2530 {
2531 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2532 			ofs, user_data, enqueue_status);
2533 }
2534 
2535 int
2536 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2537 		uint32_t n)
2538 {
2539 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2540 }
2541 
2542 uint32_t
2543 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2544 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2545 	uint32_t max_nb_to_dequeue,
2546 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2547 	void **out_user_data, uint8_t is_user_data_array,
2548 	uint32_t *n_success_jobs, int *status)
2549 {
2550 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2551 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2552 		out_user_data, is_user_data_array, n_success_jobs, status);
2553 }
2554 
2555 int
2556 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2557 		uint32_t n)
2558 {
2559 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2560 }
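
/*
 * Typical raw data-path round trip, as a sketch: vec, ofs, user_data,
 * out_user_data and post_dequeue are placeholders prepared by the
 * application. Passing a NULL count callback makes the driver use the
 * max_nb_to_dequeue value instead.
 *
 *	int status;
 *	uint32_t ok;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			user_data, &status);
 *	rte_cryptodev_raw_enqueue_done(ctx, n);
 *	...
 *	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, 32, post_dequeue,
 *			out_user_data, 1, &ok, &status);
 *	rte_cryptodev_raw_dequeue_done(ctx, n);
 */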
2561 
2562 /** Initialise rte_crypto_op mempool element */
2563 static void
2564 rte_crypto_op_init(struct rte_mempool *mempool,
2565 		void *opaque_arg,
2566 		void *_op_data,
2567 		__rte_unused unsigned i)
2568 {
2569 	struct rte_crypto_op *op = _op_data;
2570 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2571 
2572 	memset(_op_data, 0, mempool->elt_size);
2573 
2574 	__rte_crypto_op_reset(op, type);
2575 
2576 	op->phys_addr = rte_mempool_virt2iova(_op_data);
2577 	op->mempool = mempool;
2578 }
2579 
2580 
2581 struct rte_mempool *
2582 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2583 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2584 		int socket_id)
2585 {
2586 	struct rte_crypto_op_pool_private *priv;
2587 
2588 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2589 			priv_size;
2590 
2591 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2592 		elt_size += sizeof(struct rte_crypto_sym_op);
2593 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2594 		elt_size += sizeof(struct rte_crypto_asym_op);
2595 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2596 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2597 		                    sizeof(struct rte_crypto_asym_op));
2598 	} else {
2599 		CDEV_LOG_ERR("Invalid op_type");
2600 		return NULL;
2601 	}
2602 
2603 	/* lookup mempool in case already allocated */
2604 	struct rte_mempool *mp = rte_mempool_lookup(name);
2605 
2606 	if (mp != NULL) {
2607 		priv = (struct rte_crypto_op_pool_private *)
2608 				rte_mempool_get_priv(mp);
2609 
2610 		if (mp->elt_size != elt_size ||
2611 				mp->cache_size < cache_size ||
2612 				mp->size < nb_elts ||
2613 				priv->priv_size < priv_size) {
2615 			CDEV_LOG_ERR("Mempool %s already exists but with "
2616 					"incompatible parameters", name);
2617 			return NULL;
2618 		}
2619 		return mp;
2620 	}
2621 
2622 	mp = rte_mempool_create(
2623 			name,
2624 			nb_elts,
2625 			elt_size,
2626 			cache_size,
2627 			sizeof(struct rte_crypto_op_pool_private),
2628 			NULL,
2629 			NULL,
2630 			rte_crypto_op_init,
2631 			&type,
2632 			socket_id,
2633 			0);
2634 
2635 	if (mp == NULL) {
2636 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2637 		return NULL;
2638 	}
2639 
2640 	priv = (struct rte_crypto_op_pool_private *)
2641 			rte_mempool_get_priv(mp);
2642 
2643 	priv->priv_size = priv_size;
2644 	priv->type = type;
2645 
2646 	rte_cryptodev_trace_op_pool_create(name, socket_id, type, nb_elts, mp);
2647 	return mp;
2648 }
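
/*
 * Illustrative usage sketch with placeholder sizes: a pool of symmetric
 * ops with 16 bytes of per-op private area (commonly used to carry the
 * IV), then a single alloc/free cycle.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			4096, 128, 16, rte_socket_id());
 *	struct rte_crypto_op *op =
 *		rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 *	...
 *	rte_crypto_op_free(op);
 */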
2649 
2650 int
2651 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2652 {
2653 	struct rte_cryptodev *dev = NULL;
2654 	uint32_t i = 0;
2655 
2656 	if (name == NULL)
2657 		return -EINVAL;
2658 
2659 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2660 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2661 				"%s_%u", dev_name_prefix, i);
2662 
2663 		if (ret < 0 || ret >= RTE_CRYPTODEV_NAME_MAX_LEN)
2664 			return ret < 0 ? ret : -ENAMETOOLONG;
2665 
2666 		dev = rte_cryptodev_pmd_get_named_dev(name);
2667 		if (!dev)
2668 			return 0;
2669 	}
2670 
2671 	return -1;
2672 }
2673 
2674 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2675 
2676 static struct cryptodev_driver_list cryptodev_driver_list =
2677 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2678 
2679 int
2680 rte_cryptodev_driver_id_get(const char *name)
2681 {
2682 	struct cryptodev_driver *driver;
2683 	const char *driver_name;
2684 	int driver_id = -1;
2685 
2686 	if (name == NULL) {
2687 		CDEV_LOG_DEBUG("name pointer NULL");
2688 		return -1;
2689 	}
2690 
2691 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2692 		driver_name = driver->driver->name;
2693 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0) {
2694 			driver_id = driver->id;
2695 			break;
2696 		}
2697 	}
2698 
2699 	rte_cryptodev_trace_driver_id_get(name, driver_id);
2700 
2701 	return driver_id;
2702 }
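
/*
 * Illustrative lookup; the driver name is an example and depends on
 * which PMDs are built in:
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *	if (drv_id < 0)
 *		printf("driver not registered\n");
 */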
2703 
2704 const char *
2705 rte_cryptodev_name_get(uint8_t dev_id)
2706 {
2707 	struct rte_cryptodev *dev;
2708 
2709 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2710 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2711 		return NULL;
2712 	}
2713 
2714 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2715 	if (dev == NULL)
2716 		return NULL;
2717 
2718 	rte_cryptodev_trace_name_get(dev_id, dev->data->name);
2719 
2720 	return dev->data->name;
2721 }
2722 
2723 const char *
2724 rte_cryptodev_driver_name_get(uint8_t driver_id)
2725 {
2726 	struct cryptodev_driver *driver;
2727 
2728 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2729 		if (driver->id == driver_id) {
2730 			rte_cryptodev_trace_driver_name_get(driver_id,
2731 				driver->driver->name);
2732 			return driver->driver->name;
2733 		}
2734 	}
2735 	return NULL;
2736 }
2737 
2738 uint8_t
2739 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2740 		const struct rte_driver *drv)
2741 {
2742 	crypto_drv->driver = drv;
2743 	crypto_drv->id = nb_drivers;
2744 
2745 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2746 
2747 	rte_cryptodev_trace_allocate_driver(drv->name);
2748 
2749 	return nb_drivers++;
2750 }
2751 
2752 RTE_INIT(cryptodev_init_fp_ops)
2753 {
2754 	uint32_t i;
2755 
2756 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2757 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2758 }
2759 
2760 static int
2761 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2762 		const char *params __rte_unused,
2763 		struct rte_tel_data *d)
2764 {
2765 	int dev_id;
2766 
2767 	if (rte_cryptodev_count() < 1)
2768 		return -EINVAL;
2769 
2770 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2771 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2772 		if (rte_cryptodev_is_valid_dev(dev_id))
2773 			rte_tel_data_add_array_int(d, dev_id);
2774 
2775 	return 0;
2776 }
2777 
2778 static int
2779 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2780 		const char *params, struct rte_tel_data *d)
2781 {
2782 	struct rte_cryptodev_info cryptodev_info;
2783 	int dev_id;
2784 	char *end_param;
2785 
2786 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2787 		return -EINVAL;
2788 
2789 	dev_id = strtoul(params, &end_param, 0);
2790 	if (*end_param != '\0')
2791 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2792 	if (!rte_cryptodev_is_valid_dev(dev_id))
2793 		return -EINVAL;
2794 
2795 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2796 
2797 	rte_tel_data_start_dict(d);
2798 	rte_tel_data_add_dict_string(d, "device_name",
2799 		cryptodev_info.device->name);
2800 	rte_tel_data_add_dict_uint(d, "max_nb_queue_pairs",
2801 		cryptodev_info.max_nb_queue_pairs);
2802 
2803 	return 0;
2804 }
2805 
2806 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_uint(d, #s, cryptodev_stats.s)
2807 
2808 static int
2809 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2810 		const char *params,
2811 		struct rte_tel_data *d)
2812 {
2813 	struct rte_cryptodev_stats cryptodev_stats;
2814 	int dev_id, ret;
2815 	char *end_param;
2816 
2817 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2818 		return -EINVAL;
2819 
2820 	dev_id = strtoul(params, &end_param, 0);
2821 	if (*end_param != '\0')
2822 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2823 	if (!rte_cryptodev_is_valid_dev(dev_id))
2824 		return -EINVAL;
2825 
2826 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2827 	if (ret < 0)
2828 		return ret;
2829 
2830 	rte_tel_data_start_dict(d);
2831 	ADD_DICT_STAT(enqueued_count);
2832 	ADD_DICT_STAT(dequeued_count);
2833 	ADD_DICT_STAT(enqueue_err_count);
2834 	ADD_DICT_STAT(dequeue_err_count);
2835 
2836 	return 0;
2837 }
2838 
2839 #define CRYPTO_CAPS_SZ                                             \
2840 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2841 					sizeof(uint64_t)) /        \
2842 	 sizeof(uint64_t))
2843 
2844 static int
2845 crypto_caps_array(struct rte_tel_data *d,
2846 		  const struct rte_cryptodev_capabilities *capabilities)
2847 {
2848 	const struct rte_cryptodev_capabilities *dev_caps;
2849 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2850 	unsigned int i = 0, j;
2851 
2852 	rte_tel_data_start_array(d, RTE_TEL_UINT_VAL);
2853 
2854 	while ((dev_caps = &capabilities[i++])->op !=
2855 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2856 		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
2857 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2858 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2859 			rte_tel_data_add_array_uint(d, caps_val[j]);
2860 	}
2861 
2862 	return i - 1; /* exclude the terminating RTE_CRYPTO_OP_TYPE_UNDEFINED entry */
2863 }
2864 
2865 static int
2866 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2867 			  struct rte_tel_data *d)
2868 {
2869 	struct rte_cryptodev_info dev_info;
2870 	struct rte_tel_data *crypto_caps;
2871 	int crypto_caps_n;
2872 	char *end_param;
2873 	int dev_id;
2874 
2875 	if (!params || strlen(params) == 0 || !isdigit(*params))
2876 		return -EINVAL;
2877 
2878 	dev_id = strtoul(params, &end_param, 0);
2879 	if (*end_param != '\0')
2880 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2881 	if (!rte_cryptodev_is_valid_dev(dev_id))
2882 		return -EINVAL;
2883 
2884 	rte_tel_data_start_dict(d);
2885 	crypto_caps = rte_tel_data_alloc();
2886 	if (!crypto_caps)
2887 		return -ENOMEM;
2888 
2889 	rte_cryptodev_info_get(dev_id, &dev_info);
2890 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2891 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2892 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2893 
2894 	return 0;
2895 }
2896 
2897 RTE_INIT(cryptodev_init_telemetry)
2898 {
2899 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2900 			"Returns information for a cryptodev. Parameters: int dev_id");
2901 	rte_telemetry_register_cmd("/cryptodev/list",
2902 			cryptodev_handle_dev_list,
2903 			"Returns list of available crypto devices by IDs. No parameters.");
2904 	rte_telemetry_register_cmd("/cryptodev/stats",
2905 			cryptodev_handle_dev_stats,
2906 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2907 	rte_telemetry_register_cmd("/cryptodev/caps",
2908 			cryptodev_handle_dev_caps,
2909 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2910 }
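
/*
 * Illustrative only: the endpoints registered above can be queried from
 * the interactive telemetry client shipped with DPDK (a running DPDK
 * process and device 0 are assumed):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /cryptodev/list
 *	--> /cryptodev/info,0
 *	--> /cryptodev/stats,0
 *	--> /cryptodev/caps,0
 */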
2911