/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameter, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * Crypto cipher algorithm string identifiers, suitable for use
 * on the application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * Crypto cipher operation string identifiers, suitable for use
 * on the application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * Crypto auth algorithm string identifiers, suitable for use
 * on the application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * Crypto AEAD algorithm string identifiers, suitable for use
 * on the application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * Crypto AEAD operation string identifiers, suitable for use
 * on the application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * Asymmetric crypto transform operation string identifiers.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * Asymmetric crypto operation string identifiers.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * Asymmetric crypto key exchange operation string identifiers.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

/**
 * The private data structure stored in the sym session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
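
/*
 * Illustrative usage sketch (not part of the library): resolving a
 * command-line algorithm name via the lookup above. The auth, AEAD and
 * asym variants below follow the same pattern.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") != 0)
 *		rte_exit(EXIT_FAILURE, "Unknown cipher algorithm\n");
 *	// algo is now RTE_CRYPTO_CIPHER_AES_CBC
 */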

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

/**
 * Crypto auth operation string identifiers, suitable for use
 * on the application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
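
/*
 * Illustrative usage sketch (not part of the library): querying a
 * device's AES-CBC capability before creating a session, then
 * validating the intended key and IV sizes against it.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		return -ENOTSUP; // 128-bit key / 16-byte IV unsupported
 */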

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
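
/*
 * Worked example of the range semantics checked above (illustrative
 * values): with min = 16, max = 32 and increment = 8, the supported
 * sizes are exactly 16, 24 and 32. An increment of 0 means the range
 * collapses to the single interval [min, max], so any size between
 * the bounds passes.
 */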

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
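
/*
 * Illustrative usage sketch (not part of the library): validating
 * AES-GCM parameters against a capability previously returned by
 * rte_cryptodev_sym_capability_get(). The sizes are example values.
 *
 *	// 32-byte key, 16-byte digest, 16 bytes of AAD, 12-byte IV
 *	if (rte_cryptodev_sym_capability_check_aead(cap, 32, 16, 16, 12) != 0)
 *		return -ENOTSUP;
 */
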
int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* No need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* In any case, check that the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

/* spinlock for crypto device enq/deq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as only one DP thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
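
/*
 * Illustrative usage sketch (not part of the library): collecting the
 * identifiers of all attached devices bound to a given driver. The
 * driver name "crypto_aesni_mb" is only an example.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_DIM(ids));
 *
 *	for (uint8_t j = 0; j < nb; j++)
 *		printf("dev %u uses crypto_aesni_mb\n", ids[j]);
 */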

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
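
/*
 * Illustrative control-path sketch (not part of the library): the
 * usual sequence is configure, then set up each queue pair, then
 * start the device. sess_mp and sess_priv_mp are assumed to be
 * mempools created beforehand (see the session pool APIs below);
 * the descriptor count and queue-pair count are example values.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return -1;
 *	for (uint16_t qp = 0; qp < conf.nb_queue_pairs; qp++)
 *		if (rte_cryptodev_queue_pair_setup(dev_id, qp, &qp_conf,
 *				conf.socket_id) < 0)
 *			return -1;
 *	if (rte_cryptodev_start(dev_id) < 0)
 *		return -1;
 */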

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}
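
/*
 * Illustrative usage sketch (not part of the library): a user-defined
 * enqueue callback that counts operations as they pass through the
 * data plane. The names cb_count_ops and op_counter are hypothetical.
 *
 *	static uint16_t
 *	cb_count_ops(uint16_t dev_id, uint16_t qp_id,
 *		struct rte_crypto_op **ops, uint16_t nb_ops, void *arg)
 *	{
 *		uint64_t *op_counter = arg;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*op_counter += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb = rte_cryptodev_add_enq_callback(dev_id,
 *			0, cb_count_ops, &counter);
 */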

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	if (*dev->dev_ops->sym_session_configure == NULL)
		return -ENOTSUP;

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}
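
/*
 * Illustrative usage sketch (not part of the library): creating the
 * session header pool used with rte_cryptodev_sym_session_create().
 * Element count and cache size are arbitrary example values.
 *
 *	struct rte_mempool *sess_mp;
 *
 *	sess_mp = rte_cryptodev_sym_session_pool_create("sess_mp",
 *			1024,	// number of sessions
 *			0,	// let the API size the elements
 *			32,	// per-lcore cache size
 *			0,	// no per-session user data
 *			rte_socket_id());
 *	if (sess_mp == NULL)
 *		return -ENOMEM;
 */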

struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	uint32_t obj_sz, obj_sz_aligned;
	uint8_t dev_id;
	unsigned int priv_sz, max_priv_sz = 0;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id)) {
			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
			if (priv_sz > max_priv_sz)
				max_priv_sz = priv_sz;
		}
	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("Could not set max private session size");
		return NULL;
	}

	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);

	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}
	pool_priv->max_priv_session_sz = max_priv_sz;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
		user_data_size, cache_size, mp);
	return mp;
}

static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->nb_drivers != nb_drivers ||
			mp->elt_size <
				rte_cryptodev_sym_get_header_session_size()
				+ pool_priv->user_data_sz)
		return 0;

	return 1;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
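
/*
 * Illustrative usage sketch (not part of the library): allocating a
 * session header from the pool and binding it to a device with a
 * cipher transform. sess_mp, sess_priv_mp, key and IV_OFFSET are
 * hypothetical names standing in for application-owned objects.
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			sess_priv_mp) < 0)
 *		return -1;
 */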
1930 
1931 int
1932 rte_cryptodev_asym_session_create(uint8_t dev_id,
1933 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1934 		void **session)
1935 {
1936 	struct rte_cryptodev_asym_session *sess;
1937 	uint32_t session_priv_data_sz;
1938 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
1939 	unsigned int session_header_size =
1940 			rte_cryptodev_asym_get_header_session_size();
1941 	struct rte_cryptodev *dev;
1942 	int ret;
1943 
1944 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1945 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1946 		return -EINVAL;
1947 	}
1948 
1949 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1950 
1951 	if (dev == NULL)
1952 		return -EINVAL;
1953 
1954 	if (!mp) {
1955 		CDEV_LOG_ERR("invalid mempool\n");
1956 		return -EINVAL;
1957 	}
1958 
1959 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
1960 			dev_id);
1961 	pool_priv = rte_mempool_get_priv(mp);
1962 
1963 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
1964 		CDEV_LOG_DEBUG(
1965 			"The private session data size used when creating the mempool is smaller than this device's private session data.");
1966 		return -EINVAL;
1967 	}
1968 
1969 	/* Verify if provided mempool can hold elements big enough. */
1970 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
1971 		CDEV_LOG_ERR(
1972 			"mempool elements too small to hold session objects");
1973 		return -EINVAL;
1974 	}
1975 
1976 	/* Allocate a session structure from the session pool */
1977 	if (rte_mempool_get(mp, session)) {
1978 		CDEV_LOG_ERR("couldn't get object from session mempool");
1979 		return -ENOMEM;
1980 	}
1981 
1982 	sess = *session;
1983 	sess->driver_id = dev->driver_id;
1984 	sess->user_data_sz = pool_priv->user_data_sz;
1985 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
1986 
1987 	/* Clear the device private session data and the user data area. */
1988 	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
1989 
1990 	if (*dev->dev_ops->asym_session_configure == NULL)
1991 		return -ENOTSUP;
1992 
1993 	if (sess->sess_private_data[0] == 0) {
1994 		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
1995 		if (ret < 0) {
1996 			CDEV_LOG_ERR(
1997 				"dev_id %d failed to configure session details",
1998 				dev_id);
1999 			return ret;
2000 		}
2001 	}
2002 
2003 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2004 	return 0;
2005 }
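
/*
 * Usage sketch (illustrative, not part of this file): creating an
 * asymmetric session. "asym_mp" would come from
 * rte_cryptodev_asym_session_pool_create(), and the xform contents are
 * hypothetical; they must describe a transform the device supports.
 *
 *	void *asym_sess = NULL;
 *	struct rte_crypto_asym_xform xform;
 *
 *	memset(&xform, 0, sizeof(xform));
 *	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
 *	... fill xform.modex.modulus / xform.modex.exponent ...
 *	if (rte_cryptodev_asym_session_create(dev_id, &xform, asym_mp,
 *			&asym_sess) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot create asym session\n");
 */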
2006 
2007 int
2008 rte_cryptodev_sym_session_clear(uint8_t dev_id,
2009 		struct rte_cryptodev_sym_session *sess)
2010 {
2011 	struct rte_cryptodev *dev;
2012 	uint8_t driver_id;
2013 
2014 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2015 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2016 		return -EINVAL;
2017 	}
2018 
2019 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2020 
2021 	if (dev == NULL || sess == NULL)
2022 		return -EINVAL;
2023 
2024 	driver_id = dev->driver_id;
2025 	if (sess->sess_data[driver_id].refcnt == 0)
2026 		return 0;
2027 	if (--sess->sess_data[driver_id].refcnt != 0)
2028 		return -EBUSY;
2029 
2030 	if (*dev->dev_ops->sym_session_clear == NULL)
2031 		return -ENOTSUP;
2032 
2033 	dev->dev_ops->sym_session_clear(dev, sess);
2034 
2035 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
2036 	return 0;
2037 }
2038 
2039 int
2040 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
2041 {
2042 	uint8_t i;
2043 	struct rte_mempool *sess_mp;
2044 
2045 	if (sess == NULL)
2046 		return -EINVAL;
2047 
2048 	/* Check that all device private data has been freed */
2049 	for (i = 0; i < sess->nb_drivers; i++) {
2050 		if (sess->sess_data[i].refcnt != 0)
2051 			return -EBUSY;
2052 	}
2053 
2054 	/* Return session to mempool */
2055 	sess_mp = rte_mempool_from_obj(sess);
2056 	rte_mempool_put(sess_mp, sess);
2057 
2058 	rte_cryptodev_trace_sym_session_free(sess);
2059 	return 0;
2060 }
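
/*
 * Teardown sketch (illustrative, not part of this file; "dev_id" and "s"
 * come from the surrounding application code): a symmetric session must
 * be cleared on every device it was initialised for before it can be
 * returned to its mempool; clear reports -EBUSY while other references
 * remain.
 *
 *	if (rte_cryptodev_sym_session_clear(dev_id, s) == 0 &&
 *			rte_cryptodev_sym_session_free(s) == 0)
 *		s = NULL;
 */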
2061 
2062 int
2063 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2064 {
2065 	struct rte_mempool *sess_mp;
2066 	struct rte_cryptodev *dev;
2067 
2068 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2069 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2070 		return -EINVAL;
2071 	}
2072 
2073 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2074 
2075 	if (dev == NULL || sess == NULL)
2076 		return -EINVAL;
2077 
2078 	if (*dev->dev_ops->asym_session_clear == NULL)
2079 		return -ENOTSUP;
2080 
2081 	dev->dev_ops->asym_session_clear(dev, sess);
2082 
2083 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2084 
2085 	/* Return session to mempool */
2086 	sess_mp = rte_mempool_from_obj(sess);
2087 	rte_mempool_put(sess_mp, sess);
2088 
2089 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2090 	return 0;
2091 }
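
/*
 * Teardown sketch (illustrative, not part of this file): unlike the
 * symmetric case there is no separate clear step for the application;
 * freeing an asymmetric session clears the device private data and
 * returns the object to its mempool in one call.
 *
 *	if (rte_cryptodev_asym_session_free(dev_id, asym_sess) == 0)
 *		asym_sess = NULL;
 */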
2092 
2093 unsigned int
2094 rte_cryptodev_sym_get_header_session_size(void)
2095 {
2096 	/*
2097 	 * The header contains pointers to the private data of all registered
2098 	 * drivers, plus all the information needed to safely clear or
2099 	 * free the session.
2100 	 */
2101 	struct rte_cryptodev_sym_session s = {0};
2102 
2103 	s.nb_drivers = nb_drivers;
2104 
2105 	return (unsigned int)(sizeof(s) +
2106 			rte_cryptodev_sym_session_data_size(&s));
2107 }
2108 
2109 unsigned int
2110 rte_cryptodev_sym_get_existing_header_session_size(
2111 		struct rte_cryptodev_sym_session *sess)
2112 {
2113 	if (!sess)
2114 		return 0;
2115 	else
2116 		return (unsigned int)(sizeof(*sess) +
2117 				rte_cryptodev_sym_session_data_size(sess));
2118 }
2119 
2120 unsigned int
2121 rte_cryptodev_asym_get_header_session_size(void)
2122 {
2123 	return sizeof(struct rte_cryptodev_asym_session);
2124 }
2125 
2126 unsigned int
2127 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2128 {
2129 	struct rte_cryptodev *dev;
2130 	unsigned int priv_sess_size;
2131 
2132 	if (!rte_cryptodev_is_valid_dev(dev_id))
2133 		return 0;
2134 
2135 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2136 
2137 	if (*dev->dev_ops->sym_session_get_size == NULL)
2138 		return 0;
2139 
2140 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2141 
2142 	return priv_sess_size;
2143 }
2144 
2145 unsigned int
2146 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2147 {
2148 	struct rte_cryptodev *dev;
2149 	unsigned int priv_sess_size;
2150 
2151 	if (!rte_cryptodev_is_valid_dev(dev_id))
2152 		return 0;
2153 
2154 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2155 
2156 	if (*dev->dev_ops->asym_session_get_size == NULL)
2157 		return 0;
2158 
2159 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2160 
2161 	return priv_sess_size;
2162 }
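
/*
 * Sizing sketch (illustrative, not part of this file): a private-data
 * mempool shared across devices must be sized for the largest per-device
 * private session size.
 *
 *	unsigned int max_sz = 0, sz;
 *	uint8_t id;
 *
 *	for (id = 0; id < RTE_CRYPTO_MAX_DEVS; id++) {
 *		if (!rte_cryptodev_is_valid_dev(id))
 *			continue;
 *		sz = rte_cryptodev_sym_get_private_session_size(id);
 *		if (sz > max_sz)
 *			max_sz = sz;
 *	}
 */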
2163 
2164 int
2165 rte_cryptodev_sym_session_set_user_data(
2166 					struct rte_cryptodev_sym_session *sess,
2167 					void *data,
2168 					uint16_t size)
2169 {
2170 	if (sess == NULL)
2171 		return -EINVAL;
2172 
2173 	if (sess->user_data_sz < size)
2174 		return -ENOMEM;
2175 
2176 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2177 	return 0;
2178 }
2179 
2180 void *
2181 rte_cryptodev_sym_session_get_user_data(
2182 					struct rte_cryptodev_sym_session *sess)
2183 {
2184 	if (sess == NULL || sess->user_data_sz == 0)
2185 		return NULL;
2186 
2187 	return (void *)(sess->sess_data + sess->nb_drivers);
2188 }
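
/*
 * User-data sketch (illustrative, not part of this file): stashing an
 * application context in a session and recovering it later, e.g. after
 * dequeue. This only works if user data space was reserved when the
 * session mempool was created; "struct app_ctx" is hypothetical.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	struct app_ctx *p;
 *
 *	rte_cryptodev_sym_session_set_user_data(s, &ctx, sizeof(ctx));
 *	p = rte_cryptodev_sym_session_get_user_data(s);
 */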
2189 
2190 int
2191 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2192 {
2193 	struct rte_cryptodev_asym_session *sess = session;
2194 	if (sess == NULL)
2195 		return -EINVAL;
2196 
2197 	if (sess->user_data_sz < size)
2198 		return -ENOMEM;
2199 
2200 	rte_memcpy(sess->sess_private_data +
2201 			sess->max_priv_data_sz,
2202 			data, size);
2203 	return 0;
2204 }
2205 
2206 void *
2207 rte_cryptodev_asym_session_get_user_data(void *session)
2208 {
2209 	struct rte_cryptodev_asym_session *sess = session;
2210 	if (sess == NULL || sess->user_data_sz == 0)
2211 		return NULL;
2212 
2213 	return (void *)(sess->sess_private_data +
2214 			sess->max_priv_data_sz);
2215 }
2216 
2217 static inline void
2218 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2219 {
2220 	uint32_t i;
2221 	for (i = 0; i < vec->num; i++)
2222 		vec->status[i] = errnum;
2223 }
2224 
2225 uint32_t
2226 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2227 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2228 	struct rte_crypto_sym_vec *vec)
2229 {
2230 	struct rte_cryptodev *dev;
2231 
2232 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2233 		sym_crypto_fill_status(vec, EINVAL);
2234 		return 0;
2235 	}
2236 
2237 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2238 
2239 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2240 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2241 		sym_crypto_fill_status(vec, ENOTSUP);
2242 		return 0;
2243 	}
2244 
2245 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2246 }
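
/*
 * CPU-crypto sketch (illustrative, not part of this file; "s", "ofs" and
 * "vec" are assumed from the caller): the return value is the number of
 * successfully processed vector elements, and per-element status codes
 * are written to vec.status[], so both must be checked.
 *
 *	uint32_t ok = rte_cryptodev_sym_cpu_crypto_process(dev_id, s,
 *			ofs, &vec);
 *
 *	if (ok != vec.num)
 *		... inspect vec.status[i] for the failed elements ...
 */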
2247 
2248 int
2249 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2250 {
2251 	struct rte_cryptodev *dev;
2252 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2253 	int32_t priv_size;
2254 
2255 	if (!rte_cryptodev_is_valid_dev(dev_id))
2256 		return -EINVAL;
2257 
2258 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2259 
2260 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2261 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2262 		return -ENOTSUP;
2263 	}
2264 
2265 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2266 	if (priv_size < 0)
2267 		return -ENOTSUP;
2268 
2269 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2270 }
2271 
2272 int
2273 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2274 	struct rte_crypto_raw_dp_ctx *ctx,
2275 	enum rte_crypto_op_sess_type sess_type,
2276 	union rte_cryptodev_session_ctx session_ctx,
2277 	uint8_t is_update)
2278 {
2279 	struct rte_cryptodev *dev;
2280 
2281 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2282 		return -EINVAL;
2283 
2284 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2285 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2286 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2287 		return -ENOTSUP;
2288 
2289 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2290 			sess_type, session_ctx, is_update);
2291 }
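
/*
 * Raw data-path setup sketch (illustrative, not part of this file): query
 * the context size, allocate it, then bind it to a configured queue pair
 * and session; error handling is elided.
 *
 *	int ctx_sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, ctx_sz, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = s };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */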
2292 
2293 int
2294 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2295 	enum rte_crypto_op_type op_type,
2296 	enum rte_crypto_op_sess_type sess_type,
2297 	void *ev_mdata,
2298 	uint16_t size)
2299 {
2300 	struct rte_cryptodev *dev;
2301 
2302 	if (sess == NULL || ev_mdata == NULL)
2303 		return -EINVAL;
2304 
2305 	if (!rte_cryptodev_is_valid_dev(dev_id))
2306 		goto skip_pmd_op;
2307 
2308 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2309 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2310 		goto skip_pmd_op;
2311 
2312 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2313 			sess_type, ev_mdata);
2314 
2315 skip_pmd_op:
2316 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2317 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2318 				size);
2319 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2320 		struct rte_cryptodev_asym_session *s = sess;
2321 
2322 		if (s->event_mdata == NULL) {
2323 			s->event_mdata = rte_malloc(NULL, size, 0);
2324 			if (s->event_mdata == NULL)
2325 				return -ENOMEM;
2326 		}
2327 		rte_memcpy(s->event_mdata, ev_mdata, size);
2328 
2329 		return 0;
2330 	} else
2331 		return -ENOTSUP;
2332 }
2333 
2334 uint32_t
2335 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2336 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2337 	void **user_data, int *enqueue_status)
2338 {
2339 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2340 			ofs, user_data, enqueue_status);
2341 }
2342 
2343 int
2344 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2345 		uint32_t n)
2346 {
2347 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2348 }
2349 
2350 uint32_t
2351 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2352 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2353 	uint32_t max_nb_to_dequeue,
2354 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2355 	void **out_user_data, uint8_t is_user_data_array,
2356 	uint32_t *n_success_jobs, int *status)
2357 {
2358 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2359 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2360 		out_user_data, is_user_data_array, n_success_jobs, status);
2361 }
2362 
2363 int
2364 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2365 		uint32_t n)
2366 {
2367 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2368 }
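
/*
 * Raw enqueue flow sketch (illustrative, not part of this file): when the
 * driver reports *enqueue_status as 0 the burst is only cached by the
 * queue, and rte_cryptodev_raw_enqueue_done() must be called to submit
 * it to the device.
 *
 *	int status = 0;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			udata, &status);
 *
 *	if (n > 0 && status == 0)
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 */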
2369 
2370 /** Initialise rte_crypto_op mempool element */
2371 static void
2372 rte_crypto_op_init(struct rte_mempool *mempool,
2373 		void *opaque_arg,
2374 		void *_op_data,
2375 		__rte_unused unsigned int i)
2376 {
2377 	struct rte_crypto_op *op = _op_data;
2378 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2379 
2380 	memset(_op_data, 0, mempool->elt_size);
2381 
2382 	__rte_crypto_op_reset(op, type);
2383 
2384 	op->phys_addr = rte_mem_virt2iova(_op_data);
2385 	op->mempool = mempool;
2386 }
2387 
2388 
2389 struct rte_mempool *
2390 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2391 		unsigned int nb_elts, unsigned int cache_size, uint16_t priv_size,
2392 		int socket_id)
2393 {
2394 	struct rte_crypto_op_pool_private *priv;
2395 
2396 	unsigned int elt_size = sizeof(struct rte_crypto_op) +
2397 			priv_size;
2398 
2399 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2400 		elt_size += sizeof(struct rte_crypto_sym_op);
2401 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2402 		elt_size += sizeof(struct rte_crypto_asym_op);
2403 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2404 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2405 		                    sizeof(struct rte_crypto_asym_op));
2406 	} else {
2407 		CDEV_LOG_ERR("Invalid op_type");
2408 		return NULL;
2409 	}
2410 
2411 	/* lookup mempool in case already allocated */
2412 	struct rte_mempool *mp = rte_mempool_lookup(name);
2413 
2414 	if (mp != NULL) {
2415 		priv = (struct rte_crypto_op_pool_private *)
2416 				rte_mempool_get_priv(mp);
2417 
2418 		if (mp->elt_size != elt_size ||
2419 				mp->cache_size < cache_size ||
2420 				mp->size < nb_elts ||
2421 				priv->priv_size < priv_size) {
2422 			mp = NULL;
2423 			CDEV_LOG_ERR("Mempool %s already exists but with "
2424 					"incompatible parameters", name);
2425 			return NULL;
2426 		}
2427 		return mp;
2428 	}
2429 
2430 	mp = rte_mempool_create(
2431 			name,
2432 			nb_elts,
2433 			elt_size,
2434 			cache_size,
2435 			sizeof(struct rte_crypto_op_pool_private),
2436 			NULL,
2437 			NULL,
2438 			rte_crypto_op_init,
2439 			&type,
2440 			socket_id,
2441 			0);
2442 
2443 	if (mp == NULL) {
2444 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2445 		return NULL;
2446 	}
2447 
2448 	priv = (struct rte_crypto_op_pool_private *)
2449 			rte_mempool_get_priv(mp);
2450 
2451 	priv->priv_size = priv_size;
2452 	priv->type = type;
2453 
2454 	return mp;
2455 }
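
/*
 * Usage sketch (illustrative, not part of this file): creating an op pool
 * and drawing one operation from it; the name and sizes are hypothetical.
 *
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 4096, 128, 0,
 *			rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_mp,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */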
2456 
2457 int
2458 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2459 {
2460 	struct rte_cryptodev *dev = NULL;
2461 	uint32_t i = 0;
2462 
2463 	if (name == NULL)
2464 		return -EINVAL;
2465 
2466 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2467 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2468 				"%s_%u", dev_name_prefix, i);
2469 
2470 		if (ret < 0)
2471 			return ret;
2472 
2473 		dev = rte_cryptodev_pmd_get_named_dev(name);
2474 		if (!dev)
2475 			return 0;
2476 	}
2477 
2478 	return -1;
2479 }
2480 
2481 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2482 
2483 static struct cryptodev_driver_list cryptodev_driver_list =
2484 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2485 
2486 int
2487 rte_cryptodev_driver_id_get(const char *name)
2488 {
2489 	struct cryptodev_driver *driver;
2490 	const char *driver_name;
2491 
2492 	if (name == NULL) {
2493 		CDEV_LOG_DEBUG("name pointer NULL");
2494 		return -1;
2495 	}
2496 
2497 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2498 		driver_name = driver->driver->name;
2499 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2500 			return driver->id;
2501 	}
2502 	return -1;
2503 }
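
/*
 * Lookup sketch (illustrative, not part of this file): mapping a PMD name
 * to its driver id, e.g. to compare against a session's driver_id; the
 * driver name shown is just an example.
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *
 *	if (drv_id < 0)
 *		... driver not registered ...
 */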
2504 
2505 const char *
2506 rte_cryptodev_name_get(uint8_t dev_id)
2507 {
2508 	struct rte_cryptodev *dev;
2509 
2510 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2511 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2512 		return NULL;
2513 	}
2514 
2515 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2516 	if (dev == NULL)
2517 		return NULL;
2518 
2519 	return dev->data->name;
2520 }
2521 
2522 const char *
2523 rte_cryptodev_driver_name_get(uint8_t driver_id)
2524 {
2525 	struct cryptodev_driver *driver;
2526 
2527 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2528 		if (driver->id == driver_id)
2529 			return driver->driver->name;
2530 	return NULL;
2531 }
2532 
2533 uint8_t
2534 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2535 		const struct rte_driver *drv)
2536 {
2537 	crypto_drv->driver = drv;
2538 	crypto_drv->id = nb_drivers;
2539 
2540 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2541 
2542 	return nb_drivers++;
2543 }
2544 
2545 RTE_INIT(cryptodev_init_fp_ops)
2546 {
2547 	uint32_t i;
2548 
2549 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2550 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2551 }
2552 
2553 static int
2554 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2555 		const char *params __rte_unused,
2556 		struct rte_tel_data *d)
2557 {
2558 	int dev_id;
2559 
2560 	if (rte_cryptodev_count() < 1)
2561 		return -EINVAL;
2562 
2563 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2564 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2565 		if (rte_cryptodev_is_valid_dev(dev_id))
2566 			rte_tel_data_add_array_int(d, dev_id);
2567 
2568 	return 0;
2569 }
2570 
2571 static int
2572 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2573 		const char *params, struct rte_tel_data *d)
2574 {
2575 	struct rte_cryptodev_info cryptodev_info;
2576 	int dev_id;
2577 	char *end_param;
2578 
2579 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2580 		return -EINVAL;
2581 
2582 	dev_id = strtoul(params, &end_param, 0);
2583 	if (*end_param != '\0')
2584 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2585 	if (!rte_cryptodev_is_valid_dev(dev_id))
2586 		return -EINVAL;
2587 
2588 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2589 
2590 	rte_tel_data_start_dict(d);
2591 	rte_tel_data_add_dict_string(d, "device_name",
2592 		cryptodev_info.device->name);
2593 	rte_tel_data_add_dict_int(d, "max_nb_queue_pairs",
2594 		cryptodev_info.max_nb_queue_pairs);
2595 
2596 	return 0;
2597 }
2598 
2599 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)
2600 
2601 static int
2602 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2603 		const char *params,
2604 		struct rte_tel_data *d)
2605 {
2606 	struct rte_cryptodev_stats cryptodev_stats;
2607 	int dev_id, ret;
2608 	char *end_param;
2609 
2610 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2611 		return -EINVAL;
2612 
2613 	dev_id = strtoul(params, &end_param, 0);
2614 	if (*end_param != '\0')
2615 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2616 	if (!rte_cryptodev_is_valid_dev(dev_id))
2617 		return -EINVAL;
2618 
2619 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2620 	if (ret < 0)
2621 		return ret;
2622 
2623 	rte_tel_data_start_dict(d);
2624 	ADD_DICT_STAT(enqueued_count);
2625 	ADD_DICT_STAT(dequeued_count);
2626 	ADD_DICT_STAT(enqueue_err_count);
2627 	ADD_DICT_STAT(dequeue_err_count);
2628 
2629 	return 0;
2630 }
2631 
2632 #define CRYPTO_CAPS_SZ                                             \
2633 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2634 					sizeof(uint64_t)) /        \
2635 	 sizeof(uint64_t))
2636 
2637 static int
2638 crypto_caps_array(struct rte_tel_data *d,
2639 		  const struct rte_cryptodev_capabilities *capabilities)
2640 {
2641 	const struct rte_cryptodev_capabilities *dev_caps;
2642 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2643 	unsigned int i = 0, j;
2644 
2645 	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
2646 
2647 	while ((dev_caps = &capabilities[i++])->op !=
2648 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2649 		memset(caps_val, 0, sizeof(caps_val));
2650 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2651 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2652 			rte_tel_data_add_array_u64(d, caps_val[j]);
2653 	}
2654 
2655 	return i;
2656 }
2657 
2658 static int
2659 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2660 			  struct rte_tel_data *d)
2661 {
2662 	struct rte_cryptodev_info dev_info;
2663 	struct rte_tel_data *crypto_caps;
2664 	int crypto_caps_n;
2665 	char *end_param;
2666 	int dev_id;
2667 
2668 	if (!params || strlen(params) == 0 || !isdigit(*params))
2669 		return -EINVAL;
2670 
2671 	dev_id = strtoul(params, &end_param, 0);
2672 	if (*end_param != '\0')
2673 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2674 	if (!rte_cryptodev_is_valid_dev(dev_id))
2675 		return -EINVAL;
2676 
2677 	rte_tel_data_start_dict(d);
2678 	crypto_caps = rte_tel_data_alloc();
2679 	if (!crypto_caps)
2680 		return -ENOMEM;
2681 
2682 	rte_cryptodev_info_get(dev_id, &dev_info);
2683 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2684 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2685 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2686 
2687 	return 0;
2688 }
2689 
2690 RTE_INIT(cryptodev_init_telemetry)
2691 {
2692 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2693 			"Returns information for a cryptodev. Parameters: int dev_id");
2694 	rte_telemetry_register_cmd("/cryptodev/list",
2695 			cryptodev_handle_dev_list,
2696 			"Returns list of available crypto devices by IDs. No parameters.");
2697 	rte_telemetry_register_cmd("/cryptodev/stats",
2698 			cryptodev_handle_dev_stats,
2699 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2700 	rte_telemetry_register_cmd("/cryptodev/caps",
2701 			cryptodev_handle_dev_caps,
2702 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2703 }
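
/*
 * Telemetry sketch (illustrative, not part of this file): the commands
 * registered above can be exercised with the usertools/dpdk-telemetry.py
 * client, passing the device id as the parameter where one is required:
 *
 *	--> /cryptodev/list
 *	--> /cryptodev/info,0
 *	--> /cryptodev/stats,0
 */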
2704