xref: /dpdk/lib/cryptodev/rte_cryptodev.c (revision 69f9d8aa357d2299e057b7e335f340e20a0c5e7e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4 
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 
16 #include <rte_byteorder.h>
17 #include <rte_log.h>
18 #include <rte_debug.h>
19 #include <rte_dev.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_tailq.h>
26 #include <rte_eal.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_common.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_errno.h>
36 #include <rte_spinlock.h>
37 #include <rte_string_fns.h>
38 #include <rte_telemetry.h>
39 
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "cryptodev_pmd.h"
43 #include "rte_cryptodev_trace.h"
44 
45 static uint8_t nb_drivers;
46 
47 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
48 
49 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
50 
51 static struct rte_cryptodev_global cryptodev_globals = {
52 		.devs			= rte_crypto_devices,
53 		.data			= { NULL },
54 		.nb_devs		= 0
55 };
56 
57 /* Public fastpath APIs. */
58 struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
59 
60 /* spinlock for crypto device callbacks */
61 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
62 
63 /**
64  * The user application callback description.
65  *
66  * It contains the callback address registered by the user application,
67  * a pointer to the callback's parameter, and the event type.
68  */
69 struct rte_cryptodev_callback {
70 	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
71 	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
72 	void *cb_arg;				/**< Parameter for callback */
73 	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
74 	uint32_t active;			/**< Callback is executing */
75 };
76 
77 /**
78  * The crypto cipher algorithm string identifiers.
79  * These can be used on the application command line.
80  */
81 const char *
82 rte_crypto_cipher_algorithm_strings[] = {
83 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
84 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
85 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
86 
87 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
88 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
89 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
90 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
91 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
92 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
93 
94 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
95 
96 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
97 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
98 
99 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
100 
101 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
102 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
103 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
104 };
105 
106 /**
107  * The crypto cipher operation string identifiers.
108  * These can be used on the application command line.
109  */
110 const char *
111 rte_crypto_cipher_operation_strings[] = {
112 		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
113 		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
114 };
115 
116 /**
117  * The crypto auth algorithm string identifiers.
118  * These can be used on the application command line.
119  */
120 const char *
121 rte_crypto_auth_algorithm_strings[] = {
122 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
123 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
124 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
125 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
126 
127 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
128 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
129 
130 	[RTE_CRYPTO_AUTH_NULL]		= "null",
131 
132 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
133 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
134 
135 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
136 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
137 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
138 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
139 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
140 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
141 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
142 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
143 
144 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
145 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
146 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
147 };
148 
149 /**
150  * The crypto AEAD algorithm string identifiers.
151  * These can be used on the application command line.
152  */
153 const char *
154 rte_crypto_aead_algorithm_strings[] = {
155 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
156 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
157 	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
158 };
159 
160 /**
161  * The crypto AEAD operation string identifiers.
162  * These can be used on the application command line.
163  */
164 const char *
165 rte_crypto_aead_operation_strings[] = {
166 	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
167 	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
168 };
169 
170 /**
171  * Asymmetric crypto transform operation strings identifiers.
172  */
173 const char *rte_crypto_asym_xform_strings[] = {
174 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
175 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
176 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
177 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
178 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
179 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
180 	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
181 	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
182 };
183 
184 /**
185  * Asymmetric crypto operation strings identifiers.
186  */
187 const char *rte_crypto_asym_op_strings[] = {
188 	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
189 	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
190 	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
191 	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
192 	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
193 	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
194 	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
195 };
196 
197 /**
198  * The private data structure stored in the session mempool's private data area.
199  */
200 struct rte_cryptodev_sym_session_pool_private_data {
201 	uint16_t nb_drivers;
202 	/**< number of elements in sess_data array */
203 	uint16_t user_data_sz;
204 	/**< session user data will be placed after sess_data */
205 };
206 
207 int
208 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
209 		const char *algo_string)
210 {
211 	unsigned int i;
212 
213 	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
214 		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
215 			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
216 			return 0;
217 		}
218 	}
219 
220 	/* Invalid string */
221 	return -1;
222 }
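/*
 * Illustrative use of the string tables and lookup helpers above (not part
 * of the library): an application can translate a command-line algorithm
 * name into the corresponding enum value. The "aes-cbc" literal below is
 * only an example.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("Unknown cipher algorithm\n");
 */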
223 
224 int
225 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
226 		const char *algo_string)
227 {
228 	unsigned int i;
229 
230 	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
231 		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
232 			*algo_enum = (enum rte_crypto_auth_algorithm) i;
233 			return 0;
234 		}
235 	}
236 
237 	/* Invalid string */
238 	return -1;
239 }
240 
241 int
242 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
243 		const char *algo_string)
244 {
245 	unsigned int i;
246 
247 	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
248 		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
249 			*algo_enum = (enum rte_crypto_aead_algorithm) i;
250 			return 0;
251 		}
252 	}
253 
254 	/* Invalid string */
255 	return -1;
256 }
257 
258 int
259 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
260 		const char *xform_string)
261 {
262 	unsigned int i;
263 
264 	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
265 		if (strcmp(xform_string,
266 			rte_crypto_asym_xform_strings[i]) == 0) {
267 			*xform_enum = (enum rte_crypto_asym_xform_type) i;
268 			return 0;
269 		}
270 	}
271 
272 	/* Invalid string */
273 	return -1;
274 }
275 
276 /**
277  * The crypto auth operation string identifiers.
278  * These can be used on the application command line.
279  */
280 const char *
281 rte_crypto_auth_operation_strings[] = {
282 		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
283 		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
284 };
285 
286 const struct rte_cryptodev_symmetric_capability *
287 rte_cryptodev_sym_capability_get(uint8_t dev_id,
288 		const struct rte_cryptodev_sym_capability_idx *idx)
289 {
290 	const struct rte_cryptodev_capabilities *capability;
291 	struct rte_cryptodev_info dev_info;
292 	int i = 0;
293 
294 	rte_cryptodev_info_get(dev_id, &dev_info);
295 
296 	while ((capability = &dev_info.capabilities[i++])->op !=
297 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
298 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
299 			continue;
300 
301 		if (capability->sym.xform_type != idx->type)
302 			continue;
303 
304 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
305 			capability->sym.auth.algo == idx->algo.auth)
306 			return &capability->sym;
307 
308 		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
309 			capability->sym.cipher.algo == idx->algo.cipher)
310 			return &capability->sym;
311 
312 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
313 				capability->sym.aead.algo == idx->algo.aead)
314 			return &capability->sym;
315 	}
316 
317 	return NULL;
318 }
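/*
 * Illustrative capability lookup (not part of the library); dev_id and the
 * key, digest, AAD and IV sizes below are example values only:
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_aead(cap, 16, 16, 0, 12) < 0)
 *		printf("AES-GCM with these parameters is not supported\n");
 */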
319 
320 static int
321 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
322 {
323 	unsigned int next_size;
324 
325 	/* Check lower/upper bounds */
326 	if (size < range->min)
327 		return -1;
328 
329 	if (size > range->max)
330 		return -1;
331 
332 	/* If range is actually only one value, size is correct */
333 	if (range->increment == 0)
334 		return 0;
335 
336 	/* Check if value is one of the supported sizes */
337 	for (next_size = range->min; next_size <= range->max;
338 			next_size += range->increment)
339 		if (size == next_size)
340 			return 0;
341 
342 	return -1;
343 }
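/*
 * Example of the range rule above: with min = 16, max = 32 and
 * increment = 8, the accepted sizes are exactly 16, 24 and 32. An
 * increment of 0 denotes a single supported value (min == max), per the
 * comment in the function.
 */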
344 
345 const struct rte_cryptodev_asymmetric_xform_capability *
346 rte_cryptodev_asym_capability_get(uint8_t dev_id,
347 		const struct rte_cryptodev_asym_capability_idx *idx)
348 {
349 	const struct rte_cryptodev_capabilities *capability;
350 	struct rte_cryptodev_info dev_info;
351 	unsigned int i = 0;
352 
353 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
354 	rte_cryptodev_info_get(dev_id, &dev_info);
355 
356 	while ((capability = &dev_info.capabilities[i++])->op !=
357 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
358 		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
359 			continue;
360 
361 		if (capability->asym.xform_capa.xform_type == idx->type)
362 			return &capability->asym.xform_capa;
363 	}
364 	return NULL;
365 }
366 
367 int
368 rte_cryptodev_sym_capability_check_cipher(
369 		const struct rte_cryptodev_symmetric_capability *capability,
370 		uint16_t key_size, uint16_t iv_size)
371 {
372 	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
373 		return -1;
374 
375 	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
376 		return -1;
377 
378 	return 0;
379 }
380 
381 int
382 rte_cryptodev_sym_capability_check_auth(
383 		const struct rte_cryptodev_symmetric_capability *capability,
384 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
385 {
386 	if (param_range_check(key_size, &capability->auth.key_size) != 0)
387 		return -1;
388 
389 	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
390 		return -1;
391 
392 	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
393 		return -1;
394 
395 	return 0;
396 }
397 
398 int
399 rte_cryptodev_sym_capability_check_aead(
400 		const struct rte_cryptodev_symmetric_capability *capability,
401 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
402 		uint16_t iv_size)
403 {
404 	if (param_range_check(key_size, &capability->aead.key_size) != 0)
405 		return -1;
406 
407 	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
408 		return -1;
409 
410 	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
411 		return -1;
412 
413 	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
414 		return -1;
415 
416 	return 0;
417 }
418 int
419 rte_cryptodev_asym_xform_capability_check_optype(
420 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
421 	enum rte_crypto_asym_op_type op_type)
422 {
423 	if (capability->op_types & (1 << op_type))
424 		return 1;
425 
426 	return 0;
427 }
428 
429 int
430 rte_cryptodev_asym_xform_capability_check_modlen(
431 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
432 	uint16_t modlen)
433 {
434 	/* No need to check the limits if min or max is 0 */
435 	if (capability->modlen.min != 0) {
436 		if (modlen < capability->modlen.min)
437 			return -1;
438 	}
439 
440 	if (capability->modlen.max != 0) {
441 		if (modlen > capability->modlen.max)
442 			return -1;
443 	}
444 
445 	/* In any case, check if the given modlen is a multiple of the increment */
446 	if (capability->modlen.increment != 0) {
447 		if (modlen % (capability->modlen.increment))
448 			return -1;
449 	}
450 
451 	return 0;
452 }
453 
454 /* Spinlock for crypto device enqueue/dequeue callbacks */
455 static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
456 
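/*
 * Release all enqueue/dequeue callbacks and their per-queue-pair RCU QSBR
 * variables, then free the per-device callback arrays themselves.
 */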
457 static void
458 cryptodev_cb_cleanup(struct rte_cryptodev *dev)
459 {
460 	struct rte_cryptodev_cb_rcu *list;
461 	struct rte_cryptodev_cb *cb, *next;
462 	uint16_t qp_id;
463 
464 	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
465 		return;
466 
467 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
468 		list = &dev->enq_cbs[qp_id];
469 		cb = list->next;
470 		while (cb != NULL) {
471 			next = cb->next;
472 			rte_free(cb);
473 			cb = next;
474 		}
475 
476 		rte_free(list->qsbr);
477 	}
478 
479 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
480 		list = &dev->deq_cbs[qp_id];
481 		cb = list->next;
482 		while (cb != NULL) {
483 			next = cb->next;
484 			rte_free(cb);
485 			cb = next;
486 		}
487 
488 		rte_free(list->qsbr);
489 	}
490 
491 	rte_free(dev->enq_cbs);
492 	dev->enq_cbs = NULL;
493 	rte_free(dev->deq_cbs);
494 	dev->deq_cbs = NULL;
495 }
496 
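/*
 * Allocate the per-queue-pair enqueue/dequeue callback lists and one RCU
 * QSBR variable per list. A single reader thread is assumed per queue pair
 * (max_threads = 1), since a cryptodev queue pair is expected to be used by
 * only one data-plane thread at a time.
 */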
497 static int
498 cryptodev_cb_init(struct rte_cryptodev *dev)
499 {
500 	struct rte_cryptodev_cb_rcu *list;
501 	struct rte_rcu_qsbr *qsbr;
502 	uint16_t qp_id;
503 	size_t size;
504 
505 	/* Max threads set to 1, as only one DP thread accesses a queue pair */
506 	const uint32_t max_threads = 1;
507 
508 	dev->enq_cbs = rte_zmalloc(NULL,
509 				   sizeof(struct rte_cryptodev_cb_rcu) *
510 				   dev->data->nb_queue_pairs, 0);
511 	if (dev->enq_cbs == NULL) {
512 		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
513 		return -ENOMEM;
514 	}
515 
516 	dev->deq_cbs = rte_zmalloc(NULL,
517 				   sizeof(struct rte_cryptodev_cb_rcu) *
518 				   dev->data->nb_queue_pairs, 0);
519 	if (dev->deq_cbs == NULL) {
520 		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
521 		rte_free(dev->enq_cbs);
522 		return -ENOMEM;
523 	}
524 
525 	/* Create RCU QSBR variable */
526 	size = rte_rcu_qsbr_get_memsize(max_threads);
527 
528 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
529 		list = &dev->enq_cbs[qp_id];
530 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
531 		if (qsbr == NULL) {
532 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
533 				"queue_pair_id=%d", qp_id);
534 			goto cb_init_err;
535 		}
536 
537 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
538 			CDEV_LOG_ERR("Failed to initialize RCU on "
539 				"queue_pair_id=%d", qp_id);
540 			goto cb_init_err;
541 		}
542 
543 		list->qsbr = qsbr;
544 	}
545 
546 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
547 		list = &dev->deq_cbs[qp_id];
548 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
549 		if (qsbr == NULL) {
550 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
551 				"queue_pair_id=%d", qp_id);
552 			goto cb_init_err;
553 		}
554 
555 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
556 			CDEV_LOG_ERR("Failed to initialize RCU on "
557 				"queue_pair_id=%d", qp_id);
558 			goto cb_init_err;
559 		}
560 
561 		list->qsbr = qsbr;
562 	}
563 
564 	return 0;
565 
566 cb_init_err:
567 	cryptodev_cb_cleanup(dev);
568 	return -ENOMEM;
569 }
570 
571 const char *
572 rte_cryptodev_get_feature_name(uint64_t flag)
573 {
574 	switch (flag) {
575 	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
576 		return "SYMMETRIC_CRYPTO";
577 	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
578 		return "ASYMMETRIC_CRYPTO";
579 	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
580 		return "SYM_OPERATION_CHAINING";
581 	case RTE_CRYPTODEV_FF_CPU_SSE:
582 		return "CPU_SSE";
583 	case RTE_CRYPTODEV_FF_CPU_AVX:
584 		return "CPU_AVX";
585 	case RTE_CRYPTODEV_FF_CPU_AVX2:
586 		return "CPU_AVX2";
587 	case RTE_CRYPTODEV_FF_CPU_AVX512:
588 		return "CPU_AVX512";
589 	case RTE_CRYPTODEV_FF_CPU_AESNI:
590 		return "CPU_AESNI";
591 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
592 		return "HW_ACCELERATED";
593 	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
594 		return "IN_PLACE_SGL";
595 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
596 		return "OOP_SGL_IN_SGL_OUT";
597 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
598 		return "OOP_SGL_IN_LB_OUT";
599 	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
600 		return "OOP_LB_IN_SGL_OUT";
601 	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
602 		return "OOP_LB_IN_LB_OUT";
603 	case RTE_CRYPTODEV_FF_CPU_NEON:
604 		return "CPU_NEON";
605 	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
606 		return "CPU_ARM_CE";
607 	case RTE_CRYPTODEV_FF_SECURITY:
608 		return "SECURITY_PROTOCOL";
609 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
610 		return "RSA_PRIV_OP_KEY_EXP";
611 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
612 		return "RSA_PRIV_OP_KEY_QT";
613 	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
614 		return "DIGEST_ENCRYPTED";
615 	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
616 		return "SYM_CPU_CRYPTO";
617 	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
618 		return "ASYM_SESSIONLESS";
619 	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
620 		return "SYM_SESSIONLESS";
621 	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
622 		return "NON_BYTE_ALIGNED_DATA";
623 	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
624 		return "CIPHER_MULTIPLE_DATA_UNITS";
625 	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
626 		return "CIPHER_WRAPPED_KEY";
627 	default:
628 		return NULL;
629 	}
630 }
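/*
 * Illustrative sketch (not part of the library): printing the feature names
 * of a device from its feature_flags bitmask. dev_info is assumed to have
 * been filled by rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((dev_info.feature_flags & flag) && name != NULL)
 *			printf("%s\n", name);
 *	}
 */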
631 
632 struct rte_cryptodev *
633 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
634 {
635 	return &cryptodev_globals.devs[dev_id];
636 }
637 
638 struct rte_cryptodev *
639 rte_cryptodev_pmd_get_named_dev(const char *name)
640 {
641 	struct rte_cryptodev *dev;
642 	unsigned int i;
643 
644 	if (name == NULL)
645 		return NULL;
646 
647 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
648 		dev = &cryptodev_globals.devs[i];
649 
650 		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
651 				(strcmp(dev->data->name, name) == 0))
652 			return dev;
653 	}
654 
655 	return NULL;
656 }
657 
658 static inline uint8_t
659 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
660 {
661 	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
662 			rte_crypto_devices[dev_id].data == NULL)
663 		return 0;
664 
665 	return 1;
666 }
667 
668 unsigned int
669 rte_cryptodev_is_valid_dev(uint8_t dev_id)
670 {
671 	struct rte_cryptodev *dev = NULL;
672 
673 	if (!rte_cryptodev_is_valid_device_data(dev_id))
674 		return 0;
675 
676 	dev = rte_cryptodev_pmd_get_dev(dev_id);
677 	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
678 		return 0;
679 	else
680 		return 1;
681 }
682 
683 
684 int
685 rte_cryptodev_get_dev_id(const char *name)
686 {
687 	unsigned i;
688 
689 	if (name == NULL)
690 		return -1;
691 
692 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
693 		if (!rte_cryptodev_is_valid_device_data(i))
694 			continue;
695 		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
696 				== 0) &&
697 				(cryptodev_globals.devs[i].attached ==
698 						RTE_CRYPTODEV_ATTACHED))
699 			return i;
700 	}
701 
702 	return -1;
703 }
704 
705 uint8_t
706 rte_cryptodev_count(void)
707 {
708 	return cryptodev_globals.nb_devs;
709 }
710 
711 uint8_t
712 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
713 {
714 	uint8_t i, dev_count = 0;
715 
716 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
717 		if (cryptodev_globals.devs[i].driver_id == driver_id &&
718 			cryptodev_globals.devs[i].attached ==
719 					RTE_CRYPTODEV_ATTACHED)
720 			dev_count++;
721 
722 	return dev_count;
723 }
724 
725 uint8_t
726 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
727 	uint8_t nb_devices)
728 {
729 	uint8_t i, count = 0;
730 	struct rte_cryptodev *devs = cryptodev_globals.devs;
731 
732 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
733 		if (!rte_cryptodev_is_valid_device_data(i))
734 			continue;
735 
736 		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
737 			int cmp;
738 
739 			cmp = strncmp(devs[i].device->driver->name,
740 					driver_name,
741 					strlen(driver_name) + 1);
742 
743 			if (cmp == 0)
744 				devices[count++] = devs[i].data->dev_id;
745 		}
746 	}
747 
748 	return count;
749 }
750 
751 void *
752 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
753 {
754 	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
755 			(rte_crypto_devices[dev_id].feature_flags &
756 			RTE_CRYPTODEV_FF_SECURITY))
757 		return rte_crypto_devices[dev_id].security_ctx;
758 
759 	return NULL;
760 }
761 
762 int
763 rte_cryptodev_socket_id(uint8_t dev_id)
764 {
765 	struct rte_cryptodev *dev;
766 
767 	if (!rte_cryptodev_is_valid_dev(dev_id))
768 		return -1;
769 
770 	dev = rte_cryptodev_pmd_get_dev(dev_id);
771 
772 	return dev->data->socket_id;
773 }
774 
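/*
 * Reserve (primary process) or look up (secondary process) the shared
 * memzone holding the rte_cryptodev_data of a device, so that both process
 * types see the same device data.
 */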
775 static inline int
776 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
777 		int socket_id)
778 {
779 	char mz_name[RTE_MEMZONE_NAMESIZE];
780 	const struct rte_memzone *mz;
781 	int n;
782 
783 	/* generate memzone name */
784 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
785 	if (n >= (int)sizeof(mz_name))
786 		return -EINVAL;
787 
788 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
789 		mz = rte_memzone_reserve(mz_name,
790 				sizeof(struct rte_cryptodev_data),
791 				socket_id, 0);
792 		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
793 				mz_name, mz);
794 	} else {
795 		mz = rte_memzone_lookup(mz_name);
796 		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
797 				mz_name, mz);
798 	}
799 
800 	if (mz == NULL)
801 		return -ENOMEM;
802 
803 	*data = mz->addr;
804 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
805 		memset(*data, 0, sizeof(struct rte_cryptodev_data));
806 
807 	return 0;
808 }
809 
810 static inline int
811 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
812 {
813 	char mz_name[RTE_MEMZONE_NAMESIZE];
814 	const struct rte_memzone *mz;
815 	int n;
816 
817 	/* generate memzone name */
818 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
819 	if (n >= (int)sizeof(mz_name))
820 		return -EINVAL;
821 
822 	mz = rte_memzone_lookup(mz_name);
823 	if (mz == NULL)
824 		return -ENOMEM;
825 
826 	RTE_ASSERT(*data == mz->addr);
827 	*data = NULL;
828 
829 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
830 		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
831 				mz_name, mz);
832 		return rte_memzone_free(mz);
833 	} else {
834 		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
835 				mz_name, mz);
836 	}
837 
838 	return 0;
839 }
840 
841 static uint8_t
842 rte_cryptodev_find_free_device_index(void)
843 {
844 	uint8_t dev_id;
845 
846 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
847 		if (rte_crypto_devices[dev_id].attached ==
848 				RTE_CRYPTODEV_DETACHED)
849 			return dev_id;
850 	}
851 	return RTE_CRYPTO_MAX_DEVS;
852 }
853 
854 struct rte_cryptodev *
855 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
856 {
857 	struct rte_cryptodev *cryptodev;
858 	uint8_t dev_id;
859 
860 	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
861 		CDEV_LOG_ERR("Crypto device with name %s already "
862 				"allocated!", name);
863 		return NULL;
864 	}
865 
866 	dev_id = rte_cryptodev_find_free_device_index();
867 	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
868 		CDEV_LOG_ERR("Reached maximum number of crypto devices");
869 		return NULL;
870 	}
871 
872 	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
873 
874 	if (cryptodev->data == NULL) {
875 		struct rte_cryptodev_data **cryptodev_data =
876 				&cryptodev_globals.data[dev_id];
877 
878 		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
879 				socket_id);
880 
881 		if (retval < 0 || *cryptodev_data == NULL)
882 			return NULL;
883 
884 		cryptodev->data = *cryptodev_data;
885 
886 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
887 			strlcpy(cryptodev->data->name, name,
888 				RTE_CRYPTODEV_NAME_MAX_LEN);
889 
890 			cryptodev->data->dev_id = dev_id;
891 			cryptodev->data->socket_id = socket_id;
892 			cryptodev->data->dev_started = 0;
893 			CDEV_LOG_DEBUG("PRIMARY:init data");
894 		}
895 
896 		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
897 				cryptodev->data->name,
898 				cryptodev->data->dev_id,
899 				cryptodev->data->socket_id,
900 				cryptodev->data->dev_started);
901 
902 		/* init user callbacks */
903 		TAILQ_INIT(&(cryptodev->link_intr_cbs));
904 
905 		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
906 
907 		cryptodev_globals.nb_devs++;
908 	}
909 
910 	return cryptodev;
911 }
912 
913 int
914 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
915 {
916 	int ret;
917 	uint8_t dev_id;
918 
919 	if (cryptodev == NULL)
920 		return -EINVAL;
921 
922 	dev_id = cryptodev->data->dev_id;
923 
924 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
925 
926 	/* Close device only if device operations have been set */
927 	if (cryptodev->dev_ops) {
928 		ret = rte_cryptodev_close(dev_id);
929 		if (ret < 0)
930 			return ret;
931 	}
932 
933 	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
934 	if (ret < 0)
935 		return ret;
936 
937 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
938 	cryptodev_globals.nb_devs--;
939 	return 0;
940 }
941 
942 uint16_t
943 rte_cryptodev_queue_pair_count(uint8_t dev_id)
944 {
945 	struct rte_cryptodev *dev;
946 
947 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
948 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
949 		return 0;
950 	}
951 
952 	dev = &rte_crypto_devices[dev_id];
953 	return dev->data->nb_queue_pairs;
954 }
955 
956 static int
957 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
958 		int socket_id)
959 {
960 	struct rte_cryptodev_info dev_info;
961 	void **qp;
962 	unsigned i;
963 
964 	if ((dev == NULL) || (nb_qpairs < 1)) {
965 		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
966 							dev, nb_qpairs);
967 		return -EINVAL;
968 	}
969 
970 	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
971 			nb_qpairs, dev->data->dev_id);
972 
973 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
974 
975 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
976 	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
977 
978 	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
979 		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
980 				nb_qpairs, dev->data->dev_id);
981 		return -EINVAL;
982 	}
983 
984 	if (dev->data->queue_pairs == NULL) { /* first time configuration */
985 		dev->data->queue_pairs = rte_zmalloc_socket(
986 				"cryptodev->queue_pairs",
987 				sizeof(dev->data->queue_pairs[0]) *
988 				dev_info.max_nb_queue_pairs,
989 				RTE_CACHE_LINE_SIZE, socket_id);
990 
991 		if (dev->data->queue_pairs == NULL) {
992 			dev->data->nb_queue_pairs = 0;
993 			CDEV_LOG_ERR("failed to get memory for qp meta data, "
994 							"nb_queues %u",
995 							nb_qpairs);
996 			return -(ENOMEM);
997 		}
998 	} else { /* re-configure */
999 		int ret;
1000 		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
1001 
1002 		qp = dev->data->queue_pairs;
1003 
1004 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
1005 				-ENOTSUP);
1006 
1007 		for (i = nb_qpairs; i < old_nb_queues; i++) {
1008 			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
1009 			if (ret < 0)
1010 				return ret;
1011 			qp[i] = NULL;
1012 		}
1013 
1014 	}
1015 	dev->data->nb_queue_pairs = nb_qpairs;
1016 	return 0;
1017 }
1018 
1019 int
1020 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
1021 {
1022 	struct rte_cryptodev *dev;
1023 	int diag;
1024 
1025 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1026 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1027 		return -EINVAL;
1028 	}
1029 
1030 	dev = &rte_crypto_devices[dev_id];
1031 
1032 	if (dev->data->dev_started) {
1033 		CDEV_LOG_ERR(
1034 		    "device %d must be stopped to allow configuration", dev_id);
1035 		return -EBUSY;
1036 	}
1037 
1038 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1039 
1040 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1041 	cryptodev_cb_cleanup(dev);
1042 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1043 
1044 	/* Setup new number of queue pairs and reconfigure device. */
1045 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
1046 			config->socket_id);
1047 	if (diag != 0) {
1048 		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
1049 				dev_id, diag);
1050 		return diag;
1051 	}
1052 
1053 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1054 	diag = cryptodev_cb_init(dev);
1055 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1056 	if (diag) {
1057 		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
1058 		return diag;
1059 	}
1060 
1061 	rte_cryptodev_trace_configure(dev_id, config);
1062 	return (*dev->dev_ops->dev_configure)(dev, config);
1063 }
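/*
 * Illustrative device bring-up sequence (not part of the library); the
 * mempool pointers and sizes are placeholders an application would supply:
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev %u setup failed\n", dev_id);
 */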
1064 
1065 int
1066 rte_cryptodev_start(uint8_t dev_id)
1067 {
1068 	struct rte_cryptodev *dev;
1069 	int diag;
1070 
1071 	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1072 
1073 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1074 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1075 		return -EINVAL;
1076 	}
1077 
1078 	dev = &rte_crypto_devices[dev_id];
1079 
1080 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1081 
1082 	if (dev->data->dev_started != 0) {
1083 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1084 			dev_id);
1085 		return 0;
1086 	}
1087 
1088 	diag = (*dev->dev_ops->dev_start)(dev);
1089 	/* expose selection of PMD fast-path functions */
1090 	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
1091 
1092 	rte_cryptodev_trace_start(dev_id, diag);
1093 	if (diag == 0)
1094 		dev->data->dev_started = 1;
1095 	else
1096 		return diag;
1097 
1098 	return 0;
1099 }
1100 
1101 void
1102 rte_cryptodev_stop(uint8_t dev_id)
1103 {
1104 	struct rte_cryptodev *dev;
1105 
1106 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1107 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1108 		return;
1109 	}
1110 
1111 	dev = &rte_crypto_devices[dev_id];
1112 
1113 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1114 
1115 	if (dev->data->dev_started == 0) {
1116 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1117 			dev_id);
1118 		return;
1119 	}
1120 
1121 	/* point fast-path functions to dummy ones */
1122 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
1123 
1124 	(*dev->dev_ops->dev_stop)(dev);
1125 	rte_cryptodev_trace_stop(dev_id);
1126 	dev->data->dev_started = 0;
1127 }
1128 
1129 int
1130 rte_cryptodev_close(uint8_t dev_id)
1131 {
1132 	struct rte_cryptodev *dev;
1133 	int retval;
1134 
1135 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1136 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1137 		return -1;
1138 	}
1139 
1140 	dev = &rte_crypto_devices[dev_id];
1141 
1142 	/* Device must be stopped before it can be closed */
1143 	if (dev->data->dev_started == 1) {
1144 		CDEV_LOG_ERR("Device %u must be stopped before closing",
1145 				dev_id);
1146 		return -EBUSY;
1147 	}
1148 
1149 	/* We can't close the device if there are outstanding sessions in use */
1150 	if (dev->data->session_pool != NULL) {
1151 		if (!rte_mempool_full(dev->data->session_pool)) {
1152 			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1153 					"has sessions still in use, free "
1154 					"all sessions before calling close",
1155 					(unsigned)dev_id);
1156 			return -EBUSY;
1157 		}
1158 	}
1159 
1160 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1161 	retval = (*dev->dev_ops->dev_close)(dev);
1162 	rte_cryptodev_trace_close(dev_id, retval);
1163 
1164 	if (retval < 0)
1165 		return retval;
1166 
1167 	return 0;
1168 }
1169 
1170 int
1171 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1172 {
1173 	struct rte_cryptodev *dev;
1174 
1175 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1176 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1177 		return -EINVAL;
1178 	}
1179 
1180 	dev = &rte_crypto_devices[dev_id];
1181 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1182 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1183 		return -EINVAL;
1184 	}
1185 	void **qps = dev->data->queue_pairs;
1186 
1187 	if (qps[queue_pair_id])	{
1188 		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1189 			queue_pair_id, dev_id);
1190 		return 1;
1191 	}
1192 
1193 	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1194 		queue_pair_id, dev_id);
1195 
1196 	return 0;
1197 }
1198 
1199 int
1200 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1201 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1202 
1203 {
1204 	struct rte_cryptodev *dev;
1205 
1206 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1207 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1208 		return -EINVAL;
1209 	}
1210 
1211 	dev = &rte_crypto_devices[dev_id];
1212 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1213 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1214 		return -EINVAL;
1215 	}
1216 
1217 	if (!qp_conf) {
1218 		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
1219 		return -EINVAL;
1220 	}
1221 
1222 	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1223 			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
1224 		CDEV_LOG_ERR("Invalid mempools\n");
1225 		return -EINVAL;
1226 	}
1227 
1228 	if (qp_conf->mp_session) {
1229 		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1230 		uint32_t obj_size = qp_conf->mp_session->elt_size;
1231 		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1232 		struct rte_cryptodev_sym_session s = {0};
1233 
1234 		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1235 		if (!pool_priv || qp_conf->mp_session->private_data_size <
1236 				sizeof(*pool_priv)) {
1237 			CDEV_LOG_ERR("Invalid mempool\n");
1238 			return -EINVAL;
1239 		}
1240 
1241 		s.nb_drivers = pool_priv->nb_drivers;
1242 		s.user_data_sz = pool_priv->user_data_sz;
1243 
1244 		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1245 			obj_size) || (s.nb_drivers <= dev->driver_id) ||
1246 			rte_cryptodev_sym_get_private_session_size(dev_id) >
1247 				obj_priv_size) {
1248 			CDEV_LOG_ERR("Invalid mempool\n");
1249 			return -EINVAL;
1250 		}
1251 	}
1252 
1253 	if (dev->data->dev_started) {
1254 		CDEV_LOG_ERR(
1255 		    "device %d must be stopped to allow configuration", dev_id);
1256 		return -EBUSY;
1257 	}
1258 
1259 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1260 
1261 	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1262 	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1263 			socket_id);
1264 }
1265 
1266 struct rte_cryptodev_cb *
1267 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1268 			       uint16_t qp_id,
1269 			       rte_cryptodev_callback_fn cb_fn,
1270 			       void *cb_arg)
1271 {
1272 	struct rte_cryptodev *dev;
1273 	struct rte_cryptodev_cb_rcu *list;
1274 	struct rte_cryptodev_cb *cb, *tail;
1275 
1276 	if (!cb_fn) {
1277 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1278 		rte_errno = EINVAL;
1279 		return NULL;
1280 	}
1281 
1282 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1283 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1284 		rte_errno = ENODEV;
1285 		return NULL;
1286 	}
1287 
1288 	dev = &rte_crypto_devices[dev_id];
1289 	if (qp_id >= dev->data->nb_queue_pairs) {
1290 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1291 		rte_errno = ENODEV;
1292 		return NULL;
1293 	}
1294 
1295 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1296 	if (cb == NULL) {
1297 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1298 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1299 		rte_errno = ENOMEM;
1300 		return NULL;
1301 	}
1302 
1303 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1304 
1305 	cb->fn = cb_fn;
1306 	cb->arg = cb_arg;
1307 
1308 	/* Add the callbacks in fifo order. */
1309 	list = &dev->enq_cbs[qp_id];
1310 	tail = list->next;
1311 
1312 	if (tail) {
1313 		while (tail->next)
1314 			tail = tail->next;
1315 		/* Stores to cb->fn and cb->arg should complete before
1316 		 * cb is visible to data plane.
1317 		 */
1318 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1319 	} else {
1320 		/* Stores to cb->fn and cb->arg should complete before
1321 		 * cb is visible to data plane.
1322 		 */
1323 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1324 	}
1325 
1326 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1327 
1328 	return cb;
1329 }
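/*
 * Illustrative enqueue callback (not part of the library): a user-supplied
 * function matching rte_cryptodev_callback_fn that counts operations and
 * passes the burst through unchanged. The names below are examples only.
 *
 *	static uint16_t
 *	count_enqueued(uint16_t dev_id, uint16_t qp_id,
 *		       struct rte_crypto_op **ops, uint16_t nb_ops,
 *		       void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*counter += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb;
 *	uint64_t count = 0;
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, 0, count_enqueued, &count);
 */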
1330 
1331 int
1332 rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1333 				  uint16_t qp_id,
1334 				  struct rte_cryptodev_cb *cb)
1335 {
1336 	struct rte_cryptodev *dev;
1337 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1338 	struct rte_cryptodev_cb_rcu *list;
1339 	int ret;
1340 
1341 	ret = -EINVAL;
1342 
1343 	if (!cb) {
1344 		CDEV_LOG_ERR("Callback is NULL");
1345 		return -EINVAL;
1346 	}
1347 
1348 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1349 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1350 		return -ENODEV;
1351 	}
1352 
1353 	dev = &rte_crypto_devices[dev_id];
1354 	if (qp_id >= dev->data->nb_queue_pairs) {
1355 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1356 		return -ENODEV;
1357 	}
1358 
1359 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1360 	if (dev->enq_cbs == NULL) {
1361 		CDEV_LOG_ERR("Callback not initialized");
1362 		goto cb_err;
1363 	}
1364 
1365 	list = &dev->enq_cbs[qp_id];
1366 	if (list == NULL) {
1367 		CDEV_LOG_ERR("Callback list is NULL");
1368 		goto cb_err;
1369 	}
1370 
1371 	if (list->qsbr == NULL) {
1372 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1373 		goto cb_err;
1374 	}
1375 
1376 	prev_cb = &list->next;
1377 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1378 		curr_cb = *prev_cb;
1379 		if (curr_cb == cb) {
1380 			/* Remove the user cb from the callback list. */
1381 			__atomic_store_n(prev_cb, curr_cb->next,
1382 				__ATOMIC_RELAXED);
1383 			ret = 0;
1384 			break;
1385 		}
1386 	}
1387 
1388 	if (!ret) {
1389 		/* Call sync with invalid thread id as this is part of
1390 		 * control plane API
1391 		 */
1392 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1393 		rte_free(cb);
1394 	}
1395 
1396 cb_err:
1397 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1398 	return ret;
1399 }
1400 
1401 struct rte_cryptodev_cb *
1402 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1403 			       uint16_t qp_id,
1404 			       rte_cryptodev_callback_fn cb_fn,
1405 			       void *cb_arg)
1406 {
1407 	struct rte_cryptodev *dev;
1408 	struct rte_cryptodev_cb_rcu *list;
1409 	struct rte_cryptodev_cb *cb, *tail;
1410 
1411 	if (!cb_fn) {
1412 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1413 		rte_errno = EINVAL;
1414 		return NULL;
1415 	}
1416 
1417 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1418 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1419 		rte_errno = ENODEV;
1420 		return NULL;
1421 	}
1422 
1423 	dev = &rte_crypto_devices[dev_id];
1424 	if (qp_id >= dev->data->nb_queue_pairs) {
1425 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1426 		rte_errno = ENODEV;
1427 		return NULL;
1428 	}
1429 
1430 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1431 	if (cb == NULL) {
1432 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1433 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1434 		rte_errno = ENOMEM;
1435 		return NULL;
1436 	}
1437 
1438 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1439 
1440 	cb->fn = cb_fn;
1441 	cb->arg = cb_arg;
1442 
1443 	/* Add the callbacks in fifo order. */
1444 	list = &dev->deq_cbs[qp_id];
1445 	tail = list->next;
1446 
1447 	if (tail) {
1448 		while (tail->next)
1449 			tail = tail->next;
1450 		/* Stores to cb->fn and cb->arg should complete before
1451 		 * cb is visible to data plane.
1452 		 */
1453 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1454 	} else {
1455 		/* Stores to cb->fn and cb->arg should complete before
1456 		 * cb is visible to data plane.
1457 		 */
1458 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1459 	}
1460 
1461 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1462 
1463 	return cb;
1464 }
1465 
1466 int
1467 rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1468 				  uint16_t qp_id,
1469 				  struct rte_cryptodev_cb *cb)
1470 {
1471 	struct rte_cryptodev *dev;
1472 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1473 	struct rte_cryptodev_cb_rcu *list;
1474 	int ret;
1475 
1476 	ret = -EINVAL;
1477 
1478 	if (!cb) {
1479 		CDEV_LOG_ERR("Callback is NULL");
1480 		return -EINVAL;
1481 	}
1482 
1483 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1484 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1485 		return -ENODEV;
1486 	}
1487 
1488 	dev = &rte_crypto_devices[dev_id];
1489 	if (qp_id >= dev->data->nb_queue_pairs) {
1490 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1491 		return -ENODEV;
1492 	}
1493 
1494 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1495 	if (dev->deq_cbs == NULL) {
1496 		CDEV_LOG_ERR("Callback not initialized");
1497 		goto cb_err;
1498 	}
1499 
1500 	list = &dev->deq_cbs[qp_id];
1501 	if (list == NULL) {
1502 		CDEV_LOG_ERR("Callback list is NULL");
1503 		goto cb_err;
1504 	}
1505 
1506 	if (list->qsbr == NULL) {
1507 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1508 		goto cb_err;
1509 	}
1510 
1511 	prev_cb = &list->next;
1512 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1513 		curr_cb = *prev_cb;
1514 		if (curr_cb == cb) {
1515 			/* Remove the user cb from the callback list. */
1516 			__atomic_store_n(prev_cb, curr_cb->next,
1517 				__ATOMIC_RELAXED);
1518 			ret = 0;
1519 			break;
1520 		}
1521 	}
1522 
1523 	if (!ret) {
1524 		/* Call sync with invalid thread id as this is part of
1525 		 * control plane API
1526 		 */
1527 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1528 		rte_free(cb);
1529 	}
1530 
1531 cb_err:
1532 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1533 	return ret;
1534 }
1535 
1536 int
1537 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1538 {
1539 	struct rte_cryptodev *dev;
1540 
1541 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1542 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1543 		return -ENODEV;
1544 	}
1545 
1546 	if (stats == NULL) {
1547 		CDEV_LOG_ERR("Invalid stats ptr");
1548 		return -EINVAL;
1549 	}
1550 
1551 	dev = &rte_crypto_devices[dev_id];
1552 	memset(stats, 0, sizeof(*stats));
1553 
1554 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1555 	(*dev->dev_ops->stats_get)(dev, stats);
1556 	return 0;
1557 }
1558 
1559 void
1560 rte_cryptodev_stats_reset(uint8_t dev_id)
1561 {
1562 	struct rte_cryptodev *dev;
1563 
1564 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1565 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1566 		return;
1567 	}
1568 
1569 	dev = &rte_crypto_devices[dev_id];
1570 
1571 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1572 	(*dev->dev_ops->stats_reset)(dev);
1573 }
1574 
1575 void
1576 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1577 {
1578 	struct rte_cryptodev *dev;
1579 
1580 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1581 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1582 		return;
1583 	}
1584 
1585 	dev = &rte_crypto_devices[dev_id];
1586 
1587 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1588 
1589 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1590 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1591 
1592 	dev_info->driver_name = dev->device->driver->name;
1593 	dev_info->device = dev->device;
1594 }
1595 
1596 int
1597 rte_cryptodev_callback_register(uint8_t dev_id,
1598 			enum rte_cryptodev_event_type event,
1599 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1600 {
1601 	struct rte_cryptodev *dev;
1602 	struct rte_cryptodev_callback *user_cb;
1603 
1604 	if (!cb_fn)
1605 		return -EINVAL;
1606 
1607 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1608 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1609 		return -EINVAL;
1610 	}
1611 
1612 	dev = &rte_crypto_devices[dev_id];
1613 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1614 
1615 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1616 		if (user_cb->cb_fn == cb_fn &&
1617 			user_cb->cb_arg == cb_arg &&
1618 			user_cb->event == event) {
1619 			break;
1620 		}
1621 	}
1622 
1623 	/* create a new callback. */
1624 	if (user_cb == NULL) {
1625 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1626 				sizeof(struct rte_cryptodev_callback), 0);
1627 		if (user_cb != NULL) {
1628 			user_cb->cb_fn = cb_fn;
1629 			user_cb->cb_arg = cb_arg;
1630 			user_cb->event = event;
1631 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1632 		}
1633 	}
1634 
1635 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1636 	return (user_cb == NULL) ? -ENOMEM : 0;
1637 }
1638 
1639 int
1640 rte_cryptodev_callback_unregister(uint8_t dev_id,
1641 			enum rte_cryptodev_event_type event,
1642 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1643 {
1644 	int ret;
1645 	struct rte_cryptodev *dev;
1646 	struct rte_cryptodev_callback *cb, *next;
1647 
1648 	if (!cb_fn)
1649 		return -EINVAL;
1650 
1651 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1652 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1653 		return -EINVAL;
1654 	}
1655 
1656 	dev = &rte_crypto_devices[dev_id];
1657 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1658 
1659 	ret = 0;
1660 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1661 
1662 		next = TAILQ_NEXT(cb, next);
1663 
1664 		if (cb->cb_fn != cb_fn || cb->event != event ||
1665 				(cb->cb_arg != (void *)-1 &&
1666 				cb->cb_arg != cb_arg))
1667 			continue;
1668 
1669 		/*
1670 		 * if this callback is not executing right now,
1671 		 * then remove it.
1672 		 */
1673 		if (cb->active == 0) {
1674 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1675 			rte_free(cb);
1676 		} else {
1677 			ret = -EAGAIN;
1678 		}
1679 	}
1680 
1681 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1682 	return ret;
1683 }
1684 
1685 void
1686 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1687 	enum rte_cryptodev_event_type event)
1688 {
1689 	struct rte_cryptodev_callback *cb_lst;
1690 	struct rte_cryptodev_callback dev_cb;
1691 
1692 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1693 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1694 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1695 			continue;
1696 		dev_cb = *cb_lst;
1697 		cb_lst->active = 1;
1698 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1699 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1700 						dev_cb.cb_arg);
1701 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
1702 		cb_lst->active = 0;
1703 	}
1704 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1705 }
1706 
1707 int
1708 rte_cryptodev_sym_session_init(uint8_t dev_id,
1709 		struct rte_cryptodev_sym_session *sess,
1710 		struct rte_crypto_sym_xform *xforms,
1711 		struct rte_mempool *mp)
1712 {
1713 	struct rte_cryptodev *dev;
1714 	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1715 			dev_id);
1716 	uint8_t index;
1717 	int ret;
1718 
1719 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1720 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1721 		return -EINVAL;
1722 	}
1723 
1724 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1725 
1726 	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
1727 		return -EINVAL;
1728 
1729 	if (mp->elt_size < sess_priv_sz)
1730 		return -EINVAL;
1731 
1732 	index = dev->driver_id;
1733 	if (index >= sess->nb_drivers)
1734 		return -EINVAL;
1735 
1736 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1737 
1738 	if (sess->sess_data[index].refcnt == 0) {
1739 		ret = dev->dev_ops->sym_session_configure(dev, xforms,
1740 							sess, mp);
1741 		if (ret < 0) {
1742 			CDEV_LOG_ERR(
1743 				"dev_id %d failed to configure session details",
1744 				dev_id);
1745 			return ret;
1746 		}
1747 	}
1748 
1749 	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1750 	sess->sess_data[index].refcnt++;
1751 	return 0;
1752 }
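/*
 * Illustrative session setup (not part of the library): allocate a session
 * header from sess_mp, then initialize the device-specific private data
 * from sess_priv_mp. Both mempools and the xform chain are assumed to have
 * been prepared by the application.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			sess_priv_mp) < 0)
 *		printf("session setup failed\n");
 */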
1753 
1754 int
1755 rte_cryptodev_asym_session_init(uint8_t dev_id,
1756 		struct rte_cryptodev_asym_session *sess,
1757 		struct rte_crypto_asym_xform *xforms,
1758 		struct rte_mempool *mp)
1759 {
1760 	struct rte_cryptodev *dev;
1761 	uint8_t index;
1762 	int ret;
1763 
1764 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1765 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1766 		return -EINVAL;
1767 	}
1768 
1769 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1770 
1771 	if (sess == NULL || xforms == NULL || dev == NULL)
1772 		return -EINVAL;
1773 
1774 	index = dev->driver_id;
1775 
1776 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1777 				-ENOTSUP);
1778 
1779 	if (sess->sess_private_data[index] == NULL) {
1780 		ret = dev->dev_ops->asym_session_configure(dev,
1781 							xforms,
1782 							sess, mp);
1783 		if (ret < 0) {
1784 			CDEV_LOG_ERR(
1785 				"dev_id %d failed to configure session details",
1786 				dev_id);
1787 			return ret;
1788 		}
1789 	}
1790 
1791 	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1792 	return 0;
1793 }
1794 
1795 struct rte_mempool *
1796 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1797 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1798 	int socket_id)
1799 {
1800 	struct rte_mempool *mp;
1801 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1802 	uint32_t obj_sz;
1803 
1804 	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1805 	if (obj_sz > elt_size)
1806 		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1807 				obj_sz);
1808 	else
1809 		obj_sz = elt_size;
1810 
1811 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1812 			(uint32_t)(sizeof(*pool_priv)),
1813 			NULL, NULL, NULL, NULL,
1814 			socket_id, 0);
1815 	if (mp == NULL) {
1816 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1817 			__func__, name, rte_errno);
1818 		return NULL;
1819 	}
1820 
1821 	pool_priv = rte_mempool_get_priv(mp);
1822 	if (!pool_priv) {
1823 		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1824 			__func__, name);
1825 		rte_mempool_free(mp);
1826 		return NULL;
1827 	}
1828 
1829 	pool_priv->nb_drivers = nb_drivers;
1830 	pool_priv->user_data_sz = user_data_size;
1831 
1832 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1833 		elt_size, cache_size, user_data_size, mp);
1834 	return mp;
1835 }
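/*
 * Illustrative pool creation (not part of the library): the header pool is
 * created with the helper above so its element and private sizes are
 * derived correctly, while the private-data pool uses a plain mempool sized
 * from rte_cryptodev_sym_get_private_session_size(). Element counts and
 * cache sizes are placeholders.
 *
 *	struct rte_mempool *sess_mp, *sess_priv_mp;
 *
 *	sess_mp = rte_cryptodev_sym_session_pool_create("sess", 1024, 0, 64,
 *			0, rte_socket_id());
 *	sess_priv_mp = rte_mempool_create("sess_priv", 1024,
 *			rte_cryptodev_sym_get_private_session_size(dev_id),
 *			64, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 */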
1836 
1837 static unsigned int
1838 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1839 {
1840 	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1841 			sess->user_data_sz;
1842 }
1843 
1844 static uint8_t
1845 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1846 {
1847 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1848 
1849 	if (!mp)
1850 		return 0;
1851 
1852 	pool_priv = rte_mempool_get_priv(mp);
1853 
1854 	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1855 			pool_priv->nb_drivers != nb_drivers ||
1856 			mp->elt_size <
1857 				rte_cryptodev_sym_get_header_session_size()
1858 				+ pool_priv->user_data_sz)
1859 		return 0;
1860 
1861 	return 1;
1862 }
1863 
1864 struct rte_cryptodev_sym_session *
1865 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1866 {
1867 	struct rte_cryptodev_sym_session *sess;
1868 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1869 
1870 	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
1871 		CDEV_LOG_ERR("Invalid mempool\n");
1872 		return NULL;
1873 	}
1874 
1875 	pool_priv = rte_mempool_get_priv(mp);
1876 
1877 	/* Allocate a session structure from the session pool */
1878 	if (rte_mempool_get(mp, (void **)&sess)) {
1879 		CDEV_LOG_ERR("couldn't get object from session mempool");
1880 		return NULL;
1881 	}
1882 
1883 	sess->nb_drivers = pool_priv->nb_drivers;
1884 	sess->user_data_sz = pool_priv->user_data_sz;
1885 	sess->opaque_data = 0;
1886 
1887 	/* Clear device session pointer.
1888 	 * Include the flag indicating presence of user data
1889 	 */
1890 	memset(sess->sess_data, 0,
1891 			rte_cryptodev_sym_session_data_size(sess));
1892 
1893 	rte_cryptodev_trace_sym_session_create(mp, sess);
1894 	return sess;
1895 }
1896 
1897 struct rte_cryptodev_asym_session *
1898 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1899 {
1900 	struct rte_cryptodev_asym_session *sess;
1901 	unsigned int session_size =
1902 			rte_cryptodev_asym_get_header_session_size();
1903 
1904 	if (!mp) {
1905 		CDEV_LOG_ERR("invalid mempool\n");
1906 		return NULL;
1907 	}
1908 
1909 	/* Verify that the provided mempool elements are big enough. */
1910 	if (mp->elt_size < session_size) {
1911 		CDEV_LOG_ERR(
1912 			"mempool elements too small to hold session objects");
1913 		return NULL;
1914 	}
1915 
1916 	/* Allocate a session structure from the session pool */
1917 	if (rte_mempool_get(mp, (void **)&sess)) {
1918 		CDEV_LOG_ERR("couldn't get object from session mempool");
1919 		return NULL;
1920 	}
1921 
1922 	/* Clear device session pointer.
1923 	 * Include the flag indicating presence of private data
1924 	 */
1925 	memset(sess, 0, session_size);
1926 
1927 	rte_cryptodev_trace_asym_session_create(mp, sess);
1928 	return sess;
1929 }
1930 
1931 int
1932 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1933 		struct rte_cryptodev_sym_session *sess)
1934 {
1935 	struct rte_cryptodev *dev;
1936 	uint8_t driver_id;
1937 
1938 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1939 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1940 		return -EINVAL;
1941 	}
1942 
1943 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1944 
1945 	if (dev == NULL || sess == NULL)
1946 		return -EINVAL;
1947 
1948 	driver_id = dev->driver_id;
1949 	if (sess->sess_data[driver_id].refcnt == 0)
1950 		return 0;
1951 	if (--sess->sess_data[driver_id].refcnt != 0)
1952 		return -EBUSY;
1953 
1954 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1955 
1956 	dev->dev_ops->sym_session_clear(dev, sess);
1957 
1958 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1959 	return 0;
1960 }
1961 
1962 int
1963 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1964 		struct rte_cryptodev_asym_session *sess)
1965 {
1966 	struct rte_cryptodev *dev;
1967 
1968 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1969 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1970 		return -EINVAL;
1971 	}
1972 
1973 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1974 
1975 	if (dev == NULL || sess == NULL)
1976 		return -EINVAL;
1977 
1978 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1979 
1980 	dev->dev_ops->asym_session_clear(dev, sess);
1981 
1982 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1983 	return 0;
1984 }
1985 
1986 int
1987 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1988 {
1989 	uint8_t i;
1990 	struct rte_mempool *sess_mp;
1991 
1992 	if (sess == NULL)
1993 		return -EINVAL;
1994 
1995 	/* Check that all device private data has been freed */
1996 	for (i = 0; i < sess->nb_drivers; i++) {
1997 		if (sess->sess_data[i].refcnt != 0)
1998 			return -EBUSY;
1999 	}
2000 
2001 	/* Return session to mempool */
2002 	sess_mp = rte_mempool_from_obj(sess);
2003 	rte_mempool_put(sess_mp, sess);
2004 
2005 	rte_cryptodev_trace_sym_session_free(sess);
2006 	return 0;
2007 }
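/*
 * Illustrative teardown order ("dev_id" and "sess" assumed): driver private
 * data must be cleared on every device the session was initialised for
 * before the header can be returned to its mempool.
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	if (rte_cryptodev_sym_session_free(sess) == -EBUSY)
 *		CDEV_LOG_ERR("session still holds driver private data");
 */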
2008 
2009 int
2010 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
2011 {
2012 	uint8_t i;
2013 	void *sess_priv;
2014 	struct rte_mempool *sess_mp;
2015 
2016 	if (sess == NULL)
2017 		return -EINVAL;
2018 
2019 	/* Check that all device private data has been freed */
2020 	for (i = 0; i < nb_drivers; i++) {
2021 		sess_priv = get_asym_session_private_data(sess, i);
2022 		if (sess_priv != NULL)
2023 			return -EBUSY;
2024 	}
2025 
2026 	/* Return session to mempool */
2027 	sess_mp = rte_mempool_from_obj(sess);
2028 	rte_mempool_put(sess_mp, sess);
2029 
2030 	rte_cryptodev_trace_asym_session_free(sess);
2031 	return 0;
2032 }
2033 
2034 unsigned int
2035 rte_cryptodev_sym_get_header_session_size(void)
2036 {
2037 	/*
2038 	 * Header contains pointers to the private data of all registered
2039 	 * drivers and all information necessary to safely clear or free
2040 	 * the session.
2041 	 */
2042 	struct rte_cryptodev_sym_session s = {0};
2043 
2044 	s.nb_drivers = nb_drivers;
2045 
2046 	return (unsigned int)(sizeof(s) +
2047 			rte_cryptodev_sym_session_data_size(&s));
2048 }
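/*
 * Sizing sketch (two assumed device ids dev0/dev1): the header pool element
 * must cover the size returned above, while the separate private-data pool
 * must cover the largest per-device private session size.
 *
 *	unsigned int hdr_sz = rte_cryptodev_sym_get_header_session_size();
 *	unsigned int priv_sz = RTE_MAX(
 *		rte_cryptodev_sym_get_private_session_size(dev0),
 *		rte_cryptodev_sym_get_private_session_size(dev1));
 */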
2049 
2050 unsigned int
2051 rte_cryptodev_sym_get_existing_header_session_size(
2052 		struct rte_cryptodev_sym_session *sess)
2053 {
2054 	if (!sess)
2055 		return 0;
2056 	else
2057 		return (unsigned int)(sizeof(*sess) +
2058 				rte_cryptodev_sym_session_data_size(sess));
2059 }
2060 
2061 unsigned int
2062 rte_cryptodev_asym_get_header_session_size(void)
2063 {
2064 	/*
2065 	 * Header contains pointers to the private data
2066 	 * of all registered drivers, and a flag which
2067 	 * indicates presence of private data
2068 	 */
2069 	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
2070 }
2071 
2072 unsigned int
2073 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2074 {
2075 	struct rte_cryptodev *dev;
2076 	unsigned int priv_sess_size;
2077 
2078 	if (!rte_cryptodev_is_valid_dev(dev_id))
2079 		return 0;
2080 
2081 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2082 
2083 	if (*dev->dev_ops->sym_session_get_size == NULL)
2084 		return 0;
2085 
2086 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2087 
2088 	return priv_sess_size;
2089 }
2090 
2091 unsigned int
2092 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2093 {
2094 	struct rte_cryptodev *dev;
2095 	unsigned int header_size = sizeof(void *) * nb_drivers;
2096 	unsigned int priv_sess_size;
2097 
2098 	if (!rte_cryptodev_is_valid_dev(dev_id))
2099 		return 0;
2100 
2101 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2102 
2103 	if (*dev->dev_ops->asym_session_get_size == NULL)
2104 		return 0;
2105 
2106 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2107 	if (priv_sess_size < header_size)
2108 		return header_size;
2109 
2110 	return priv_sess_size;
2111 
2112 }
2113 
2114 int
2115 rte_cryptodev_sym_session_set_user_data(
2116 					struct rte_cryptodev_sym_session *sess,
2117 					void *data,
2118 					uint16_t size)
2119 {
2120 	if (sess == NULL)
2121 		return -EINVAL;
2122 
2123 	if (sess->user_data_sz < size)
2124 		return -ENOMEM;
2125 
2126 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2127 	return 0;
2128 }
2129 
2130 void *
2131 rte_cryptodev_sym_session_get_user_data(
2132 					struct rte_cryptodev_sym_session *sess)
2133 {
2134 	if (sess == NULL || sess->user_data_sz == 0)
2135 		return NULL;
2136 
2137 	return (void *)(sess->sess_data + sess->nb_drivers);
2138 }
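/*
 * Illustrative sketch ("struct app_ctx" is an assumed application type; the
 * session pool must have been created with user_data_sz >= sizeof(ctx)):
 * user data is stored immediately after the per-driver data array.
 *
 *	struct app_ctx ctx = { 0 };
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0) {
 *		struct app_ctx *p =
 *			rte_cryptodev_sym_session_get_user_data(sess);
 *		(void)p;
 *	}
 */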
2139 
2140 static inline void
2141 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2142 {
2143 	uint32_t i;
2144 	for (i = 0; i < vec->num; i++)
2145 		vec->status[i] = errnum;
2146 }
2147 
2148 uint32_t
2149 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2150 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2151 	struct rte_crypto_sym_vec *vec)
2152 {
2153 	struct rte_cryptodev *dev;
2154 
2155 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2156 		sym_crypto_fill_status(vec, EINVAL);
2157 		return 0;
2158 	}
2159 
2160 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2161 
2162 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2163 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2164 		sym_crypto_fill_status(vec, ENOTSUP);
2165 		return 0;
2166 	}
2167 
2168 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2169 }
2170 
2171 int
2172 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2173 {
2174 	struct rte_cryptodev *dev;
2175 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2176 	int32_t priv_size;
2177 
2178 	if (!rte_cryptodev_is_valid_dev(dev_id))
2179 		return -EINVAL;
2180 
2181 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2182 
2183 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2184 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2185 		return -ENOTSUP;
2186 	}
2187 
2188 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2189 	if (priv_size < 0)
2190 		return -ENOTSUP;
2191 
2192 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2193 }
2194 
2195 int
2196 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2197 	struct rte_crypto_raw_dp_ctx *ctx,
2198 	enum rte_crypto_op_sess_type sess_type,
2199 	union rte_cryptodev_session_ctx session_ctx,
2200 	uint8_t is_update)
2201 {
2202 	struct rte_cryptodev *dev;
2203 
2204 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2205 		return -EINVAL;
2206 
2207 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2208 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2209 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2210 		return -ENOTSUP;
2211 
2212 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2213 			sess_type, session_ctx, is_update);
2214 }
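/*
 * Illustrative sketch ("dev_id", "qp_id" and "sess" assumed): the context
 * buffer handed to rte_cryptodev_configure_raw_dp_ctx() must be at least as
 * large as reported by rte_cryptodev_get_raw_dp_ctx_size().
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx =
 *			(sz > 0) ? rte_zmalloc(NULL, sz, 8) : NULL;
 *	union rte_cryptodev_session_ctx sctx = { .crypto_sess = sess };
 *	if (ctx != NULL && rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id,
 *			ctx, RTE_CRYPTO_OP_WITH_SESSION, sctx, 0) < 0)
 *		rte_free(ctx);
 */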
2215 
2216 uint32_t
2217 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2218 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2219 	void **user_data, int *enqueue_status)
2220 {
2221 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2222 			ofs, user_data, enqueue_status);
2223 }
2224 
2225 int
2226 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2227 		uint32_t n)
2228 {
2229 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2230 }
2231 
2232 uint32_t
2233 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2234 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2235 	uint32_t max_nb_to_dequeue,
2236 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2237 	void **out_user_data, uint8_t is_user_data_array,
2238 	uint32_t *n_success_jobs, int *status)
2239 {
2240 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2241 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2242 		out_user_data, is_user_data_array, n_success_jobs, status);
2243 }
2244 
2245 int
2246 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2247 		uint32_t n)
2248 {
2249 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2250 }
2251 
2252 /** Initialise rte_crypto_op mempool element */
2253 static void
2254 rte_crypto_op_init(struct rte_mempool *mempool,
2255 		void *opaque_arg,
2256 		void *_op_data,
2257 		__rte_unused unsigned i)
2258 {
2259 	struct rte_crypto_op *op = _op_data;
2260 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2261 
2262 	memset(_op_data, 0, mempool->elt_size);
2263 
2264 	__rte_crypto_op_reset(op, type);
2265 
2266 	op->phys_addr = rte_mem_virt2iova(_op_data);
2267 	op->mempool = mempool;
2268 }
2269 
2270 
2271 struct rte_mempool *
2272 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2273 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2274 		int socket_id)
2275 {
2276 	struct rte_crypto_op_pool_private *priv;
2277 
2278 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2279 			priv_size;
2280 
2281 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2282 		elt_size += sizeof(struct rte_crypto_sym_op);
2283 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2284 		elt_size += sizeof(struct rte_crypto_asym_op);
2285 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2286 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2287 		                    sizeof(struct rte_crypto_asym_op));
2288 	} else {
2289 		CDEV_LOG_ERR("Invalid op_type");
2290 		return NULL;
2291 	}
2292 
2293 	/* lookup mempool in case already allocated */
2294 	struct rte_mempool *mp = rte_mempool_lookup(name);
2295 
2296 	if (mp != NULL) {
2297 		priv = (struct rte_crypto_op_pool_private *)
2298 				rte_mempool_get_priv(mp);
2299 
2300 		if (mp->elt_size != elt_size ||
2301 				mp->cache_size < cache_size ||
2302 				mp->size < nb_elts ||
2303 				priv->priv_size < priv_size) {
2304 			mp = NULL;
2305 			CDEV_LOG_ERR("Mempool %s already exists but with "
2306 					"incompatible parameters", name);
2307 			return NULL;
2308 		}
2309 		return mp;
2310 	}
2311 
2312 	mp = rte_mempool_create(
2313 			name,
2314 			nb_elts,
2315 			elt_size,
2316 			cache_size,
2317 			sizeof(struct rte_crypto_op_pool_private),
2318 			NULL,
2319 			NULL,
2320 			rte_crypto_op_init,
2321 			&type,
2322 			socket_id,
2323 			0);
2324 
2325 	if (mp == NULL) {
2326 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2327 		return NULL;
2328 	}
2329 
2330 	priv = (struct rte_crypto_op_pool_private *)
2331 			rte_mempool_get_priv(mp);
2332 
2333 	priv->priv_size = priv_size;
2334 	priv->type = type;
2335 
2336 	return mp;
2337 }
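/*
 * Illustrative sketch ("crypto_op_pool" is an assumed name): a pool of
 * symmetric ops with 16 bytes of per-op private area, from which a single
 * op is then drawn.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 16, rte_socket_id());
 *	struct rte_crypto_op *op = (op_pool != NULL) ?
 *			rte_crypto_op_alloc(op_pool,
 *					RTE_CRYPTO_OP_TYPE_SYMMETRIC) : NULL;
 */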
2338 
2339 int
2340 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2341 {
2342 	struct rte_cryptodev *dev = NULL;
2343 	uint32_t i = 0;
2344 
2345 	if (name == NULL)
2346 		return -EINVAL;
2347 
2348 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2349 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2350 				"%s_%u", dev_name_prefix, i);
2351 
2352 		if (ret < 0)
2353 			return ret;
2354 
2355 		dev = rte_cryptodev_pmd_get_named_dev(name);
2356 		if (!dev)
2357 			return 0;
2358 	}
2359 
2360 	return -1;
2361 }
2362 
2363 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2364 
2365 static struct cryptodev_driver_list cryptodev_driver_list =
2366 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2367 
2368 int
2369 rte_cryptodev_driver_id_get(const char *name)
2370 {
2371 	struct cryptodev_driver *driver;
2372 	const char *driver_name;
2373 
2374 	if (name == NULL) {
2375 		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2376 		return -1;
2377 	}
2378 
2379 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2380 		driver_name = driver->driver->name;
2381 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2382 			return driver->id;
2383 	}
2384 	return -1;
2385 }
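/*
 * Illustrative sketch ("crypto_aesni_mb" is only an example driver name):
 * the id returned here indexes the per-driver data slots inside a symmetric
 * session header.
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *	if (drv_id < 0)
 *		CDEV_LOG_ERR("driver not registered");
 */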
2386 
2387 const char *
2388 rte_cryptodev_name_get(uint8_t dev_id)
2389 {
2390 	struct rte_cryptodev *dev;
2391 
2392 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2393 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2394 		return NULL;
2395 	}
2396 
2397 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2398 	if (dev == NULL)
2399 		return NULL;
2400 
2401 	return dev->data->name;
2402 }
2403 
2404 const char *
2405 rte_cryptodev_driver_name_get(uint8_t driver_id)
2406 {
2407 	struct cryptodev_driver *driver;
2408 
2409 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2410 		if (driver->id == driver_id)
2411 			return driver->driver->name;
2412 	return NULL;
2413 }
2414 
2415 uint8_t
2416 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2417 		const struct rte_driver *drv)
2418 {
2419 	crypto_drv->driver = drv;
2420 	crypto_drv->id = nb_drivers;
2421 
2422 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2423 
2424 	return nb_drivers++;
2425 }
2426 
2427 RTE_INIT(cryptodev_init_fp_ops)
2428 {
2429 	uint32_t i;
2430 
2431 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2432 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2433 }
2434 
2435 static int
2436 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2437 		const char *params __rte_unused,
2438 		struct rte_tel_data *d)
2439 {
2440 	int dev_id;
2441 
2442 	if (rte_cryptodev_count() < 1)
2443 		return -EINVAL;
2444 
2445 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2446 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2447 		if (rte_cryptodev_is_valid_dev(dev_id))
2448 			rte_tel_data_add_array_int(d, dev_id);
2449 
2450 	return 0;
2451 }
2452 
2453 static int
2454 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2455 		const char *params, struct rte_tel_data *d)
2456 {
2457 	struct rte_cryptodev_info cryptodev_info;
2458 	int dev_id;
2459 	char *end_param;
2460 
2461 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2462 		return -EINVAL;
2463 
2464 	dev_id = strtoul(params, &end_param, 0);
2465 	if (*end_param != '\0')
2466 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2467 	if (!rte_cryptodev_is_valid_dev(dev_id))
2468 		return -EINVAL;
2469 
2470 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2471 
2472 	rte_tel_data_start_dict(d);
2473 	rte_tel_data_add_dict_string(d, "device_name",
2474 		cryptodev_info.device->name);
2475 	rte_tel_data_add_dict_int(d, "max_nb_queue_pairs",
2476 		cryptodev_info.max_nb_queue_pairs);
2477 
2478 	return 0;
2479 }
2480 
2481 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)
2482 
2483 static int
2484 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2485 		const char *params,
2486 		struct rte_tel_data *d)
2487 {
2488 	struct rte_cryptodev_stats cryptodev_stats;
2489 	int dev_id, ret;
2490 	char *end_param;
2491 
2492 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2493 		return -EINVAL;
2494 
2495 	dev_id = strtoul(params, &end_param, 0);
2496 	if (*end_param != '\0')
2497 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2498 	if (!rte_cryptodev_is_valid_dev(dev_id))
2499 		return -EINVAL;
2500 
2501 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2502 	if (ret < 0)
2503 		return ret;
2504 
2505 	rte_tel_data_start_dict(d);
2506 	ADD_DICT_STAT(enqueued_count);
2507 	ADD_DICT_STAT(dequeued_count);
2508 	ADD_DICT_STAT(enqueue_err_count);
2509 	ADD_DICT_STAT(dequeue_err_count);
2510 
2511 	return 0;
2512 }
2513 
2514 #define CRYPTO_CAPS_SZ                                             \
2515 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2516 					sizeof(uint64_t)) /        \
2517 	 sizeof(uint64_t))
2518 
2519 static int
2520 crypto_caps_array(struct rte_tel_data *d,
2521 		  const struct rte_cryptodev_capabilities *capabilities)
2522 {
2523 	const struct rte_cryptodev_capabilities *dev_caps;
2524 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2525 	unsigned int i = 0, j;
2526 
2527 	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
2528 
2529 	while ((dev_caps = &capabilities[i++])->op !=
2530 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2531 		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
2532 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2533 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2534 			rte_tel_data_add_array_u64(d, caps_val[j]);
2535 	}
2536 
2537 	return i;
2538 }
2539 
2540 static int
2541 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2542 			  struct rte_tel_data *d)
2543 {
2544 	struct rte_cryptodev_info dev_info;
2545 	struct rte_tel_data *crypto_caps;
2546 	int crypto_caps_n;
2547 	char *end_param;
2548 	int dev_id;
2549 
2550 	if (!params || strlen(params) == 0 || !isdigit(*params))
2551 		return -EINVAL;
2552 
2553 	dev_id = strtoul(params, &end_param, 0);
2554 	if (*end_param != '\0')
2555 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2556 	if (!rte_cryptodev_is_valid_dev(dev_id))
2557 		return -EINVAL;
2558 
2559 	rte_tel_data_start_dict(d);
2560 	crypto_caps = rte_tel_data_alloc();
2561 	if (!crypto_caps)
2562 		return -ENOMEM;
2563 
2564 	rte_cryptodev_info_get(dev_id, &dev_info);
2565 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2566 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2567 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2568 
2569 	return 0;
2570 }
2571 
2572 RTE_INIT(cryptodev_init_telemetry)
2573 {
2574 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2575 			"Returns information for a cryptodev. Parameters: int dev_id");
2576 	rte_telemetry_register_cmd("/cryptodev/list",
2577 			cryptodev_handle_dev_list,
2578 			"Returns list of available crypto devices by IDs. No parameters.");
2579 	rte_telemetry_register_cmd("/cryptodev/stats",
2580 			cryptodev_handle_dev_stats,
2581 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2582 	rte_telemetry_register_cmd("/cryptodev/caps",
2583 			cryptodev_handle_dev_caps,
2584 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2585 }
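/*
 * These endpoints are served over the EAL telemetry socket and can be
 * exercised with usertools/dpdk-telemetry.py (illustrative session,
 * device 0 assumed):
 *
 *	--> /cryptodev/list
 *	--> /cryptodev/info,0
 *	--> /cryptodev/stats,0
 *	--> /cryptodev/caps,0
 */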
2586