/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* Public fastpath APIs. */
struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * String identifiers for the crypto cipher algorithms.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * String identifiers for the crypto cipher operations.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto auth algorithms.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transforms.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
};

/**
 * String identifiers for the asymmetric crypto key exchange operations.
 */
const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

/**
 * The private data structure stored in the sym session mempool private data.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

/**
 * The private data structure stored in the asym session mempool private data.
 */
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	/**< Size of private session data used when creating mempool */
	uint16_t user_data_sz;
	/**< Session user data will be placed after sess_private_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
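
/*
 * A minimal usage sketch for the string-to-enum helpers above (illustrative
 * only, kept as a comment so it is not compiled):
 *
 *	enum rte_crypto_cipher_algorithm cipher;
 *	enum rte_crypto_aead_algorithm aead;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&cipher, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 *	if (rte_cryptodev_get_aead_algo_enum(&aead, "aes-gcm") < 0)
 *		printf("unknown AEAD algorithm\n");
 *
 * Note the helpers start scanning at index 1, so index 0 (unset for the
 * symmetric tables, "none" for the asymmetric one) is never matched.
 */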

/**
 * String identifiers for the crypto auth operations.
 * They can be used on an application's command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
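
/*
 * A minimal capability-query sketch (illustrative only; dev_id is assumed
 * to identify a valid device):
 *
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL)
 *		printf("AES-GCM not supported by device %u\n", dev_id);
 */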

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}
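
/*
 * Continuing the capability sketch above: once a capability has been
 * retrieved, the check helpers above validate concrete parameter sizes
 * against the advertised (min, max, increment) ranges. The sizes here
 * (16-byte key, 16-byte digest, 0-byte AAD, 12-byte IV) are example
 * values only:
 *
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_aead(cap, 16, 16, 0, 12) == 0)
 *		printf("AES-128-GCM with 12-byte IV is supported\n");
 */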

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* no need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that the given modlen is a multiple of the increment */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

/* spinlock for crypto device enq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads set to 1, as one data-plane thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		/* Reset the pointer so a later cleanup cannot free it again */
		rte_free(dev->enq_cbs);
		dev->enq_cbs = NULL;
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}
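
/*
 * A small sketch showing the typical use of the feature-name helper:
 * walking a device's feature_flags bit by bit (illustrative only;
 * dev_info is assumed to have been filled by rte_cryptodev_info_get()):
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1)
 *		if ((dev_info.feature_flags & flag) &&
 *		    rte_cryptodev_get_feature_name(flag) != NULL)
 *			printf("%s\n", rte_cryptodev_get_feature_name(flag));
 */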

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
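
/*
 * A minimal enumeration sketch (illustrative only; the driver name
 * "crypto_aesni_mb" is just an example and must match a driver that is
 * actually probed):
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n, i;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	for (i = 0; i < n; i++)
 *		printf("dev %u on socket %d\n", ids[i],
 *				rte_cryptodev_socket_id(ids[i]));
 */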

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
				dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		if (*dev->dev_ops->queue_pair_release == NULL)
			return -ENOTSUP;

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}

	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
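
/*
 * A typical configuration sketch (illustrative only; the queue-pair count
 * is an example value and error handling is trimmed):
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 2,
 *		.ff_disable = 0,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure dev %u\n", dev_id);
 *
 * Queue pairs are then set up one by one with
 * rte_cryptodev_queue_pair_setup() before rte_cryptodev_start() is called.
 */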

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	/* expose selection of PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return;

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	/* point fast-path functions to dummy ones */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id])	{
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools\n");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool\n");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (*dev->dev_ops->queue_pair_setup == NULL)
		return -ENOTSUP;

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
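
/*
 * A queue-pair setup sketch following the configuration example above
 * (illustrative only; session_pool and session_priv_pool are assumed to
 * exist, e.g. created with rte_cryptodev_sym_session_pool_create() and a
 * plain mempool sized for the device's private session data):
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.mp_session_private = session_priv_pool,
 *	};
 *	uint16_t qp_id;
 *
 *	for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++)
 *		if (rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *				rte_cryptodev_socket_id(dev_id)) < 0)
 *			rte_exit(EXIT_FAILURE, "qp %u setup failed\n", qp_id);
 */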

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}
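
/*
 * An enqueue-callback sketch (illustrative only; count_cb and counter are
 * hypothetical names, and the callback signature is assumed to follow
 * rte_cryptodev_callback_fn from rte_cryptodev.h):
 *
 *	static uint16_t
 *	count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*counter += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_cb, &counter);
 *
 * The returned handle is later passed to
 * rte_cryptodev_remove_enq_callback() below.
 */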

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	/* Check the dequeue list here, not its enqueue counterpart */
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	if (*dev->dev_ops->stats_get == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}
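
/*
 * A stats sketch (illustrative only):
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64
 *				" deq_err %" PRIu64 "\n",
 *				stats.enqueued_count, stats.dequeued_count,
 *				stats.enqueue_err_count,
 *				stats.dequeue_err_count);
 */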

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	if (*dev->dev_ops->stats_reset == NULL)
		return;
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	if (*dev->dev_ops->dev_infos_get == NULL)
		return;
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	if (*dev->dev_ops->sym_session_configure == NULL)
		return -ENOTSUP;

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}

struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	uint32_t obj_sz, obj_sz_aligned;
	uint8_t dev_id;
	unsigned int priv_sz, max_priv_sz = 0;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id)) {
			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
			if (priv_sz > max_priv_sz)
				max_priv_sz = priv_sz;
		}
	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("Could not set max private session size\n");
		return NULL;
	}

	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);

	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}
	pool_priv->max_priv_session_sz = max_priv_sz;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
		user_data_size, cache_size, mp);
	return mp;
}

static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->nb_drivers != nb_drivers ||
			mp->elt_size <
				rte_cryptodev_sym_get_header_session_size()
				+ pool_priv->user_data_sz)
		return 0;

	return 1;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool\n");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
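
/*
 * A symmetric session lifecycle sketch tying the helpers in this file
 * together (illustrative only; the xform contents, pools and key/IV
 * buffers are example placeholders, and error handling is trimmed):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(session_pool);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			session_priv_pool);
 *	... enqueue/dequeue crypto ops referencing sess ...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */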
1921 
1922 int
1923 rte_cryptodev_asym_session_create(uint8_t dev_id,
1924 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1925 		void **session)
1926 {
1927 	struct rte_cryptodev_asym_session *sess;
1928 	uint32_t session_priv_data_sz;
1929 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
1930 	unsigned int session_header_size =
1931 			rte_cryptodev_asym_get_header_session_size();
1932 	struct rte_cryptodev *dev;
1933 	int ret;
1934 
1935 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1936 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1937 		return -EINVAL;
1938 	}
1939 
1940 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1941 
1942 	if (dev == NULL)
1943 		return -EINVAL;
1944 
1945 	if (!mp) {
1946 		CDEV_LOG_ERR("invalid mempool\n");
1947 		return -EINVAL;
1948 	}
1949 
1950 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
1951 			dev_id);
1952 	pool_priv = rte_mempool_get_priv(mp);
1953 
1954 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
1955 		CDEV_LOG_DEBUG(
1956 			"The private session data size used when creating the mempool is smaller than this device's private session data.");
1957 		return -EINVAL;
1958 	}
1959 
1960 	/* Verify if provided mempool can hold elements big enough. */
1961 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
1962 		CDEV_LOG_ERR(
1963 			"mempool elements too small to hold session objects");
1964 		return -EINVAL;
1965 	}
1966 
1967 	/* Allocate a session structure from the session pool */
1968 	if (rte_mempool_get(mp, session)) {
1969 		CDEV_LOG_ERR("couldn't get object from session mempool");
1970 		return -ENOMEM;
1971 	}
1972 
1973 	sess = *session;
1974 	sess->driver_id = dev->driver_id;
1975 	sess->user_data_sz = pool_priv->user_data_sz;
1976 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
1977 
	/* Clear device private session data and any user data. */
	memset(sess->sess_private_data, 0,
			session_priv_data_sz + sess->user_data_sz);

	if (*dev->dev_ops->asym_session_configure == NULL) {
		/* Return the unused object to its mempool. */
		rte_mempool_put(mp, sess);
		return -ENOTSUP;
	}

	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		CDEV_LOG_ERR(
			"dev_id %d failed to configure session details",
			dev_id);
		/* Return the unconfigured object to its mempool. */
		rte_mempool_put(mp, sess);
		return ret;
	}
1993 
1994 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
1995 	return 0;
1996 }
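
/*
 * Editor's illustrative sketch: creating and later freeing an asymmetric
 * session directly against a device. dev_id and xform are hypothetical
 * application variables.
 *
 *	struct rte_mempool *pool = rte_cryptodev_asym_session_pool_create(
 *			"asym_sess_pool", 128, 0, 0, rte_socket_id());
 *	void *sess = NULL;
 *
 *	if (rte_cryptodev_asym_session_create(dev_id, &xform, pool,
 *			&sess) < 0)
 *		return -EINVAL;
 *	...
 *	rte_cryptodev_asym_session_free(dev_id, sess);
 */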
1997 
1998 int
1999 rte_cryptodev_sym_session_clear(uint8_t dev_id,
2000 		struct rte_cryptodev_sym_session *sess)
2001 {
2002 	struct rte_cryptodev *dev;
2003 	uint8_t driver_id;
2004 
2005 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2006 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2007 		return -EINVAL;
2008 	}
2009 
2010 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2011 
2012 	if (dev == NULL || sess == NULL)
2013 		return -EINVAL;
2014 
2015 	driver_id = dev->driver_id;
2016 	if (sess->sess_data[driver_id].refcnt == 0)
2017 		return 0;
2018 	if (--sess->sess_data[driver_id].refcnt != 0)
2019 		return -EBUSY;
2020 
2021 	if (*dev->dev_ops->sym_session_clear == NULL)
2022 		return -ENOTSUP;
2023 
2024 	dev->dev_ops->sym_session_clear(dev, sess);
2025 
2026 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
2027 	return 0;
2028 }
2029 
2030 int
2031 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
2032 {
2033 	uint8_t i;
2034 	struct rte_mempool *sess_mp;
2035 
2036 	if (sess == NULL)
2037 		return -EINVAL;
2038 
2039 	/* Check that all device private data has been freed */
2040 	for (i = 0; i < sess->nb_drivers; i++) {
2041 		if (sess->sess_data[i].refcnt != 0)
2042 			return -EBUSY;
2043 	}
2044 
2045 	/* Return session to mempool */
2046 	sess_mp = rte_mempool_from_obj(sess);
2047 	rte_mempool_put(sess_mp, sess);
2048 
2049 	rte_cryptodev_trace_sym_session_free(sess);
2050 	return 0;
2051 }
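
/*
 * Editor's note with a teardown sketch: a symmetric session must be
 * cleared on every device it was initialised on before it can be freed;
 * until then, the per-driver reference counts make
 * rte_cryptodev_sym_session_free() return -EBUSY.
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	if (rte_cryptodev_sym_session_free(sess) == -EBUSY)
 *		printf("session still in use on another device\n");
 */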
2052 
2053 int
2054 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2055 {
2056 	struct rte_mempool *sess_mp;
2057 	struct rte_cryptodev *dev;
2058 
2059 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2060 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2061 		return -EINVAL;
2062 	}
2063 
2064 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2065 
2066 	if (dev == NULL || sess == NULL)
2067 		return -EINVAL;
2068 
2069 	if (*dev->dev_ops->asym_session_clear == NULL)
2070 		return -ENOTSUP;
2071 
2072 	dev->dev_ops->asym_session_clear(dev, sess);
2073 
2074 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2075 
2076 	/* Return session to mempool */
2077 	sess_mp = rte_mempool_from_obj(sess);
2078 	rte_mempool_put(sess_mp, sess);
2079 
2080 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2081 	return 0;
2082 }
2083 
2084 unsigned int
2085 rte_cryptodev_sym_get_header_session_size(void)
2086 {
	/*
	 * The header contains pointers to the private data of all
	 * registered drivers, plus everything needed to safely clear
	 * or free a session.
	 */
2092 	struct rte_cryptodev_sym_session s = {0};
2093 
2094 	s.nb_drivers = nb_drivers;
2095 
2096 	return (unsigned int)(sizeof(s) +
2097 			rte_cryptodev_sym_session_data_size(&s));
2098 }
2099 
2100 unsigned int
2101 rte_cryptodev_sym_get_existing_header_session_size(
2102 		struct rte_cryptodev_sym_session *sess)
2103 {
	if (!sess)
		return 0;

	return (unsigned int)(sizeof(*sess) +
			rte_cryptodev_sym_session_data_size(sess));
2109 }
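
/*
 * Editor's sizing sketch: a valid symmetric session mempool element must
 * hold the header computed above plus the per-session user data, which is
 * exactly what rte_cryptodev_sym_is_valid_session_pool() verifies.
 * user_data_sz is a hypothetical application choice.
 *
 *	uint32_t elt_size = rte_cryptodev_sym_get_header_session_size() +
 *			user_data_sz;
 *	struct rte_mempool *pool = rte_cryptodev_sym_session_pool_create(
 *			"sess_hdr_pool", 1024, elt_size, 32, user_data_sz,
 *			rte_socket_id());
 */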
2110 
2111 unsigned int
2112 rte_cryptodev_asym_get_header_session_size(void)
2113 {
2114 	return sizeof(struct rte_cryptodev_asym_session);
2115 }
2116 
2117 unsigned int
2118 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2119 {
2120 	struct rte_cryptodev *dev;
2121 	unsigned int priv_sess_size;
2122 
2123 	if (!rte_cryptodev_is_valid_dev(dev_id))
2124 		return 0;
2125 
2126 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2127 
2128 	if (*dev->dev_ops->sym_session_get_size == NULL)
2129 		return 0;
2130 
2131 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2132 
2133 	return priv_sess_size;
2134 }
2135 
2136 unsigned int
2137 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2138 {
2139 	struct rte_cryptodev *dev;
2140 	unsigned int priv_sess_size;
2141 
2142 	if (!rte_cryptodev_is_valid_dev(dev_id))
2143 		return 0;
2144 
2145 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2146 
2147 	if (*dev->dev_ops->asym_session_get_size == NULL)
2148 		return 0;
2149 
2150 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2151 
2152 	return priv_sess_size;
2153 }
2154 
2155 int
2156 rte_cryptodev_sym_session_set_user_data(
2157 					struct rte_cryptodev_sym_session *sess,
2158 					void *data,
2159 					uint16_t size)
2160 {
2161 	if (sess == NULL)
2162 		return -EINVAL;
2163 
2164 	if (sess->user_data_sz < size)
2165 		return -ENOMEM;
2166 
2167 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2168 	return 0;
2169 }
2170 
2171 void *
2172 rte_cryptodev_sym_session_get_user_data(
2173 					struct rte_cryptodev_sym_session *sess)
2174 {
2175 	if (sess == NULL || sess->user_data_sz == 0)
2176 		return NULL;
2177 
2178 	return (void *)(sess->sess_data + sess->nb_drivers);
2179 }
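
/*
 * Editor's illustrative round trip: user data lives after the per-driver
 * data array inside the session header and persists for the session's
 * lifetime. flow_id is a hypothetical application value; the session pool
 * must have been created with a user data size of at least
 * sizeof(flow_id).
 *
 *	uint32_t flow_id = 42;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &flow_id,
 *			sizeof(flow_id)) == 0) {
 *		uint32_t *p = rte_cryptodev_sym_session_get_user_data(sess);
 *		printf("flow %u\n", *p);
 *	}
 */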
2180 
2181 int
2182 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2183 {
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
2186 		return -EINVAL;
2187 
2188 	if (sess->user_data_sz < size)
2189 		return -ENOMEM;
2190 
2191 	rte_memcpy(sess->sess_private_data +
2192 			sess->max_priv_data_sz,
2193 			data, size);
2194 	return 0;
2195 }
2196 
2197 void *
2198 rte_cryptodev_asym_session_get_user_data(void *session)
2199 {
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL || sess->user_data_sz == 0)
2202 		return NULL;
2203 
2204 	return (void *)(sess->sess_private_data +
2205 			sess->max_priv_data_sz);
2206 }
2207 
2208 static inline void
2209 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2210 {
2211 	uint32_t i;
2212 	for (i = 0; i < vec->num; i++)
2213 		vec->status[i] = errnum;
2214 }
2215 
2216 uint32_t
2217 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2218 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2219 	struct rte_crypto_sym_vec *vec)
2220 {
2221 	struct rte_cryptodev *dev;
2222 
2223 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2224 		sym_crypto_fill_status(vec, EINVAL);
2225 		return 0;
2226 	}
2227 
2228 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2229 
2230 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2231 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2232 		sym_crypto_fill_status(vec, ENOTSUP);
2233 		return 0;
2234 	}
2235 
2236 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2237 }
2238 
2239 int
2240 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2241 {
2242 	struct rte_cryptodev *dev;
2243 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2244 	int32_t priv_size;
2245 
2246 	if (!rte_cryptodev_is_valid_dev(dev_id))
2247 		return -EINVAL;
2248 
2249 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2250 
2251 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2252 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2253 		return -ENOTSUP;
2254 	}
2255 
2256 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2257 	if (priv_size < 0)
2258 		return -ENOTSUP;
2259 
2260 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2261 }
2262 
2263 int
2264 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2265 	struct rte_crypto_raw_dp_ctx *ctx,
2266 	enum rte_crypto_op_sess_type sess_type,
2267 	union rte_cryptodev_session_ctx session_ctx,
2268 	uint8_t is_update)
2269 {
2270 	struct rte_cryptodev *dev;
2271 
2272 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2273 		return -EINVAL;
2274 
2275 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2276 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2277 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2278 		return -ENOTSUP;
2279 
2280 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2281 			sess_type, session_ctx, is_update);
2282 }
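
/*
 * Editor's illustrative setup sketch for the raw data path: the context
 * is sized by the PMD, allocated by the application and then bound to an
 * already configured queue pair. dev_id, qp_id and sess are hypothetical
 * application variables.
 *
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *
 *	if (sz < 0)
 *		return sz;
 *	ctx = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	if (ctx == NULL)
 *		return -ENOMEM;
 *	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
 *		rte_free(ctx);
 *		return -EINVAL;
 *	}
 */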
2283 
2284 int
2285 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2286 	enum rte_crypto_op_type op_type,
2287 	enum rte_crypto_op_sess_type sess_type,
2288 	void *ev_mdata,
2289 	uint16_t size)
2290 {
2291 	struct rte_cryptodev *dev;
2292 
2293 	if (sess == NULL || ev_mdata == NULL)
2294 		return -EINVAL;
2295 
2296 	if (!rte_cryptodev_is_valid_dev(dev_id))
2297 		goto skip_pmd_op;
2298 
2299 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2300 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2301 		goto skip_pmd_op;
2302 
2303 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2304 			sess_type, ev_mdata);
2305 
2306 skip_pmd_op:
2307 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2308 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2309 				size);
2310 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2311 		struct rte_cryptodev_asym_session *s = sess;
2312 
2313 		if (s->event_mdata == NULL) {
2314 			s->event_mdata = rte_malloc(NULL, size, 0);
2315 			if (s->event_mdata == NULL)
2316 				return -ENOMEM;
2317 		}
2318 		rte_memcpy(s->event_mdata, ev_mdata, size);
2319 
2320 		return 0;
2321 	} else
2322 		return -ENOTSUP;
2323 }
2324 
2325 uint32_t
2326 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2327 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2328 	void **user_data, int *enqueue_status)
2329 {
2330 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2331 			ofs, user_data, enqueue_status);
2332 }
2333 
2334 int
2335 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2336 		uint32_t n)
2337 {
2338 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2339 }
2340 
2341 uint32_t
2342 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2343 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2344 	uint32_t max_nb_to_dequeue,
2345 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2346 	void **out_user_data, uint8_t is_user_data_array,
2347 	uint32_t *n_success_jobs, int *status)
2348 {
2349 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2350 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2351 		out_user_data, is_user_data_array, n_success_jobs, status);
2352 }
2353 
2354 int
2355 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2356 		uint32_t n)
2357 {
2358 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2359 }
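
/*
 * Editor's illustrative flow: raw data-path enqueues and dequeues are
 * two-phase, with the *_done() calls committing the batch to the queue
 * pair. ctx, vec, ofs, user_data, out_user_data and post_dequeue_cb are
 * hypothetical, already populated application variables; passing a NULL
 * get_dequeue_count callback makes the PMD use max_nb_to_dequeue.
 *
 *	int status = 0;
 *	uint32_t n_ok = 0;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			user_data, &status);
 *
 *	if (n > 0)
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 *	...
 *	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, vec.num,
 *			post_dequeue_cb, out_user_data, 1, &n_ok, &status);
 *	if (n > 0)
 *		rte_cryptodev_raw_dequeue_done(ctx, n);
 */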
2360 
2361 /** Initialise rte_crypto_op mempool element */
2362 static void
2363 rte_crypto_op_init(struct rte_mempool *mempool,
2364 		void *opaque_arg,
2365 		void *_op_data,
2366 		__rte_unused unsigned i)
2367 {
2368 	struct rte_crypto_op *op = _op_data;
2369 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2370 
2371 	memset(_op_data, 0, mempool->elt_size);
2372 
2373 	__rte_crypto_op_reset(op, type);
2374 
2375 	op->phys_addr = rte_mem_virt2iova(_op_data);
2376 	op->mempool = mempool;
2377 }
2378 
2379 
2380 struct rte_mempool *
2381 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2382 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2383 		int socket_id)
2384 {
2385 	struct rte_crypto_op_pool_private *priv;
2386 
2387 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2388 			priv_size;
2389 
2390 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2391 		elt_size += sizeof(struct rte_crypto_sym_op);
2392 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2393 		elt_size += sizeof(struct rte_crypto_asym_op);
2394 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
2397 	} else {
		CDEV_LOG_ERR("Invalid op_type");
2399 		return NULL;
2400 	}
2401 
2402 	/* lookup mempool in case already allocated */
2403 	struct rte_mempool *mp = rte_mempool_lookup(name);
2404 
2405 	if (mp != NULL) {
2406 		priv = (struct rte_crypto_op_pool_private *)
2407 				rte_mempool_get_priv(mp);
2408 
		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
2418 		return mp;
2419 	}
2420 
2421 	mp = rte_mempool_create(
2422 			name,
2423 			nb_elts,
2424 			elt_size,
2425 			cache_size,
2426 			sizeof(struct rte_crypto_op_pool_private),
2427 			NULL,
2428 			NULL,
2429 			rte_crypto_op_init,
2430 			&type,
2431 			socket_id,
2432 			0);
2433 
2434 	if (mp == NULL) {
2435 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2436 		return NULL;
2437 	}
2438 
2439 	priv = (struct rte_crypto_op_pool_private *)
2440 			rte_mempool_get_priv(mp);
2441 
2442 	priv->priv_size = priv_size;
2443 	priv->type = type;
2444 
2445 	return mp;
2446 }
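
/*
 * Editor's illustrative sketch: creating an operation pool and drawing a
 * burst of operations from it; the priv_size area follows the (sym or
 * asym) operation structure inside each element.
 *
 *	struct rte_crypto_op *ops[32];
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 0, rte_socket_id());
 *
 *	if (op_pool == NULL ||
 *			rte_crypto_op_bulk_alloc(op_pool,
 *				RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, 32) == 0)
 *		return -ENOMEM;
 */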
2447 
2448 int
2449 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2450 {
2451 	struct rte_cryptodev *dev = NULL;
2452 	uint32_t i = 0;
2453 
2454 	if (name == NULL)
2455 		return -EINVAL;
2456 
2457 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		/* Treat truncation as an error: a clipped name could
		 * collide with an existing device.
		 */
		if (ret < 0 || ret >= RTE_CRYPTODEV_NAME_MAX_LEN)
			return -EINVAL;
2463 
2464 		dev = rte_cryptodev_pmd_get_named_dev(name);
2465 		if (!dev)
2466 			return 0;
2467 	}
2468 
2469 	return -1;
2470 }
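
/*
 * Editor's note: PMDs use the helper above at probe time to pick the
 * first free name for a given prefix. A hypothetical sketch:
 *
 *	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 *
 *	if (rte_cryptodev_pmd_create_dev_name(name, "crypto_foo") == 0)
 *		printf("next free name: %s\n", name);
 */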
2471 
2472 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2473 
2474 static struct cryptodev_driver_list cryptodev_driver_list =
2475 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2476 
2477 int
2478 rte_cryptodev_driver_id_get(const char *name)
2479 {
2480 	struct cryptodev_driver *driver;
2481 	const char *driver_name;
2482 
2483 	if (name == NULL) {
		CDEV_LOG_DEBUG("name pointer NULL");
2485 		return -1;
2486 	}
2487 
2488 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2489 		driver_name = driver->driver->name;
2490 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2491 			return driver->id;
2492 	}
2493 	return -1;
2494 }
2495 
2496 const char *
2497 rte_cryptodev_name_get(uint8_t dev_id)
2498 {
2499 	struct rte_cryptodev *dev;
2500 
2501 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2502 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2503 		return NULL;
2504 	}
2505 
2506 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2507 	if (dev == NULL)
2508 		return NULL;
2509 
2510 	return dev->data->name;
2511 }
2512 
2513 const char *
2514 rte_cryptodev_driver_name_get(uint8_t driver_id)
2515 {
2516 	struct cryptodev_driver *driver;
2517 
2518 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2519 		if (driver->id == driver_id)
2520 			return driver->driver->name;
2521 	return NULL;
2522 }
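
/*
 * Editor's illustrative lookup: mapping between a driver name and its
 * numeric id. "crypto_aesni_mb" is only an example PMD name and may not
 * exist in a given build.
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *
 *	if (drv_id >= 0)
 *		printf("driver %s has id %d\n",
 *				rte_cryptodev_driver_name_get(drv_id), drv_id);
 */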
2523 
2524 uint8_t
2525 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2526 		const struct rte_driver *drv)
2527 {
2528 	crypto_drv->driver = drv;
2529 	crypto_drv->id = nb_drivers;
2530 
2531 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2532 
2533 	return nb_drivers++;
2534 }
2535 
2536 RTE_INIT(cryptodev_init_fp_ops)
2537 {
2538 	uint32_t i;
2539 
2540 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2541 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2542 }
2543 
2544 static int
2545 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2546 		const char *params __rte_unused,
2547 		struct rte_tel_data *d)
2548 {
2549 	int dev_id;
2550 
2551 	if (rte_cryptodev_count() < 1)
2552 		return -EINVAL;
2553 
2554 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2555 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2556 		if (rte_cryptodev_is_valid_dev(dev_id))
2557 			rte_tel_data_add_array_int(d, dev_id);
2558 
2559 	return 0;
2560 }
2561 
2562 static int
2563 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2564 		const char *params, struct rte_tel_data *d)
2565 {
2566 	struct rte_cryptodev_info cryptodev_info;
2567 	int dev_id;
2568 	char *end_param;
2569 
2570 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2571 		return -EINVAL;
2572 
2573 	dev_id = strtoul(params, &end_param, 0);
2574 	if (*end_param != '\0')
2575 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2576 	if (!rte_cryptodev_is_valid_dev(dev_id))
2577 		return -EINVAL;
2578 
2579 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2580 
2581 	rte_tel_data_start_dict(d);
2582 	rte_tel_data_add_dict_string(d, "device_name",
2583 		cryptodev_info.device->name);
2584 	rte_tel_data_add_dict_int(d, "max_nb_queue_pairs",
2585 		cryptodev_info.max_nb_queue_pairs);
2586 
2587 	return 0;
2588 }
2589 
2590 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)
2591 
2592 static int
2593 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2594 		const char *params,
2595 		struct rte_tel_data *d)
2596 {
2597 	struct rte_cryptodev_stats cryptodev_stats;
2598 	int dev_id, ret;
2599 	char *end_param;
2600 
2601 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2602 		return -EINVAL;
2603 
2604 	dev_id = strtoul(params, &end_param, 0);
2605 	if (*end_param != '\0')
2606 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2607 	if (!rte_cryptodev_is_valid_dev(dev_id))
2608 		return -EINVAL;
2609 
2610 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2611 	if (ret < 0)
2612 		return ret;
2613 
2614 	rte_tel_data_start_dict(d);
2615 	ADD_DICT_STAT(enqueued_count);
2616 	ADD_DICT_STAT(dequeued_count);
2617 	ADD_DICT_STAT(enqueue_err_count);
2618 	ADD_DICT_STAT(dequeue_err_count);
2619 
2620 	return 0;
2621 }
2622 
2623 #define CRYPTO_CAPS_SZ                                             \
2624 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2625 					sizeof(uint64_t)) /        \
2626 	 sizeof(uint64_t))
2627 
2628 static int
2629 crypto_caps_array(struct rte_tel_data *d,
2630 		  const struct rte_cryptodev_capabilities *capabilities)
2631 {
2632 	const struct rte_cryptodev_capabilities *dev_caps;
2633 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2634 	unsigned int i = 0, j;
2635 
2636 	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
2637 
2638 	while ((dev_caps = &capabilities[i++])->op !=
2639 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(caps_val, 0, sizeof(caps_val));
2641 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2642 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2643 			rte_tel_data_add_array_u64(d, caps_val[j]);
2644 	}
2645 
	/* The post-increment above also counted the terminating
	 * RTE_CRYPTO_OP_TYPE_UNDEFINED entry; exclude it from the count.
	 */
	return i - 1;
2647 }
2648 
2649 static int
2650 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2651 			  struct rte_tel_data *d)
2652 {
2653 	struct rte_cryptodev_info dev_info;
2654 	struct rte_tel_data *crypto_caps;
2655 	int crypto_caps_n;
2656 	char *end_param;
2657 	int dev_id;
2658 
2659 	if (!params || strlen(params) == 0 || !isdigit(*params))
2660 		return -EINVAL;
2661 
2662 	dev_id = strtoul(params, &end_param, 0);
2663 	if (*end_param != '\0')
2664 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2665 	if (!rte_cryptodev_is_valid_dev(dev_id))
2666 		return -EINVAL;
2667 
2668 	rte_tel_data_start_dict(d);
2669 	crypto_caps = rte_tel_data_alloc();
2670 	if (!crypto_caps)
2671 		return -ENOMEM;
2672 
2673 	rte_cryptodev_info_get(dev_id, &dev_info);
2674 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2675 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2676 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2677 
2678 	return 0;
2679 }
2680 
2681 RTE_INIT(cryptodev_init_telemetry)
2682 {
2683 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2684 			"Returns information for a cryptodev. Parameters: int dev_id");
2685 	rte_telemetry_register_cmd("/cryptodev/list",
2686 			cryptodev_handle_dev_list,
2687 			"Returns list of available crypto devices by IDs. No parameters.");
2688 	rte_telemetry_register_cmd("/cryptodev/stats",
2689 			cryptodev_handle_dev_stats,
2690 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2691 	rte_telemetry_register_cmd("/cryptodev/caps",
2692 			cryptodev_handle_dev_caps,
2693 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2694 }
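
/*
 * Editor's note: the commands registered above can be exercised with the
 * telemetry client shipped in usertools, e.g. (responses abbreviated):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /cryptodev/list
 *	{"/cryptodev/list": [0]}
 *	--> /cryptodev/stats,0
 *	{"/cryptodev/stats": {"enqueued_count": 0, ...}}
 */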
2695