1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <ctype.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <dev_driver.h>
17 #include <rte_memory.h>
18 #include <rte_memcpy.h>
19 #include <rte_memzone.h>
20 #include <rte_eal.h>
21 #include <rte_common.h>
22 #include <rte_mempool.h>
23 #include <rte_malloc.h>
24 #include <rte_errno.h>
25 #include <rte_spinlock.h>
26 #include <rte_string_fns.h>
27 #include <rte_telemetry.h>
28 
29 #include "rte_crypto.h"
30 #include "rte_cryptodev.h"
31 #include "cryptodev_pmd.h"
32 #include "rte_cryptodev_trace.h"
33 
34 static uint8_t nb_drivers;
35 
36 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
37 
38 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
39 
40 static struct rte_cryptodev_global cryptodev_globals = {
41 		.devs			= rte_crypto_devices,
42 		.data			= { NULL },
43 		.nb_devs		= 0
44 };
45 
46 /* Public fastpath APIs. */
47 struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
48 
49 /* spinlock for crypto device callbacks */
50 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
51 
52 /**
53  * The user application callback description.
54  *
55  * It contains the callback address registered by the user application,
56  * a pointer to the callback's parameters, and the event type.
57  */
58 struct rte_cryptodev_callback {
59 	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
60 	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
61 	void *cb_arg;				/**< Parameter for callback */
62 	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
63 	uint32_t active;			/**< Callback is executing */
64 };
65 
66 /**
67  * Crypto cipher algorithm string identifiers.
68  * These can be used on an application command line.
69  */
70 const char *
71 rte_crypto_cipher_algorithm_strings[] = {
72 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
73 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
74 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
75 
76 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
77 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
78 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
79 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
80 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
81 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
82 
83 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
84 
85 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
86 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
87 
88 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
89 
90 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
91 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
92 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3",
93 	[RTE_CRYPTO_CIPHER_SM4_ECB]	= "sm4-ecb",
94 	[RTE_CRYPTO_CIPHER_SM4_CBC]	= "sm4-cbc",
95 	[RTE_CRYPTO_CIPHER_SM4_CTR]	= "sm4-ctr"
96 };
97 
98 /**
99  * Crypto cipher operation string identifiers.
100  * These can be used on an application command line.
101  */
102 const char *
103 rte_crypto_cipher_operation_strings[] = {
104 		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
105 		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
106 };
107 
108 /**
109  * Crypto auth algorithm string identifiers.
110  * These can be used on an application command line.
111  */
112 const char *
113 rte_crypto_auth_algorithm_strings[] = {
114 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
115 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
116 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
117 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
118 
119 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
120 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
121 
122 	[RTE_CRYPTO_AUTH_NULL]		= "null",
123 
124 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
125 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
126 
127 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
128 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
129 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
130 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
131 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
132 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
133 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
134 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
135 
136 	[RTE_CRYPTO_AUTH_SHA3_224]	= "sha3-224",
137 	[RTE_CRYPTO_AUTH_SHA3_224_HMAC] = "sha3-224-hmac",
138 	[RTE_CRYPTO_AUTH_SHA3_256]	= "sha3-256",
139 	[RTE_CRYPTO_AUTH_SHA3_256_HMAC] = "sha3-256-hmac",
140 	[RTE_CRYPTO_AUTH_SHA3_384]	= "sha3-384",
141 	[RTE_CRYPTO_AUTH_SHA3_384_HMAC] = "sha3-384-hmac",
142 	[RTE_CRYPTO_AUTH_SHA3_512]	= "sha3-512",
143 	[RTE_CRYPTO_AUTH_SHA3_512_HMAC]	= "sha3-512-hmac",
144 
145 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
146 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
147 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3",
148 	[RTE_CRYPTO_AUTH_SM3]		= "sm3"
149 };
150 
151 /**
152  * Crypto AEAD algorithm string identifiers.
153  * These can be used on an application command line.
154  */
155 const char *
156 rte_crypto_aead_algorithm_strings[] = {
157 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
158 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
159 	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
160 };
161 
162 /**
163  * Crypto AEAD operation string identifiers.
164  * These can be used on an application command line.
165  */
166 const char *
167 rte_crypto_aead_operation_strings[] = {
168 	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
169 	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
170 };
171 
172 /**
173  * Asymmetric crypto transform operation string identifiers.
174  */
175 const char *rte_crypto_asym_xform_strings[] = {
176 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
177 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
178 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
179 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
180 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
181 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
182 	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
183 	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
184 };
185 
186 /**
187  * Asymmetric crypto operation string identifiers.
188  */
189 const char *rte_crypto_asym_op_strings[] = {
190 	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
191 	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
192 	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
193 	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify"
194 };
195 
196 /**
197  * Asymmetric crypto key exchange operation string identifiers.
198  */
199 const char *rte_crypto_asym_ke_strings[] = {
200 	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
201 	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
202 	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
203 	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
204 };
205 
206 /**
207  * The structure stored in the sym session mempool's private data area.
208  */
209 struct rte_cryptodev_sym_session_pool_private_data {
210 	uint16_t nb_drivers;
211 	/**< Number of elements in the sess_data array */
212 	uint16_t user_data_sz;
213 	/**< Session user data is placed after sess_data */
214 };
215 
216 /**
217  * The structure stored in the asym session mempool's private data area.
218  */
219 struct rte_cryptodev_asym_session_pool_private_data {
220 	uint16_t max_priv_session_sz;
221 	/**< Size of private session data used when creating mempool */
222 	uint16_t user_data_sz;
223 	/**< Session user data will be placed after sess_private_data */
224 };
225 
226 int
227 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
228 		const char *algo_string)
229 {
230 	unsigned int i;
231 
232 	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
233 		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
234 			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
235 			return 0;
236 		}
237 	}
238 
239 	/* Invalid string */
240 	return -1;
241 }
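
/*
 * Usage sketch (editor's illustration, not part of the library): mapping a
 * command-line string to its cipher enum. The same pattern applies to the
 * auth, AEAD and asym-xform lookups below; "opt" is a hypothetical variable.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *	const char *opt = "aes-cbc";
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, opt) != 0)
 *		printf("unknown cipher algorithm: %s\n", opt);
 */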
242 
243 int
244 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
245 		const char *algo_string)
246 {
247 	unsigned int i;
248 
249 	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
250 		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
251 			*algo_enum = (enum rte_crypto_auth_algorithm) i;
252 			return 0;
253 		}
254 	}
255 
256 	/* Invalid string */
257 	return -1;
258 }
259 
260 int
261 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
262 		const char *algo_string)
263 {
264 	unsigned int i;
265 
266 	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
267 		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
268 			*algo_enum = (enum rte_crypto_aead_algorithm) i;
269 			return 0;
270 		}
271 	}
272 
273 	/* Invalid string */
274 	return -1;
275 }
276 
277 int
278 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
279 		const char *xform_string)
280 {
281 	unsigned int i;
282 
283 	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
284 		if (strcmp(xform_string,
285 			rte_crypto_asym_xform_strings[i]) == 0) {
286 			*xform_enum = (enum rte_crypto_asym_xform_type) i;
287 			return 0;
288 		}
289 	}
290 
291 	/* Invalid string */
292 	return -1;
293 }
294 
295 /**
296  * Crypto auth operation string identifiers.
297  * These can be used on an application command line.
298  */
299 const char *
300 rte_crypto_auth_operation_strings[] = {
301 		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
302 		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
303 };
304 
305 const struct rte_cryptodev_symmetric_capability *
306 rte_cryptodev_sym_capability_get(uint8_t dev_id,
307 		const struct rte_cryptodev_sym_capability_idx *idx)
308 {
309 	const struct rte_cryptodev_capabilities *capability;
310 	struct rte_cryptodev_info dev_info;
311 	int i = 0;
312 
313 	rte_cryptodev_info_get(dev_id, &dev_info);
314 
315 	while ((capability = &dev_info.capabilities[i++])->op !=
316 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
317 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
318 			continue;
319 
320 		if (capability->sym.xform_type != idx->type)
321 			continue;
322 
323 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
324 			capability->sym.auth.algo == idx->algo.auth)
325 			return &capability->sym;
326 
327 		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
328 			capability->sym.cipher.algo == idx->algo.cipher)
329 			return &capability->sym;
330 
331 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
332 				capability->sym.aead.algo == idx->algo.aead)
333 			return &capability->sym;
334 	}
335 
336 	return NULL;
337 }
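
/*
 * Usage sketch (editor's illustration, not part of the library): checking
 * whether a device supports AES-GCM with example parameter sizes (128-bit
 * key, 16-byte tag, no AAD, 12-byte IV), using the check helper defined
 * further down this file; "dev_id" and "supported" are assumed to exist.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap != NULL && rte_cryptodev_sym_capability_check_aead(cap,
 *			16, 16, 0, 12) == 0)
 *		supported = true;
 */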
338 
339 static int
340 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
341 {
342 	unsigned int next_size;
343 
344 	/* Check lower/upper bounds */
345 	if (size < range->min)
346 		return -1;
347 
348 	if (size > range->max)
349 		return -1;
350 
351 	/* If range is actually only one value, size is correct */
352 	if (range->increment == 0)
353 		return 0;
354 
355 	/* Check if value is one of the supported sizes */
356 	for (next_size = range->min; next_size <= range->max;
357 			next_size += range->increment)
358 		if (size == next_size)
359 			return 0;
360 
361 	return -1;
362 }
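
/*
 * Worked example (editor's note): for a range {.min = 8, .max = 16,
 * .increment = 4}, param_range_check() accepts 8, 12 and 16 only; when
 * .increment is 0 the range collapses to the single value .min == .max.
 *
 *	struct rte_crypto_param_range r = {.min = 8, .max = 16, .increment = 4};
 *
 *	param_range_check(12, &r) == 0	(valid size)
 *	param_range_check(10, &r) == -1	(not on a 4-byte step)
 */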
363 
364 const struct rte_cryptodev_asymmetric_xform_capability *
365 rte_cryptodev_asym_capability_get(uint8_t dev_id,
366 		const struct rte_cryptodev_asym_capability_idx *idx)
367 {
368 	const struct rte_cryptodev_capabilities *capability;
369 	struct rte_cryptodev_info dev_info;
370 	unsigned int i = 0;
371 
372 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
373 	rte_cryptodev_info_get(dev_id, &dev_info);
374 
375 	while ((capability = &dev_info.capabilities[i++])->op !=
376 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
377 		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
378 			continue;
379 
380 		if (capability->asym.xform_capa.xform_type == idx->type)
381 			return &capability->asym.xform_capa;
382 	}
383 	return NULL;
384 }
385 
386 int
387 rte_cryptodev_sym_capability_check_cipher(
388 		const struct rte_cryptodev_symmetric_capability *capability,
389 		uint16_t key_size, uint16_t iv_size)
390 {
391 	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
392 		return -1;
393 
394 	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
395 		return -1;
396 
397 	return 0;
398 }
399 
400 int
401 rte_cryptodev_sym_capability_check_auth(
402 		const struct rte_cryptodev_symmetric_capability *capability,
403 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
404 {
405 	if (param_range_check(key_size, &capability->auth.key_size) != 0)
406 		return -1;
407 
408 	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
409 		return -1;
410 
411 	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
412 		return -1;
413 
414 	return 0;
415 }
416 
417 int
418 rte_cryptodev_sym_capability_check_aead(
419 		const struct rte_cryptodev_symmetric_capability *capability,
420 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
421 		uint16_t iv_size)
422 {
423 	if (param_range_check(key_size, &capability->aead.key_size) != 0)
424 		return -1;
425 
426 	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
427 		return -1;
428 
429 	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
430 		return -1;
431 
432 	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
433 		return -1;
434 
435 	return 0;
436 }
437 int
438 rte_cryptodev_asym_xform_capability_check_optype(
439 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
440 	enum rte_crypto_asym_op_type op_type)
441 {
442 	if (capability->op_types & (1 << op_type))
443 		return 1;
444 
445 	return 0;
446 }
447 
448 int
449 rte_cryptodev_asym_xform_capability_check_modlen(
450 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
451 	uint16_t modlen)
452 {
453 	/* No need to check limits if min or max is 0 */
454 	if (capability->modlen.min != 0) {
455 		if (modlen < capability->modlen.min)
456 			return -1;
457 	}
458 
459 	if (capability->modlen.max != 0) {
460 		if (modlen > capability->modlen.max)
461 			return -1;
462 	}
463 
464 	/* In any case, check that the given modlen is a multiple of the increment */
465 	if (capability->modlen.increment != 0) {
466 		if (modlen % (capability->modlen.increment))
467 			return -1;
468 	}
469 
470 	return 0;
471 }
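
/*
 * Usage sketch (editor's illustration, not part of the library): verifying
 * that a device can handle a 2048-bit (256-byte) RSA modulus; "dev_id" and
 * "modlen_ok" are assumed to exist.
 *
 *	struct rte_cryptodev_asym_capability_idx idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *capa;
 *
 *	capa = rte_cryptodev_asym_capability_get(dev_id, &idx);
 *	if (capa != NULL &&
 *	    rte_cryptodev_asym_xform_capability_check_modlen(capa, 256) == 0)
 *		modlen_ok = true;
 */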
472 
473 /* spinlock for crypto device enq callbacks */
474 static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
475 
476 static void
477 cryptodev_cb_cleanup(struct rte_cryptodev *dev)
478 {
479 	struct rte_cryptodev_cb_rcu *list;
480 	struct rte_cryptodev_cb *cb, *next;
481 	uint16_t qp_id;
482 
483 	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
484 		return;
485 
486 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
487 		list = &dev->enq_cbs[qp_id];
488 		cb = list->next;
489 		while (cb != NULL) {
490 			next = cb->next;
491 			rte_free(cb);
492 			cb = next;
493 		}
494 
495 		rte_free(list->qsbr);
496 	}
497 
498 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
499 		list = &dev->deq_cbs[qp_id];
500 		cb = list->next;
501 		while (cb != NULL) {
502 			next = cb->next;
503 			rte_free(cb);
504 			cb = next;
505 		}
506 
507 		rte_free(list->qsbr);
508 	}
509 
510 	rte_free(dev->enq_cbs);
511 	dev->enq_cbs = NULL;
512 	rte_free(dev->deq_cbs);
513 	dev->deq_cbs = NULL;
514 }
515 
516 static int
517 cryptodev_cb_init(struct rte_cryptodev *dev)
518 {
519 	struct rte_cryptodev_cb_rcu *list;
520 	struct rte_rcu_qsbr *qsbr;
521 	uint16_t qp_id;
522 	size_t size;
523 
524 	/* Max threads set to 1, as only one data-plane thread accesses a queue pair */
525 	const uint32_t max_threads = 1;
526 
527 	dev->enq_cbs = rte_zmalloc(NULL,
528 				   sizeof(struct rte_cryptodev_cb_rcu) *
529 				   dev->data->nb_queue_pairs, 0);
530 	if (dev->enq_cbs == NULL) {
531 		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
532 		return -ENOMEM;
533 	}
534 
535 	dev->deq_cbs = rte_zmalloc(NULL,
536 				   sizeof(struct rte_cryptodev_cb_rcu) *
537 				   dev->data->nb_queue_pairs, 0);
538 	if (dev->deq_cbs == NULL) {
539 		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
540 		rte_free(dev->enq_cbs);
541 		return -ENOMEM;
542 	}
543 
544 	/* Create RCU QSBR variable */
545 	size = rte_rcu_qsbr_get_memsize(max_threads);
546 
547 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
548 		list = &dev->enq_cbs[qp_id];
549 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
550 		if (qsbr == NULL) {
551 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
552 				"queue_pair_id=%d", qp_id);
553 			goto cb_init_err;
554 		}
555 
556 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
557 			CDEV_LOG_ERR("Failed to initialize RCU on "
558 				"queue_pair_id=%d", qp_id);
559 			goto cb_init_err;
560 		}
561 
562 		list->qsbr = qsbr;
563 	}
564 
565 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
566 		list = &dev->deq_cbs[qp_id];
567 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
568 		if (qsbr == NULL) {
569 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
570 				"queue_pair_id=%d", qp_id);
571 			goto cb_init_err;
572 		}
573 
574 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
575 			CDEV_LOG_ERR("Failed to initialize RCU on "
576 				"queue_pair_id=%d", qp_id);
577 			goto cb_init_err;
578 		}
579 
580 		list->qsbr = qsbr;
581 	}
582 
583 	return 0;
584 
585 cb_init_err:
586 	cryptodev_cb_cleanup(dev);
587 	return -ENOMEM;
588 }
589 
590 const char *
591 rte_cryptodev_get_feature_name(uint64_t flag)
592 {
593 	switch (flag) {
594 	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
595 		return "SYMMETRIC_CRYPTO";
596 	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
597 		return "ASYMMETRIC_CRYPTO";
598 	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
599 		return "SYM_OPERATION_CHAINING";
600 	case RTE_CRYPTODEV_FF_CPU_SSE:
601 		return "CPU_SSE";
602 	case RTE_CRYPTODEV_FF_CPU_AVX:
603 		return "CPU_AVX";
604 	case RTE_CRYPTODEV_FF_CPU_AVX2:
605 		return "CPU_AVX2";
606 	case RTE_CRYPTODEV_FF_CPU_AVX512:
607 		return "CPU_AVX512";
608 	case RTE_CRYPTODEV_FF_CPU_AESNI:
609 		return "CPU_AESNI";
610 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
611 		return "HW_ACCELERATED";
612 	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
613 		return "IN_PLACE_SGL";
614 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
615 		return "OOP_SGL_IN_SGL_OUT";
616 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
617 		return "OOP_SGL_IN_LB_OUT";
618 	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
619 		return "OOP_LB_IN_SGL_OUT";
620 	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
621 		return "OOP_LB_IN_LB_OUT";
622 	case RTE_CRYPTODEV_FF_CPU_NEON:
623 		return "CPU_NEON";
624 	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
625 		return "CPU_ARM_CE";
626 	case RTE_CRYPTODEV_FF_SECURITY:
627 		return "SECURITY_PROTOCOL";
628 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
629 		return "RSA_PRIV_OP_KEY_EXP";
630 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
631 		return "RSA_PRIV_OP_KEY_QT";
632 	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
633 		return "DIGEST_ENCRYPTED";
634 	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
635 		return "SYM_CPU_CRYPTO";
636 	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
637 		return "ASYM_SESSIONLESS";
638 	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
639 		return "SYM_SESSIONLESS";
640 	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
641 		return "NON_BYTE_ALIGNED_DATA";
642 	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
643 		return "CIPHER_MULTIPLE_DATA_UNITS";
644 	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
645 		return "CIPHER_WRAPPED_KEY";
646 	default:
647 		return NULL;
648 	}
649 }
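
/*
 * Usage sketch (editor's illustration, not part of the library): printing
 * the name of every feature flag a device advertises. Unknown bits make
 * rte_cryptodev_get_feature_name() return NULL and are skipped.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (flag = 1; flag != 0; flag <<= 1)
 *		if ((info.feature_flags & flag) &&
 *				rte_cryptodev_get_feature_name(flag) != NULL)
 *			printf("%s\n", rte_cryptodev_get_feature_name(flag));
 */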
650 
651 struct rte_cryptodev *
652 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
653 {
654 	return &cryptodev_globals.devs[dev_id];
655 }
656 
657 struct rte_cryptodev *
658 rte_cryptodev_pmd_get_named_dev(const char *name)
659 {
660 	struct rte_cryptodev *dev;
661 	unsigned int i;
662 
663 	if (name == NULL)
664 		return NULL;
665 
666 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
667 		dev = &cryptodev_globals.devs[i];
668 
669 		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
670 				(strcmp(dev->data->name, name) == 0))
671 			return dev;
672 	}
673 
674 	return NULL;
675 }
676 
677 static inline uint8_t
678 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
679 {
680 	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
681 			rte_crypto_devices[dev_id].data == NULL)
682 		return 0;
683 
684 	return 1;
685 }
686 
687 unsigned int
688 rte_cryptodev_is_valid_dev(uint8_t dev_id)
689 {
690 	struct rte_cryptodev *dev = NULL;
691 
692 	if (!rte_cryptodev_is_valid_device_data(dev_id))
693 		return 0;
694 
695 	dev = rte_cryptodev_pmd_get_dev(dev_id);
696 	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
697 		return 0;
698 	else
699 		return 1;
700 }
701 
702 
703 int
704 rte_cryptodev_get_dev_id(const char *name)
705 {
706 	unsigned i;
707 
708 	if (name == NULL)
709 		return -1;
710 
711 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
712 		if (!rte_cryptodev_is_valid_device_data(i))
713 			continue;
714 		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
715 				== 0) &&
716 				(cryptodev_globals.devs[i].attached ==
717 						RTE_CRYPTODEV_ATTACHED))
718 			return i;
719 	}
720 
721 	return -1;
722 }
723 
724 uint8_t
725 rte_cryptodev_count(void)
726 {
727 	return cryptodev_globals.nb_devs;
728 }
729 
730 uint8_t
731 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
732 {
733 	uint8_t i, dev_count = 0;
734 
735 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
736 		if (cryptodev_globals.devs[i].driver_id == driver_id &&
737 			cryptodev_globals.devs[i].attached ==
738 					RTE_CRYPTODEV_ATTACHED)
739 			dev_count++;
740 
741 	return dev_count;
742 }
743 
744 uint8_t
745 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
746 	uint8_t nb_devices)
747 {
748 	uint8_t i, count = 0;
749 	struct rte_cryptodev *devs = cryptodev_globals.devs;
750 
751 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
752 		if (!rte_cryptodev_is_valid_device_data(i))
753 			continue;
754 
755 		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
756 			int cmp;
757 
758 			cmp = strncmp(devs[i].device->driver->name,
759 					driver_name,
760 					strlen(driver_name) + 1);
761 
762 			if (cmp == 0)
763 				devices[count++] = devs[i].data->dev_id;
764 		}
765 	}
766 
767 	return count;
768 }
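
/*
 * Usage sketch (editor's illustration, not part of the library): collecting
 * the ids of all attached devices bound to one driver; the driver name is
 * an example value.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	while (n-- > 0)
 *		printf("matched dev_id %u\n", ids[n]);
 */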
769 
770 void *
771 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
772 {
773 	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
774 			(rte_crypto_devices[dev_id].feature_flags &
775 			RTE_CRYPTODEV_FF_SECURITY))
776 		return rte_crypto_devices[dev_id].security_ctx;
777 
778 	return NULL;
779 }
780 
781 int
782 rte_cryptodev_socket_id(uint8_t dev_id)
783 {
784 	struct rte_cryptodev *dev;
785 
786 	if (!rte_cryptodev_is_valid_dev(dev_id))
787 		return -1;
788 
789 	dev = rte_cryptodev_pmd_get_dev(dev_id);
790 
791 	return dev->data->socket_id;
792 }
793 
794 static inline int
795 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
796 		int socket_id)
797 {
798 	char mz_name[RTE_MEMZONE_NAMESIZE];
799 	const struct rte_memzone *mz;
800 	int n;
801 
802 	/* generate memzone name */
803 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
804 	if (n >= (int)sizeof(mz_name))
805 		return -EINVAL;
806 
807 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
808 		mz = rte_memzone_reserve(mz_name,
809 				sizeof(struct rte_cryptodev_data),
810 				socket_id, 0);
811 		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
812 				mz_name, mz);
813 	} else {
814 		mz = rte_memzone_lookup(mz_name);
815 		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
816 				mz_name, mz);
817 	}
818 
819 	if (mz == NULL)
820 		return -ENOMEM;
821 
822 	*data = mz->addr;
823 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
824 		memset(*data, 0, sizeof(struct rte_cryptodev_data));
825 
826 	return 0;
827 }
828 
829 static inline int
830 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
831 {
832 	char mz_name[RTE_MEMZONE_NAMESIZE];
833 	const struct rte_memzone *mz;
834 	int n;
835 
836 	/* generate memzone name */
837 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
838 	if (n >= (int)sizeof(mz_name))
839 		return -EINVAL;
840 
841 	mz = rte_memzone_lookup(mz_name);
842 	if (mz == NULL)
843 		return -ENOMEM;
844 
845 	RTE_ASSERT(*data == mz->addr);
846 	*data = NULL;
847 
848 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
849 		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
850 				mz_name, mz);
851 		return rte_memzone_free(mz);
852 	} else {
853 		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
854 				mz_name, mz);
855 	}
856 
857 	return 0;
858 }
859 
860 static uint8_t
861 rte_cryptodev_find_free_device_index(void)
862 {
863 	uint8_t dev_id;
864 
865 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
866 		if (rte_crypto_devices[dev_id].attached ==
867 				RTE_CRYPTODEV_DETACHED)
868 			return dev_id;
869 	}
870 	return RTE_CRYPTO_MAX_DEVS;
871 }
872 
873 struct rte_cryptodev *
874 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
875 {
876 	struct rte_cryptodev *cryptodev;
877 	uint8_t dev_id;
878 
879 	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
880 		CDEV_LOG_ERR("Crypto device with name %s already "
881 				"allocated!", name);
882 		return NULL;
883 	}
884 
885 	dev_id = rte_cryptodev_find_free_device_index();
886 	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
887 		CDEV_LOG_ERR("Reached maximum number of crypto devices");
888 		return NULL;
889 	}
890 
891 	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
892 
893 	if (cryptodev->data == NULL) {
894 		struct rte_cryptodev_data **cryptodev_data =
895 				&cryptodev_globals.data[dev_id];
896 
897 		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
898 				socket_id);
899 
900 		if (retval < 0 || *cryptodev_data == NULL)
901 			return NULL;
902 
903 		cryptodev->data = *cryptodev_data;
904 
905 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
906 			strlcpy(cryptodev->data->name, name,
907 				RTE_CRYPTODEV_NAME_MAX_LEN);
908 
909 			cryptodev->data->dev_id = dev_id;
910 			cryptodev->data->socket_id = socket_id;
911 			cryptodev->data->dev_started = 0;
912 			CDEV_LOG_DEBUG("PRIMARY:init data");
913 		}
914 
915 		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
916 				cryptodev->data->name,
917 				cryptodev->data->dev_id,
918 				cryptodev->data->socket_id,
919 				cryptodev->data->dev_started);
920 
921 		/* init user callbacks */
922 		TAILQ_INIT(&(cryptodev->link_intr_cbs));
923 
924 		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
925 
926 		cryptodev_globals.nb_devs++;
927 	}
928 
929 	return cryptodev;
930 }
931 
932 int
933 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
934 {
935 	int ret;
936 	uint8_t dev_id;
937 
938 	if (cryptodev == NULL)
939 		return -EINVAL;
940 
941 	dev_id = cryptodev->data->dev_id;
942 
943 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
944 
945 	/* Close device only if device operations have been set */
946 	if (cryptodev->dev_ops) {
947 		ret = rte_cryptodev_close(dev_id);
948 		if (ret < 0)
949 			return ret;
950 	}
951 
952 	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
953 	if (ret < 0)
954 		return ret;
955 
956 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
957 	cryptodev_globals.nb_devs--;
958 	return 0;
959 }
960 
961 uint16_t
962 rte_cryptodev_queue_pair_count(uint8_t dev_id)
963 {
964 	struct rte_cryptodev *dev;
965 
966 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
967 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
968 		return 0;
969 	}
970 
971 	dev = &rte_crypto_devices[dev_id];
972 	return dev->data->nb_queue_pairs;
973 }
974 
975 static int
976 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
977 		int socket_id)
978 {
979 	struct rte_cryptodev_info dev_info;
980 	void **qp;
981 	unsigned i;
982 
983 	if ((dev == NULL) || (nb_qpairs < 1)) {
984 		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
985 							dev, nb_qpairs);
986 		return -EINVAL;
987 	}
988 
989 	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
990 			nb_qpairs, dev->data->dev_id);
991 
992 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
993 
994 	if (*dev->dev_ops->dev_infos_get == NULL)
995 		return -ENOTSUP;
996 	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
997 
998 	if (nb_qpairs > dev_info.max_nb_queue_pairs) {
999 		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
1000 				nb_qpairs, dev->data->dev_id);
1001 		return -EINVAL;
1002 	}
1003 
1004 	if (dev->data->queue_pairs == NULL) { /* first time configuration */
1005 		dev->data->queue_pairs = rte_zmalloc_socket(
1006 				"cryptodev->queue_pairs",
1007 				sizeof(dev->data->queue_pairs[0]) *
1008 				dev_info.max_nb_queue_pairs,
1009 				RTE_CACHE_LINE_SIZE, socket_id);
1010 
1011 		if (dev->data->queue_pairs == NULL) {
1012 			dev->data->nb_queue_pairs = 0;
1013 			CDEV_LOG_ERR("failed to allocate memory for queue pair "
1014 							"metadata, nb_queues %u",
1015 							nb_qpairs);
1016 			return -(ENOMEM);
1017 		}
1018 	} else { /* re-configure */
1019 		int ret;
1020 		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
1021 
1022 		qp = dev->data->queue_pairs;
1023 
1024 		if (*dev->dev_ops->queue_pair_release == NULL)
1025 			return -ENOTSUP;
1026 
1027 		for (i = nb_qpairs; i < old_nb_queues; i++) {
1028 			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
1029 			if (ret < 0)
1030 				return ret;
1031 			qp[i] = NULL;
1032 		}
1033 
1034 	}
1035 	dev->data->nb_queue_pairs = nb_qpairs;
1036 	return 0;
1037 }
1038 
1039 int
1040 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
1041 {
1042 	struct rte_cryptodev *dev;
1043 	int diag;
1044 
1045 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1046 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1047 		return -EINVAL;
1048 	}
1049 
1050 	dev = &rte_crypto_devices[dev_id];
1051 
1052 	if (dev->data->dev_started) {
1053 		CDEV_LOG_ERR(
1054 		    "device %d must be stopped to allow configuration", dev_id);
1055 		return -EBUSY;
1056 	}
1057 
1058 	if (*dev->dev_ops->dev_configure == NULL)
1059 		return -ENOTSUP;
1060 
1061 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1062 	cryptodev_cb_cleanup(dev);
1063 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1064 
1065 	/* Setup new number of queue pairs and reconfigure device. */
1066 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
1067 			config->socket_id);
1068 	if (diag != 0) {
1069 		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
1070 				dev_id, diag);
1071 		return diag;
1072 	}
1073 
1074 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1075 	diag = cryptodev_cb_init(dev);
1076 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1077 	if (diag) {
1078 		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
1079 		return diag;
1080 	}
1081 
1082 	rte_cryptodev_trace_configure(dev_id, config);
1083 	return (*dev->dev_ops->dev_configure)(dev, config);
1084 }
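
/*
 * Usage sketch (editor's illustration, not part of the library): the usual
 * control-path order is configure, then queue_pair_setup for each pair,
 * then start. Field values below are examples only.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 2,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		return;
 *	... set up each queue pair, then call rte_cryptodev_start(dev_id) ...
 */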
1085 
1086 int
1087 rte_cryptodev_start(uint8_t dev_id)
1088 {
1089 	struct rte_cryptodev *dev;
1090 	int diag;
1091 
1092 	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1093 
1094 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1095 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1096 		return -EINVAL;
1097 	}
1098 
1099 	dev = &rte_crypto_devices[dev_id];
1100 
1101 	if (*dev->dev_ops->dev_start == NULL)
1102 		return -ENOTSUP;
1103 
1104 	if (dev->data->dev_started != 0) {
1105 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1106 			dev_id);
1107 		return 0;
1108 	}
1109 
1110 	diag = (*dev->dev_ops->dev_start)(dev);
1111 	/* expose selection of PMD fast-path functions */
1112 	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
1113 
1114 	rte_cryptodev_trace_start(dev_id, diag);
1115 	if (diag == 0)
1116 		dev->data->dev_started = 1;
1117 	else
1118 		return diag;
1119 
1120 	return 0;
1121 }
1122 
1123 void
1124 rte_cryptodev_stop(uint8_t dev_id)
1125 {
1126 	struct rte_cryptodev *dev;
1127 
1128 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1129 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1130 		return;
1131 	}
1132 
1133 	dev = &rte_crypto_devices[dev_id];
1134 
1135 	if (*dev->dev_ops->dev_stop == NULL)
1136 		return;
1137 
1138 	if (dev->data->dev_started == 0) {
1139 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1140 			dev_id);
1141 		return;
1142 	}
1143 
1144 	/* point fast-path functions to dummy ones */
1145 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
1146 
1147 	(*dev->dev_ops->dev_stop)(dev);
1148 	rte_cryptodev_trace_stop(dev_id);
1149 	dev->data->dev_started = 0;
1150 }
1151 
1152 int
1153 rte_cryptodev_close(uint8_t dev_id)
1154 {
1155 	struct rte_cryptodev *dev;
1156 	int retval;
1157 
1158 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1159 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1160 		return -1;
1161 	}
1162 
1163 	dev = &rte_crypto_devices[dev_id];
1164 
1165 	/* Device must be stopped before it can be closed */
1166 	if (dev->data->dev_started == 1) {
1167 		CDEV_LOG_ERR("Device %u must be stopped before closing",
1168 				dev_id);
1169 		return -EBUSY;
1170 	}
1171 
1172 	/* We can't close the device if there are outstanding sessions in use */
1173 	if (dev->data->session_pool != NULL) {
1174 		if (!rte_mempool_full(dev->data->session_pool)) {
1175 			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1176 					"has sessions still in use, free "
1177 					"all sessions before calling close",
1178 					(unsigned)dev_id);
1179 			return -EBUSY;
1180 		}
1181 	}
1182 
1183 	if (*dev->dev_ops->dev_close == NULL)
1184 		return -ENOTSUP;
1185 	retval = (*dev->dev_ops->dev_close)(dev);
1186 	rte_cryptodev_trace_close(dev_id, retval);
1187 
1188 	if (retval < 0)
1189 		return retval;
1190 
1191 	return 0;
1192 }
1193 
1194 int
1195 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1196 {
1197 	struct rte_cryptodev *dev;
1198 
1199 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1200 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1201 		return -EINVAL;
1202 	}
1203 
1204 	dev = &rte_crypto_devices[dev_id];
1205 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1206 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1207 		return -EINVAL;
1208 	}
1209 	void **qps = dev->data->queue_pairs;
1210 
1211 	if (qps[queue_pair_id]) {
1212 		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1213 			queue_pair_id, dev_id);
1214 		return 1;
1215 	}
1216 
1217 	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1218 		queue_pair_id, dev_id);
1219 
1220 	return 0;
1221 }
1222 
1223 int
1224 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1225 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1226 
1227 {
1228 	struct rte_cryptodev *dev;
1229 
1230 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1231 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1232 		return -EINVAL;
1233 	}
1234 
1235 	dev = &rte_crypto_devices[dev_id];
1236 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1237 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1238 		return -EINVAL;
1239 	}
1240 
1241 	if (!qp_conf) {
1242 		CDEV_LOG_ERR("qp_conf cannot be NULL");
1243 		return -EINVAL;
1244 	}
1245 
1246 	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1247 			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
1248 		CDEV_LOG_ERR("Invalid mempools");
1249 		return -EINVAL;
1250 	}
1251 
1252 	if (qp_conf->mp_session) {
1253 		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1254 		uint32_t obj_size = qp_conf->mp_session->elt_size;
1255 		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1256 		struct rte_cryptodev_sym_session s = {0};
1257 
1258 		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1259 		if (!pool_priv || qp_conf->mp_session->private_data_size <
1260 				sizeof(*pool_priv)) {
1261 			CDEV_LOG_ERR("Invalid mempool");
1262 			return -EINVAL;
1263 		}
1264 
1265 		s.nb_drivers = pool_priv->nb_drivers;
1266 		s.user_data_sz = pool_priv->user_data_sz;
1267 
1268 		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1269 			obj_size) || (s.nb_drivers <= dev->driver_id) ||
1270 			rte_cryptodev_sym_get_private_session_size(dev_id) >
1271 				obj_priv_size) {
1272 			CDEV_LOG_ERR("Invalid mempool");
1273 			return -EINVAL;
1274 		}
1275 	}
1276 
1277 	if (dev->data->dev_started) {
1278 		CDEV_LOG_ERR(
1279 		    "device %d must be stopped to allow configuration", dev_id);
1280 		return -EBUSY;
1281 	}
1282 
1283 	if (*dev->dev_ops->queue_pair_setup == NULL)
1284 		return -ENOTSUP;
1285 
1286 	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1287 	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1288 			socket_id);
1289 }
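
/*
 * Usage sketch (editor's illustration, not part of the library): setting up
 * queue pair 0. "sess_mp" and "sess_priv_mp" are hypothetical mempools
 * created with the session pool helpers further down this file.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		... handle error ...
 */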
1290 
1291 struct rte_cryptodev_cb *
1292 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1293 			       uint16_t qp_id,
1294 			       rte_cryptodev_callback_fn cb_fn,
1295 			       void *cb_arg)
1296 {
1297 	struct rte_cryptodev *dev;
1298 	struct rte_cryptodev_cb_rcu *list;
1299 	struct rte_cryptodev_cb *cb, *tail;
1300 
1301 	if (!cb_fn) {
1302 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1303 		rte_errno = EINVAL;
1304 		return NULL;
1305 	}
1306 
1307 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1308 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1309 		rte_errno = ENODEV;
1310 		return NULL;
1311 	}
1312 
1313 	dev = &rte_crypto_devices[dev_id];
1314 	if (qp_id >= dev->data->nb_queue_pairs) {
1315 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1316 		rte_errno = ENODEV;
1317 		return NULL;
1318 	}
1319 
1320 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1321 	if (cb == NULL) {
1322 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1323 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1324 		rte_errno = ENOMEM;
1325 		return NULL;
1326 	}
1327 
1328 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1329 
1330 	cb->fn = cb_fn;
1331 	cb->arg = cb_arg;
1332 
1333 	/* Add the callbacks in FIFO order. */
1334 	list = &dev->enq_cbs[qp_id];
1335 	tail = list->next;
1336 
1337 	if (tail) {
1338 		while (tail->next)
1339 			tail = tail->next;
1340 		/* Stores to cb->fn and cb->param should complete before
1341 		 * cb is visible to data plane.
1342 		 */
1343 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1344 	} else {
1345 		/* Stores to cb->fn and cb->param should complete before
1346 		 * cb is visible to data plane.
1347 		 */
1348 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1349 	}
1350 
1351 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1352 
1353 	return cb;
1354 }
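
/*
 * Usage sketch (editor's illustration, not part of the library): a minimal
 * enqueue callback that only counts operations. It is assumed to match the
 * rte_cryptodev_callback_fn prototype; callbacks run in the data path and
 * should stay short.
 *
 *	static uint16_t
 *	count_ops(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static uint64_t total;
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_ops, &total);
 */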
1355 
1356 int
1357 rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1358 				  uint16_t qp_id,
1359 				  struct rte_cryptodev_cb *cb)
1360 {
1361 	struct rte_cryptodev *dev;
1362 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1363 	struct rte_cryptodev_cb_rcu *list;
1364 	int ret;
1365 
1366 	ret = -EINVAL;
1367 
1368 	if (!cb) {
1369 		CDEV_LOG_ERR("Callback is NULL");
1370 		return -EINVAL;
1371 	}
1372 
1373 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1374 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1375 		return -ENODEV;
1376 	}
1377 
1378 	dev = &rte_crypto_devices[dev_id];
1379 	if (qp_id >= dev->data->nb_queue_pairs) {
1380 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1381 		return -ENODEV;
1382 	}
1383 
1384 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1385 	if (dev->enq_cbs == NULL) {
1386 		CDEV_LOG_ERR("Callback not initialized");
1387 		goto cb_err;
1388 	}
1389 
1390 	list = &dev->enq_cbs[qp_id];
1391 	if (list == NULL) {
1392 		CDEV_LOG_ERR("Callback list is NULL");
1393 		goto cb_err;
1394 	}
1395 
1396 	if (list->qsbr == NULL) {
1397 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1398 		goto cb_err;
1399 	}
1400 
1401 	prev_cb = &list->next;
1402 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1403 		curr_cb = *prev_cb;
1404 		if (curr_cb == cb) {
1405 			/* Remove the user cb from the callback list. */
1406 			__atomic_store_n(prev_cb, curr_cb->next,
1407 				__ATOMIC_RELAXED);
1408 			ret = 0;
1409 			break;
1410 		}
1411 	}
1412 
1413 	if (!ret) {
1414 		/* Call sync with invalid thread id as this is part of
1415 		 * control plane API
1416 		 */
1417 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1418 		rte_free(cb);
1419 	}
1420 
1421 cb_err:
1422 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1423 	return ret;
1424 }
1425 
1426 struct rte_cryptodev_cb *
1427 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1428 			       uint16_t qp_id,
1429 			       rte_cryptodev_callback_fn cb_fn,
1430 			       void *cb_arg)
1431 {
1432 	struct rte_cryptodev *dev;
1433 	struct rte_cryptodev_cb_rcu *list;
1434 	struct rte_cryptodev_cb *cb, *tail;
1435 
1436 	if (!cb_fn) {
1437 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1438 		rte_errno = EINVAL;
1439 		return NULL;
1440 	}
1441 
1442 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1443 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1444 		rte_errno = ENODEV;
1445 		return NULL;
1446 	}
1447 
1448 	dev = &rte_crypto_devices[dev_id];
1449 	if (qp_id >= dev->data->nb_queue_pairs) {
1450 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1451 		rte_errno = ENODEV;
1452 		return NULL;
1453 	}
1454 
1455 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1456 	if (cb == NULL) {
1457 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1458 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1459 		rte_errno = ENOMEM;
1460 		return NULL;
1461 	}
1462 
1463 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1464 
1465 	cb->fn = cb_fn;
1466 	cb->arg = cb_arg;
1467 
1468 	/* Add the callbacks in FIFO order. */
1469 	list = &dev->deq_cbs[qp_id];
1470 	tail = list->next;
1471 
1472 	if (tail) {
1473 		while (tail->next)
1474 			tail = tail->next;
1475 		/* Stores to cb->fn and cb->param should complete before
1476 		 * cb is visible to data plane.
1477 		 */
1478 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1479 	} else {
1480 		/* Stores to cb->fn and cb->param should complete before
1481 		 * cb is visible to data plane.
1482 		 */
1483 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1484 	}
1485 
1486 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1487 
1488 	return cb;
1489 }
1490 
1491 int
1492 rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1493 				  uint16_t qp_id,
1494 				  struct rte_cryptodev_cb *cb)
1495 {
1496 	struct rte_cryptodev *dev;
1497 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1498 	struct rte_cryptodev_cb_rcu *list;
1499 	int ret;
1500 
1501 	ret = -EINVAL;
1502 
1503 	if (!cb) {
1504 		CDEV_LOG_ERR("Callback is NULL");
1505 		return -EINVAL;
1506 	}
1507 
1508 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1509 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1510 		return -ENODEV;
1511 	}
1512 
1513 	dev = &rte_crypto_devices[dev_id];
1514 	if (qp_id >= dev->data->nb_queue_pairs) {
1515 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1516 		return -ENODEV;
1517 	}
1518 
1519 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1520 	if (dev->deq_cbs == NULL) {
1521 		CDEV_LOG_ERR("Callback not initialized");
1522 		goto cb_err;
1523 	}
1524 
1525 	list = &dev->deq_cbs[qp_id];
1526 	if (list == NULL) {
1527 		CDEV_LOG_ERR("Callback list is NULL");
1528 		goto cb_err;
1529 	}
1530 
1531 	if (list->qsbr == NULL) {
1532 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1533 		goto cb_err;
1534 	}
1535 
1536 	prev_cb = &list->next;
1537 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1538 		curr_cb = *prev_cb;
1539 		if (curr_cb == cb) {
1540 			/* Remove the user cb from the callback list. */
1541 			__atomic_store_n(prev_cb, curr_cb->next,
1542 				__ATOMIC_RELAXED);
1543 			ret = 0;
1544 			break;
1545 		}
1546 	}
1547 
1548 	if (!ret) {
1549 		/* Call sync with invalid thread id as this is part of
1550 		 * control plane API
1551 		 */
1552 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1553 		rte_free(cb);
1554 	}
1555 
1556 cb_err:
1557 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1558 	return ret;
1559 }
1560 
1561 int
1562 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1563 {
1564 	struct rte_cryptodev *dev;
1565 
1566 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1567 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1568 		return -ENODEV;
1569 	}
1570 
1571 	if (stats == NULL) {
1572 		CDEV_LOG_ERR("Invalid stats ptr");
1573 		return -EINVAL;
1574 	}
1575 
1576 	dev = &rte_crypto_devices[dev_id];
1577 	memset(stats, 0, sizeof(*stats));
1578 
1579 	if (*dev->dev_ops->stats_get == NULL)
1580 		return -ENOTSUP;
1581 	(*dev->dev_ops->stats_get)(dev, stats);
1582 	return 0;
1583 }
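
/*
 * Usage sketch (editor's illustration, not part of the library): dumping
 * the basic counters for one device.
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " (err %" PRIu64 "), "
 *			"deq %" PRIu64 " (err %" PRIu64 ")\n",
 *			stats.enqueued_count, stats.enqueue_err_count,
 *			stats.dequeued_count, stats.dequeue_err_count);
 */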
1584 
1585 void
1586 rte_cryptodev_stats_reset(uint8_t dev_id)
1587 {
1588 	struct rte_cryptodev *dev;
1589 
1590 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1591 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1592 		return;
1593 	}
1594 
1595 	dev = &rte_crypto_devices[dev_id];
1596 
1597 	if (*dev->dev_ops->stats_reset == NULL)
1598 		return;
1599 	(*dev->dev_ops->stats_reset)(dev);
1600 }
1601 
1602 void
1603 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1604 {
1605 	struct rte_cryptodev *dev;
1606 
1607 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1608 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1609 		return;
1610 	}
1611 
1612 	dev = &rte_crypto_devices[dev_id];
1613 
1614 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1615 
1616 	if (*dev->dev_ops->dev_infos_get == NULL)
1617 		return;
1618 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1619 
1620 	dev_info->driver_name = dev->device->driver->name;
1621 	dev_info->device = dev->device;
1622 }
1623 
1624 int
1625 rte_cryptodev_callback_register(uint8_t dev_id,
1626 			enum rte_cryptodev_event_type event,
1627 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1628 {
1629 	struct rte_cryptodev *dev;
1630 	struct rte_cryptodev_callback *user_cb;
1631 
1632 	if (!cb_fn)
1633 		return -EINVAL;
1634 
1635 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1636 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1637 		return -EINVAL;
1638 	}
1639 
1640 	dev = &rte_crypto_devices[dev_id];
1641 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1642 
1643 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1644 		if (user_cb->cb_fn == cb_fn &&
1645 			user_cb->cb_arg == cb_arg &&
1646 			user_cb->event == event) {
1647 			break;
1648 		}
1649 	}
1650 
1651 	/* create a new callback. */
1652 	if (user_cb == NULL) {
1653 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1654 				sizeof(struct rte_cryptodev_callback), 0);
1655 		if (user_cb != NULL) {
1656 			user_cb->cb_fn = cb_fn;
1657 			user_cb->cb_arg = cb_arg;
1658 			user_cb->event = event;
1659 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1660 		}
1661 	}
1662 
1663 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1664 	return (user_cb == NULL) ? -ENOMEM : 0;
1665 }
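
/*
 * Usage sketch (editor's illustration, not part of the library): registering
 * a handler for error events; the handler is assumed to match the
 * rte_cryptodev_cb_fn prototype.
 *
 *	static void
 *	on_error(uint8_t dev_id, enum rte_cryptodev_event_type event, void *arg)
 *	{
 *		printf("device %u raised an error event\n", dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			on_error, NULL);
 */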
1666 
1667 int
1668 rte_cryptodev_callback_unregister(uint8_t dev_id,
1669 			enum rte_cryptodev_event_type event,
1670 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1671 {
1672 	int ret;
1673 	struct rte_cryptodev *dev;
1674 	struct rte_cryptodev_callback *cb, *next;
1675 
1676 	if (!cb_fn)
1677 		return -EINVAL;
1678 
1679 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1680 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1681 		return -EINVAL;
1682 	}
1683 
1684 	dev = &rte_crypto_devices[dev_id];
1685 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1686 
1687 	ret = 0;
1688 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1689 
1690 		next = TAILQ_NEXT(cb, next);
1691 
1692 		if (cb->cb_fn != cb_fn || cb->event != event ||
1693 				(cb->cb_arg != (void *)-1 &&
1694 				cb->cb_arg != cb_arg))
1695 			continue;
1696 
1697 		/*
1698 		 * if this callback is not executing right now,
1699 		 * then remove it.
1700 		 */
1701 		if (cb->active == 0) {
1702 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1703 			rte_free(cb);
1704 		} else {
1705 			ret = -EAGAIN;
1706 		}
1707 	}
1708 
1709 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1710 	return ret;
1711 }
1712 
1713 void
1714 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1715 	enum rte_cryptodev_event_type event)
1716 {
1717 	struct rte_cryptodev_callback *cb_lst;
1718 	struct rte_cryptodev_callback dev_cb;
1719 
1720 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1721 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1722 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1723 			continue;
1724 		dev_cb = *cb_lst;
1725 		cb_lst->active = 1;
1726 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1727 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1728 						dev_cb.cb_arg);
1729 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
1730 		cb_lst->active = 0;
1731 	}
1732 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1733 }
1734 
1735 int
1736 rte_cryptodev_sym_session_init(uint8_t dev_id,
1737 		struct rte_cryptodev_sym_session *sess,
1738 		struct rte_crypto_sym_xform *xforms,
1739 		struct rte_mempool *mp)
1740 {
1741 	struct rte_cryptodev *dev;
1742 	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1743 			dev_id);
1744 	uint8_t index;
1745 	int ret;
1746 
1747 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1748 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1749 		return -EINVAL;
1750 	}
1751 
1752 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1753 
1754 	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
1755 		return -EINVAL;
1756 
1757 	if (mp->elt_size < sess_priv_sz)
1758 		return -EINVAL;
1759 
1760 	index = dev->driver_id;
1761 	if (index >= sess->nb_drivers)
1762 		return -EINVAL;
1763 
1764 	if (*dev->dev_ops->sym_session_configure == NULL)
1765 		return -ENOTSUP;
1766 
1767 	if (sess->sess_data[index].refcnt == 0) {
1768 		ret = dev->dev_ops->sym_session_configure(dev, xforms,
1769 							sess, mp);
1770 		if (ret < 0) {
1771 			CDEV_LOG_ERR(
1772 				"dev_id %d failed to configure session details",
1773 				dev_id);
1774 			return ret;
1775 		}
1776 	}
1777 
1778 	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1779 	sess->sess_data[index].refcnt++;
1780 	return 0;
1781 }
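
/*
 * Usage sketch (editor's illustration, not part of the library): in this API
 * generation a symmetric session is allocated from a header mempool and then
 * initialized per device against a private-data mempool. "xform", "sess_mp"
 * and "sess_priv_mp" are hypothetical.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess == NULL || rte_cryptodev_sym_session_init(dev_id, sess,
 *			&xform, sess_priv_mp) < 0)
 *		... handle error ...
 */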
1782 
1783 struct rte_mempool *
1784 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1785 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1786 	int socket_id)
1787 {
1788 	struct rte_mempool *mp;
1789 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1790 	uint32_t obj_sz;
1791 
1792 	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1793 	if (obj_sz > elt_size)
1794 		CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
1795 				obj_sz);
1796 	else
1797 		obj_sz = elt_size;
1798 
1799 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1800 			(uint32_t)(sizeof(*pool_priv)),
1801 			NULL, NULL, NULL, NULL,
1802 			socket_id, 0);
1803 	if (mp == NULL) {
1804 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
1805 			__func__, name, rte_errno);
1806 		return NULL;
1807 	}
1808 
1809 	pool_priv = rte_mempool_get_priv(mp);
1810 	if (!pool_priv) {
1811 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
1812 			__func__, name);
1813 		rte_mempool_free(mp);
1814 		return NULL;
1815 	}
1816 
1817 	pool_priv->nb_drivers = nb_drivers;
1818 	pool_priv->user_data_sz = user_data_size;
1819 
1820 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1821 		elt_size, cache_size, user_data_size, mp);
1822 	return mp;
1823 }
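
/*
 * Usage sketch (editor's illustration, not part of the library): creating a
 * header-session pool with room for 8 bytes of per-session user data;
 * passing elt_size 0 lets the function expand it to the minimum size.
 *
 *	struct rte_mempool *sess_mp =
 *		rte_cryptodev_sym_session_pool_create("sym_sess_pool",
 *			1024, 0, 32, 8, rte_socket_id());
 */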
1824 
1825 struct rte_mempool *
1826 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1827 	uint32_t cache_size, uint16_t user_data_size, int socket_id)
1828 {
1829 	struct rte_mempool *mp;
1830 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
1831 	uint32_t obj_sz, obj_sz_aligned;
1832 	uint8_t dev_id;
1833 	unsigned int priv_sz, max_priv_sz = 0;
1834 
1835 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
1836 		if (rte_cryptodev_is_valid_dev(dev_id)) {
1837 			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
1838 			if (priv_sz > max_priv_sz)
1839 				max_priv_sz = priv_sz;
1840 		}
1841 	if (max_priv_sz == 0) {
1842 		CDEV_LOG_INFO("Could not determine max private session size");
1843 		return NULL;
1844 	}
1845 
1846 	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
1847 			user_data_size;
1848 	obj_sz_aligned =  RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
1849 
1850 	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
1851 			(uint32_t)(sizeof(*pool_priv)),
1852 			NULL, NULL, NULL, NULL,
1853 			socket_id, 0);
1854 	if (mp == NULL) {
1855 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
1856 			__func__, name, rte_errno);
1857 		return NULL;
1858 	}
1859 
1860 	pool_priv = rte_mempool_get_priv(mp);
1861 	if (!pool_priv) {
1862 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
1863 			__func__, name);
1864 		rte_mempool_free(mp);
1865 		return NULL;
1866 	}
1867 	pool_priv->max_priv_session_sz = max_priv_sz;
1868 	pool_priv->user_data_sz = user_data_size;
1869 
1870 	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
1871 		user_data_size, cache_size, mp);
1872 	return mp;
1873 }
1874 
1875 static unsigned int
1876 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1877 {
1878 	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1879 			sess->user_data_sz;
1880 }
1881 
1882 static uint8_t
1883 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1884 {
1885 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1886 
1887 	if (!mp)
1888 		return 0;
1889 
1890 	pool_priv = rte_mempool_get_priv(mp);
1891 
1892 	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1893 			pool_priv->nb_drivers != nb_drivers ||
1894 			mp->elt_size <
1895 				rte_cryptodev_sym_get_header_session_size()
1896 				+ pool_priv->user_data_sz)
1897 		return 0;
1898 
1899 	return 1;
1900 }
1901 
1902 struct rte_cryptodev_sym_session *
1903 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1904 {
1905 	struct rte_cryptodev_sym_session *sess;
1906 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1907 
1908 	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
1909 		CDEV_LOG_ERR("Invalid mempool");
1910 		return NULL;
1911 	}
1912 
1913 	pool_priv = rte_mempool_get_priv(mp);
1914 
1915 	/* Allocate a session structure from the session pool */
1916 	if (rte_mempool_get(mp, (void **)&sess)) {
1917 		CDEV_LOG_ERR("couldn't get object from session mempool");
1918 		return NULL;
1919 	}
1920 
1921 	sess->nb_drivers = pool_priv->nb_drivers;
1922 	sess->user_data_sz = pool_priv->user_data_sz;
1923 	sess->opaque_data = 0;
1924 
1925 	/* Clear the device session pointers,
1926 	 * including the flag indicating presence of user data.
1927 	 */
1928 	memset(sess->sess_data, 0,
1929 			rte_cryptodev_sym_session_data_size(sess));
1930 
1931 	rte_cryptodev_trace_sym_session_create(mp, sess);
1932 	return sess;
1933 }
1934 
1935 int
1936 rte_cryptodev_asym_session_create(uint8_t dev_id,
1937 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1938 		void **session)
1939 {
1940 	struct rte_cryptodev_asym_session *sess;
1941 	uint32_t session_priv_data_sz;
1942 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
1943 	unsigned int session_header_size =
1944 			rte_cryptodev_asym_get_header_session_size();
1945 	struct rte_cryptodev *dev;
1946 	int ret;
1947 
1948 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1949 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1950 		return -EINVAL;
1951 	}
1952 
1953 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1954 
1955 	if (dev == NULL)
1956 		return -EINVAL;
1957 
1958 	if (!mp) {
1959 		CDEV_LOG_ERR("Invalid mempool");
1960 		return -EINVAL;
1961 	}
1962 
1963 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
1964 			dev_id);
1965 	pool_priv = rte_mempool_get_priv(mp);
1966 
1967 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
1968 		CDEV_LOG_DEBUG(
1969 			"The private session data size configured when creating the mempool is smaller than the private session data size this device requires.");
1970 		return -EINVAL;
1971 	}
1972 
1973 	/* Verify if provided mempool can hold elements big enough. */
1974 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
1975 		CDEV_LOG_ERR(
1976 			"mempool elements too small to hold session objects");
1977 		return -EINVAL;
1978 	}
1979 
1980 	/* Allocate a session structure from the session pool */
1981 	if (rte_mempool_get(mp, session)) {
1982 		CDEV_LOG_ERR("couldn't get object from session mempool");
1983 		return -ENOMEM;
1984 	}
1985 
1986 	sess = *session;
1987 	sess->driver_id = dev->driver_id;
1988 	sess->user_data_sz = pool_priv->user_data_sz;
1989 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
1990 
	/* Clear device session pointer, including any trailing user data. */
	memset(sess->sess_private_data, 0,
			session_priv_data_sz + sess->user_data_sz);

	if (*dev->dev_ops->asym_session_configure == NULL) {
		/* Return the unused object to its mempool on failure. */
		rte_mempool_put(mp, sess);
		*session = NULL;
		return -ENOTSUP;
	}

	ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
	if (ret < 0) {
		CDEV_LOG_ERR(
			"dev_id %d failed to configure session details",
			dev_id);
		rte_mempool_put(mp, sess);
		*session = NULL;
		return ret;
	}
2006 
2007 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
2008 	return 0;
2009 }
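
/*
 * Usage sketch (illustrative): the asymmetric flow needs a single mempool,
 * created with rte_cryptodev_asym_session_pool_create(), sized for the
 * largest private session data of the devices using it. The xform contents
 * and names below are hypothetical.
 *
 *	void *asym_sess;
 *	struct rte_crypto_asym_xform xform = {
 *		.next = NULL,
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *	};
 *	struct rte_mempool *asym_mp = rte_cryptodev_asym_session_pool_create(
 *			"asym_sess_pool", 128, 0, 0, rte_socket_id());
 *
 *	if (asym_mp == NULL ||
 *	    rte_cryptodev_asym_session_create(dev_id, &xform, asym_mp,
 *			&asym_sess) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot set up asym session\n");
 */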
2010 
2011 int
2012 rte_cryptodev_sym_session_clear(uint8_t dev_id,
2013 		struct rte_cryptodev_sym_session *sess)
2014 {
2015 	struct rte_cryptodev *dev;
2016 	uint8_t driver_id;
2017 
2018 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2019 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2020 		return -EINVAL;
2021 	}
2022 
2023 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2024 
2025 	if (dev == NULL || sess == NULL)
2026 		return -EINVAL;
2027 
2028 	driver_id = dev->driver_id;
2029 	if (sess->sess_data[driver_id].refcnt == 0)
2030 		return 0;
2031 	if (--sess->sess_data[driver_id].refcnt != 0)
2032 		return -EBUSY;
2033 
2034 	if (*dev->dev_ops->sym_session_clear == NULL)
2035 		return -ENOTSUP;
2036 
2037 	dev->dev_ops->sym_session_clear(dev, sess);
2038 
2039 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
2040 	return 0;
2041 }
2042 
2043 int
2044 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
2045 {
2046 	uint8_t i;
2047 	struct rte_mempool *sess_mp;
2048 
2049 	if (sess == NULL)
2050 		return -EINVAL;
2051 
2052 	/* Check that all device private data has been freed */
2053 	for (i = 0; i < sess->nb_drivers; i++) {
2054 		if (sess->sess_data[i].refcnt != 0)
2055 			return -EBUSY;
2056 	}
2057 
2058 	/* Return session to mempool */
2059 	sess_mp = rte_mempool_from_obj(sess);
2060 	rte_mempool_put(sess_mp, sess);
2061 
2062 	rte_cryptodev_trace_sym_session_free(sess);
2063 	return 0;
2064 }
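
/*
 * Teardown order sketch (illustrative): every device that was initialised
 * on a session must be cleared before the header can be freed; free() fails
 * with -EBUSY while any per-driver refcnt is still non-zero.
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	if (rte_cryptodev_sym_session_free(sess) == -EBUSY)
 *		... another device still holds private session data ...
 */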
2065 
2066 int
2067 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2068 {
2069 	struct rte_mempool *sess_mp;
2070 	struct rte_cryptodev *dev;
2071 
2072 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2073 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2074 		return -EINVAL;
2075 	}
2076 
2077 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2078 
2079 	if (dev == NULL || sess == NULL)
2080 		return -EINVAL;
2081 
2082 	if (*dev->dev_ops->asym_session_clear == NULL)
2083 		return -ENOTSUP;
2084 
2085 	dev->dev_ops->asym_session_clear(dev, sess);
2086 
2087 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2088 
2089 	/* Return session to mempool */
2090 	sess_mp = rte_mempool_from_obj(sess);
2091 	rte_mempool_put(sess_mp, sess);
2092 
2093 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2094 	return 0;
2095 }
2096 
2097 unsigned int
2098 rte_cryptodev_sym_get_header_session_size(void)
2099 {
	/*
	 * Header contains pointers to the private data of all registered
	 * drivers and all the information needed to safely clear
	 * or free a session.
	 */
2105 	struct rte_cryptodev_sym_session s = {0};
2106 
2107 	s.nb_drivers = nb_drivers;
2108 
2109 	return (unsigned int)(sizeof(s) +
2110 			rte_cryptodev_sym_session_data_size(&s));
2111 }
2112 
2113 unsigned int
2114 rte_cryptodev_sym_get_existing_header_session_size(
2115 		struct rte_cryptodev_sym_session *sess)
2116 {
2117 	if (!sess)
2118 		return 0;
2119 	else
2120 		return (unsigned int)(sizeof(*sess) +
2121 				rte_cryptodev_sym_session_data_size(sess));
2122 }
2123 
2124 unsigned int
2125 rte_cryptodev_asym_get_header_session_size(void)
2126 {
2127 	return sizeof(struct rte_cryptodev_asym_session);
2128 }
2129 
2130 unsigned int
2131 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2132 {
2133 	struct rte_cryptodev *dev;
2134 	unsigned int priv_sess_size;
2135 
2136 	if (!rte_cryptodev_is_valid_dev(dev_id))
2137 		return 0;
2138 
2139 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2140 
2141 	if (*dev->dev_ops->sym_session_get_size == NULL)
2142 		return 0;
2143 
2144 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2145 
2146 	return priv_sess_size;
2147 }
2148 
2149 unsigned int
2150 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2151 {
2152 	struct rte_cryptodev *dev;
2153 	unsigned int priv_sess_size;
2154 
2155 	if (!rte_cryptodev_is_valid_dev(dev_id))
2156 		return 0;
2157 
2158 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2159 
2160 	if (*dev->dev_ops->asym_session_get_size == NULL)
2161 		return 0;
2162 
2163 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2164 
2165 	return priv_sess_size;
2166 }
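
/*
 * Sizing sketch (illustrative): a private-data mempool shared by several
 * devices must use the largest private session size among them. The loop
 * assumes device ids 0..rte_cryptodev_count()-1 are valid, which holds for
 * the common case of contiguously probed devices.
 *
 *	unsigned int max_priv = 0, sz;
 *	uint8_t id;
 *
 *	for (id = 0; id < rte_cryptodev_count(); id++) {
 *		sz = rte_cryptodev_sym_get_private_session_size(id);
 *		if (sz > max_priv)
 *			max_priv = sz;
 *	}
 *	... create the private-data pool with elements of max_priv bytes ...
 */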
2167 
2168 int
2169 rte_cryptodev_sym_session_set_user_data(
2170 					struct rte_cryptodev_sym_session *sess,
2171 					void *data,
2172 					uint16_t size)
2173 {
2174 	if (sess == NULL)
2175 		return -EINVAL;
2176 
2177 	if (sess->user_data_sz < size)
2178 		return -ENOMEM;
2179 
2180 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2181 	return 0;
2182 }
2183 
2184 void *
2185 rte_cryptodev_sym_session_get_user_data(
2186 					struct rte_cryptodev_sym_session *sess)
2187 {
2188 	if (sess == NULL || sess->user_data_sz == 0)
2189 		return NULL;
2190 
2191 	return (void *)(sess->sess_data + sess->nb_drivers);
2192 }
2193 
2194 int
2195 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2196 {
2197 	struct rte_cryptodev_asym_session *sess = session;
2198 	if (sess == NULL)
2199 		return -EINVAL;
2200 
2201 	if (sess->user_data_sz < size)
2202 		return -ENOMEM;
2203 
2204 	rte_memcpy(sess->sess_private_data +
2205 			sess->max_priv_data_sz,
2206 			data, size);
2207 	return 0;
2208 }
2209 
2210 void *
2211 rte_cryptodev_asym_session_get_user_data(void *session)
2212 {
2213 	struct rte_cryptodev_asym_session *sess = session;
2214 	if (sess == NULL || sess->user_data_sz == 0)
2215 		return NULL;
2216 
2217 	return (void *)(sess->sess_private_data +
2218 			sess->max_priv_data_sz);
2219 }
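
/*
 * User-data sketch (illustrative): the session pool must have been created
 * with user_data_size >= sizeof(struct app_ctx); the struct is hypothetical.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *	struct app_ctx *p;
 *
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) == 0)
 *		p = rte_cryptodev_sym_session_get_user_data(sess);
 */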
2220 
2221 static inline void
2222 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2223 {
2224 	uint32_t i;
2225 	for (i = 0; i < vec->num; i++)
2226 		vec->status[i] = errnum;
2227 }
2228 
2229 uint32_t
2230 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2231 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2232 	struct rte_crypto_sym_vec *vec)
2233 {
2234 	struct rte_cryptodev *dev;
2235 
2236 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2237 		sym_crypto_fill_status(vec, EINVAL);
2238 		return 0;
2239 	}
2240 
2241 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2242 
2243 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2244 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2245 		sym_crypto_fill_status(vec, ENOTSUP);
2246 		return 0;
2247 	}
2248 
2249 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2250 }
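
/*
 * Call sketch (illustrative): "vec" is a caller-built rte_crypto_sym_vec
 * describing the buffers, IVs and digests of vec.num operations. On an
 * invalid device, or when RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO is not supported,
 * the whole status array is filled with EINVAL/ENOTSUP and 0 is returned.
 *
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
 *			ofs, &vec);
 *	... inspect vec.status[i] for each of the vec.num operations ...
 */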
2251 
2252 int
2253 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2254 {
2255 	struct rte_cryptodev *dev;
2256 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2257 	int32_t priv_size;
2258 
2259 	if (!rte_cryptodev_is_valid_dev(dev_id))
2260 		return -EINVAL;
2261 
2262 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2263 
2264 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2265 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2266 		return -ENOTSUP;
2267 	}
2268 
2269 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2270 	if (priv_size < 0)
2271 		return -ENOTSUP;
2272 
2273 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2274 }
2275 
2276 int
2277 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2278 	struct rte_crypto_raw_dp_ctx *ctx,
2279 	enum rte_crypto_op_sess_type sess_type,
2280 	union rte_cryptodev_session_ctx session_ctx,
2281 	uint8_t is_update)
2282 {
2283 	struct rte_cryptodev *dev;
2284 
2285 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2286 		return -EINVAL;
2287 
2288 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2289 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2290 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2291 		return -ENOTSUP;
2292 
2293 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2294 			sess_type, session_ctx, is_update);
2295 }
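
/*
 * Setup sketch (illustrative): query the context size, allocate it, then
 * bind it to an already configured queue pair; "sess" is an existing
 * symmetric session.
 *
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	int ctx_sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *
 *	if (ctx_sz < 0 || (ctx = rte_zmalloc(NULL, ctx_sz, 0)) == NULL ||
 *	    rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
 *		... bail out ...
 */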
2296 
2297 int
2298 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2299 	enum rte_crypto_op_type op_type,
2300 	enum rte_crypto_op_sess_type sess_type,
2301 	void *ev_mdata,
2302 	uint16_t size)
2303 {
2304 	struct rte_cryptodev *dev;
2305 
2306 	if (sess == NULL || ev_mdata == NULL)
2307 		return -EINVAL;
2308 
2309 	if (!rte_cryptodev_is_valid_dev(dev_id))
2310 		goto skip_pmd_op;
2311 
2312 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2313 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2314 		goto skip_pmd_op;
2315 
2316 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2317 			sess_type, ev_mdata);
2318 
2319 skip_pmd_op:
2320 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2321 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2322 				size);
2323 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2324 		struct rte_cryptodev_asym_session *s = sess;
2325 
2326 		if (s->event_mdata == NULL) {
2327 			s->event_mdata = rte_malloc(NULL, size, 0);
2328 			if (s->event_mdata == NULL)
2329 				return -ENOMEM;
2330 		}
2331 		rte_memcpy(s->event_mdata, ev_mdata, size);
2332 
2333 		return 0;
2334 	} else
2335 		return -ENOTSUP;
2336 }
2337 
2338 uint32_t
2339 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2340 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2341 	void **user_data, int *enqueue_status)
2342 {
2343 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2344 			ofs, user_data, enqueue_status);
2345 }
2346 
2347 int
2348 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2349 		uint32_t n)
2350 {
2351 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2352 }
2353 
2354 uint32_t
2355 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2356 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2357 	uint32_t max_nb_to_dequeue,
2358 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2359 	void **out_user_data, uint8_t is_user_data_array,
2360 	uint32_t *n_success_jobs, int *status)
2361 {
2362 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2363 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2364 		out_user_data, is_user_data_array, n_success_jobs, status);
2365 }
2366 
2367 int
2368 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2369 		uint32_t n)
2370 {
2371 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2372 }
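
/*
 * Data-path sketch (illustrative): enqueue a vector, kick the queue when
 * the driver reports it only cached the operations (*status == 0), and
 * pair dequeue bursts with rte_cryptodev_raw_dequeue_done() the same way.
 *
 *	int status;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			user_data, &status);
 *	if (n > 0 && status == 0)
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 */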
2373 
2374 /** Initialise rte_crypto_op mempool element */
2375 static void
2376 rte_crypto_op_init(struct rte_mempool *mempool,
2377 		void *opaque_arg,
2378 		void *_op_data,
2379 		__rte_unused unsigned i)
2380 {
2381 	struct rte_crypto_op *op = _op_data;
2382 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2383 
2384 	memset(_op_data, 0, mempool->elt_size);
2385 
2386 	__rte_crypto_op_reset(op, type);
2387 
2388 	op->phys_addr = rte_mem_virt2iova(_op_data);
2389 	op->mempool = mempool;
2390 }
2391 
2392 
2393 struct rte_mempool *
2394 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2395 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2396 		int socket_id)
2397 {
2398 	struct rte_crypto_op_pool_private *priv;
2399 
2400 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2401 			priv_size;
2402 
	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}
2414 
2415 	/* lookup mempool in case already allocated */
2416 	struct rte_mempool *mp = rte_mempool_lookup(name);
2417 
2418 	if (mp != NULL) {
2419 		priv = (struct rte_crypto_op_pool_private *)
2420 				rte_mempool_get_priv(mp);
2421 
		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with incompatible parameters",
					name);
			return NULL;
		}
2431 		return mp;
2432 	}
2433 
2434 	mp = rte_mempool_create(
2435 			name,
2436 			nb_elts,
2437 			elt_size,
2438 			cache_size,
2439 			sizeof(struct rte_crypto_op_pool_private),
2440 			NULL,
2441 			NULL,
2442 			rte_crypto_op_init,
2443 			&type,
2444 			socket_id,
2445 			0);
2446 
2447 	if (mp == NULL) {
2448 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2449 		return NULL;
2450 	}
2451 
2452 	priv = (struct rte_crypto_op_pool_private *)
2453 			rte_mempool_get_priv(mp);
2454 
2455 	priv->priv_size = priv_size;
2456 	priv->type = type;
2457 
2458 	return mp;
2459 }
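
/*
 * Usage sketch (illustrative): create a pool of symmetric operations and
 * allocate one from it; pool name and sizes are hypothetical.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 0, rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */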
2460 
2461 int
2462 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2463 {
2464 	struct rte_cryptodev *dev = NULL;
2465 	uint32_t i = 0;
2466 
2467 	if (name == NULL)
2468 		return -EINVAL;
2469 
2470 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2471 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2472 				"%s_%u", dev_name_prefix, i);
2473 
2474 		if (ret < 0)
2475 			return ret;
2476 
2477 		dev = rte_cryptodev_pmd_get_named_dev(name);
2478 		if (!dev)
2479 			return 0;
2480 	}
2481 
2482 	return -1;
2483 }
2484 
2485 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2486 
2487 static struct cryptodev_driver_list cryptodev_driver_list =
2488 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2489 
2490 int
2491 rte_cryptodev_driver_id_get(const char *name)
2492 {
2493 	struct cryptodev_driver *driver;
2494 	const char *driver_name;
2495 
2496 	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2498 		return -1;
2499 	}
2500 
2501 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2502 		driver_name = driver->driver->name;
2503 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2504 			return driver->id;
2505 	}
2506 	return -1;
2507 }
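
/*
 * Lookup sketch (illustrative): map a registered PMD name to its driver id,
 * e.g. to match sessions against devices of the same driver.
 *
 *	int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 *	if (drv_id < 0)
 *		... driver not registered ...
 */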
2508 
2509 const char *
2510 rte_cryptodev_name_get(uint8_t dev_id)
2511 {
2512 	struct rte_cryptodev *dev;
2513 
2514 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2515 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2516 		return NULL;
2517 	}
2518 
2519 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2520 	if (dev == NULL)
2521 		return NULL;
2522 
2523 	return dev->data->name;
2524 }
2525 
2526 const char *
2527 rte_cryptodev_driver_name_get(uint8_t driver_id)
2528 {
2529 	struct cryptodev_driver *driver;
2530 
2531 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2532 		if (driver->id == driver_id)
2533 			return driver->driver->name;
2534 	return NULL;
2535 }
2536 
2537 uint8_t
2538 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2539 		const struct rte_driver *drv)
2540 {
2541 	crypto_drv->driver = drv;
2542 	crypto_drv->id = nb_drivers;
2543 
2544 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2545 
2546 	return nb_drivers++;
2547 }
2548 
2549 RTE_INIT(cryptodev_init_fp_ops)
2550 {
2551 	uint32_t i;
2552 
2553 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2554 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2555 }
2556 
2557 static int
2558 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2559 		const char *params __rte_unused,
2560 		struct rte_tel_data *d)
2561 {
2562 	int dev_id;
2563 
2564 	if (rte_cryptodev_count() < 1)
2565 		return -EINVAL;
2566 
2567 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2568 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2569 		if (rte_cryptodev_is_valid_dev(dev_id))
2570 			rte_tel_data_add_array_int(d, dev_id);
2571 
2572 	return 0;
2573 }
2574 
2575 static int
2576 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2577 		const char *params, struct rte_tel_data *d)
2578 {
2579 	struct rte_cryptodev_info cryptodev_info;
2580 	int dev_id;
2581 	char *end_param;
2582 
2583 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2584 		return -EINVAL;
2585 
2586 	dev_id = strtoul(params, &end_param, 0);
2587 	if (*end_param != '\0')
2588 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2589 	if (!rte_cryptodev_is_valid_dev(dev_id))
2590 		return -EINVAL;
2591 
2592 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2593 
2594 	rte_tel_data_start_dict(d);
2595 	rte_tel_data_add_dict_string(d, "device_name",
2596 		cryptodev_info.device->name);
2597 	rte_tel_data_add_dict_int(d, "max_nb_queue_pairs",
2598 		cryptodev_info.max_nb_queue_pairs);
2599 
2600 	return 0;
2601 }
2602 
2603 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)
2604 
2605 static int
2606 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2607 		const char *params,
2608 		struct rte_tel_data *d)
2609 {
2610 	struct rte_cryptodev_stats cryptodev_stats;
2611 	int dev_id, ret;
2612 	char *end_param;
2613 
2614 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2615 		return -EINVAL;
2616 
2617 	dev_id = strtoul(params, &end_param, 0);
2618 	if (*end_param != '\0')
2619 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2620 	if (!rte_cryptodev_is_valid_dev(dev_id))
2621 		return -EINVAL;
2622 
2623 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2624 	if (ret < 0)
2625 		return ret;
2626 
2627 	rte_tel_data_start_dict(d);
2628 	ADD_DICT_STAT(enqueued_count);
2629 	ADD_DICT_STAT(dequeued_count);
2630 	ADD_DICT_STAT(enqueue_err_count);
2631 	ADD_DICT_STAT(dequeue_err_count);
2632 
2633 	return 0;
2634 }
2635 
2636 #define CRYPTO_CAPS_SZ                                             \
2637 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2638 					sizeof(uint64_t)) /        \
2639 	 sizeof(uint64_t))
2640 
2641 static int
2642 crypto_caps_array(struct rte_tel_data *d,
2643 		  const struct rte_cryptodev_capabilities *capabilities)
2644 {
2645 	const struct rte_cryptodev_capabilities *dev_caps;
2646 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2647 	unsigned int i = 0, j;
2648 
2649 	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
2650 
2651 	while ((dev_caps = &capabilities[i++])->op !=
2652 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(caps_val, 0, sizeof(caps_val));
2654 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2655 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2656 			rte_tel_data_add_array_u64(d, caps_val[j]);
2657 	}
2658 
2659 	return i;
2660 }
2661 
2662 static int
2663 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2664 			  struct rte_tel_data *d)
2665 {
2666 	struct rte_cryptodev_info dev_info;
2667 	struct rte_tel_data *crypto_caps;
2668 	int crypto_caps_n;
2669 	char *end_param;
2670 	int dev_id;
2671 
2672 	if (!params || strlen(params) == 0 || !isdigit(*params))
2673 		return -EINVAL;
2674 
2675 	dev_id = strtoul(params, &end_param, 0);
2676 	if (*end_param != '\0')
2677 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2678 	if (!rte_cryptodev_is_valid_dev(dev_id))
2679 		return -EINVAL;
2680 
2681 	rte_tel_data_start_dict(d);
2682 	crypto_caps = rte_tel_data_alloc();
2683 	if (!crypto_caps)
2684 		return -ENOMEM;
2685 
2686 	rte_cryptodev_info_get(dev_id, &dev_info);
2687 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2688 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2689 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2690 
2691 	return 0;
2692 }
2693 
2694 RTE_INIT(cryptodev_init_telemetry)
2695 {
2696 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2697 			"Returns information for a cryptodev. Parameters: int dev_id");
2698 	rte_telemetry_register_cmd("/cryptodev/list",
2699 			cryptodev_handle_dev_list,
2700 			"Returns list of available crypto devices by IDs. No parameters.");
2701 	rte_telemetry_register_cmd("/cryptodev/stats",
2702 			cryptodev_handle_dev_stats,
2703 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2704 	rte_telemetry_register_cmd("/cryptodev/caps",
2705 			cryptodev_handle_dev_caps,
2706 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2707 }
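
/*
 * Query sketch (illustrative): the commands registered above can be
 * exercised with usertools/dpdk-telemetry.py; the output shown is abridged.
 *
 *	--> /cryptodev/list
 *	{"/cryptodev/list": [0]}
 *	--> /cryptodev/stats,0
 *	{"/cryptodev/stats": {"enqueued_count": 0, ...}}
 */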
2708