xref: /dpdk/lib/cryptodev/rte_cryptodev.c (revision efb1a06bb3f8dbcce5e43b49d23d73aaf80b2c8f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <ctype.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <rte_dev.h>
17 #include <rte_memory.h>
18 #include <rte_memcpy.h>
19 #include <rte_memzone.h>
20 #include <rte_eal.h>
21 #include <rte_common.h>
22 #include <rte_mempool.h>
23 #include <rte_malloc.h>
24 #include <rte_errno.h>
25 #include <rte_spinlock.h>
26 #include <rte_string_fns.h>
27 #include <rte_telemetry.h>
28 
29 #include "rte_crypto.h"
30 #include "rte_cryptodev.h"
31 #include "cryptodev_pmd.h"
32 #include "rte_cryptodev_trace.h"
33 
34 static uint8_t nb_drivers;
35 
36 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
37 
38 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
39 
40 static struct rte_cryptodev_global cryptodev_globals = {
41 		.devs			= rte_crypto_devices,
42 		.data			= { NULL },
43 		.nb_devs		= 0
44 };
45 
46 /* Public fastpath APIs. */
47 struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
48 
49 /* spinlock for crypto device callbacks */
50 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
51 
52 /**
53  * The user application callback description.
54  *
55  * It contains the callback address registered by the user application,
56  * a pointer to the callback's parameter, and the event type.
57  */
58 struct rte_cryptodev_callback {
59 	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
60 	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
61 	void *cb_arg;				/**< Parameter for callback */
62 	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
63 	uint32_t active;			/**< Callback is executing */
64 };
65 
66 /**
67  * String identifiers for the crypto cipher algorithms.
68  * They can be used on an application command line.
69  */
70 const char *
71 rte_crypto_cipher_algorithm_strings[] = {
72 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
73 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
74 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
75 
76 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
77 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
78 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
79 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
80 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
81 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
82 
83 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
84 
85 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
86 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
87 
88 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
89 
90 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
91 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
92 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
93 };
94 
95 /**
96  * String identifiers for the crypto cipher operations.
97  * They can be used on an application command line.
98  */
99 const char *
100 rte_crypto_cipher_operation_strings[] = {
101 		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
102 		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
103 };
104 
105 /**
106  * String identifiers for the crypto auth algorithms.
107  * They can be used on an application command line.
108  */
109 const char *
110 rte_crypto_auth_algorithm_strings[] = {
111 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
112 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
113 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
114 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
115 
116 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
117 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
118 
119 	[RTE_CRYPTO_AUTH_NULL]		= "null",
120 
121 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
122 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
123 
124 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
125 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
126 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
127 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
128 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
129 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
130 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
131 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
132 
133 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
134 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
135 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
136 };
137 
138 /**
139  * String identifiers for the crypto AEAD algorithms.
140  * They can be used on an application command line.
141  */
142 const char *
143 rte_crypto_aead_algorithm_strings[] = {
144 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
145 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
146 	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
147 };
148 
149 /**
150  * String identifiers for the crypto AEAD operations.
151  * They can be used on an application command line.
152  */
153 const char *
154 rte_crypto_aead_operation_strings[] = {
155 	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
156 	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
157 };
158 
159 /**
160  * String identifiers for the asymmetric crypto transforms.
161  */
162 const char *rte_crypto_asym_xform_strings[] = {
163 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
164 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
165 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
166 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
167 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
168 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
169 	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
170 	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
171 };
172 
173 /**
174  * String identifiers for the asymmetric crypto operations.
175  */
176 const char *rte_crypto_asym_op_strings[] = {
177 	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
178 	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
179 	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
180 	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
181 	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
182 	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
183 	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
184 };
185 
186 /**
187  * Private data kept in the sym session mempool's private data area.
188  */
189 struct rte_cryptodev_sym_session_pool_private_data {
190 	uint16_t nb_drivers;
191 	/**< number of elements in sess_data array */
192 	uint16_t user_data_sz;
193 	/**< session user data will be placed after sess_data */
194 };
195 
196 /**
197  * Private data kept in the asym session mempool's private data area.
198  */
199 struct rte_cryptodev_asym_session_pool_private_data {
200 	uint16_t max_priv_session_sz;
201 	/**< Size of private session data used when creating mempool */
202 	uint16_t user_data_sz;
203 	/**< Session user data will be placed after sess_private_data */
204 };
205 
206 int
207 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
208 		const char *algo_string)
209 {
210 	unsigned int i;
211 
212 	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
213 		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
214 			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
215 			return 0;
216 		}
217 	}
218 
219 	/* Invalid string */
220 	return -1;
221 }
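
/*
 * Illustrative usage sketch (not part of the library): parsing a
 * command-line algorithm name with the helper above. The variable
 * names are hypothetical.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 */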
222 
223 int
224 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
225 		const char *algo_string)
226 {
227 	unsigned int i;
228 
229 	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
230 		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
231 			*algo_enum = (enum rte_crypto_auth_algorithm) i;
232 			return 0;
233 		}
234 	}
235 
236 	/* Invalid string */
237 	return -1;
238 }
239 
240 int
241 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
242 		const char *algo_string)
243 {
244 	unsigned int i;
245 
246 	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
247 		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
248 			*algo_enum = (enum rte_crypto_aead_algorithm) i;
249 			return 0;
250 		}
251 	}
252 
253 	/* Invalid string */
254 	return -1;
255 }
256 
257 int
258 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
259 		const char *xform_string)
260 {
261 	unsigned int i;
262 
263 	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
264 		if (strcmp(xform_string,
265 			rte_crypto_asym_xform_strings[i]) == 0) {
266 			*xform_enum = (enum rte_crypto_asym_xform_type) i;
267 			return 0;
268 		}
269 	}
270 
271 	/* Invalid string */
272 	return -1;
273 }
274 
275 /**
276  * String identifiers for the crypto auth operations.
277  * They can be used on an application command line.
278  */
279 const char *
280 rte_crypto_auth_operation_strings[] = {
281 		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
282 		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
283 };
284 
285 const struct rte_cryptodev_symmetric_capability *
286 rte_cryptodev_sym_capability_get(uint8_t dev_id,
287 		const struct rte_cryptodev_sym_capability_idx *idx)
288 {
289 	const struct rte_cryptodev_capabilities *capability;
290 	struct rte_cryptodev_info dev_info;
291 	int i = 0;
292 
293 	rte_cryptodev_info_get(dev_id, &dev_info);
294 
295 	while ((capability = &dev_info.capabilities[i++])->op !=
296 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
297 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
298 			continue;
299 
300 		if (capability->sym.xform_type != idx->type)
301 			continue;
302 
303 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
304 			capability->sym.auth.algo == idx->algo.auth)
305 			return &capability->sym;
306 
307 		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
308 			capability->sym.cipher.algo == idx->algo.cipher)
309 			return &capability->sym;
310 
311 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
312 				capability->sym.aead.algo == idx->algo.aead)
313 			return &capability->sym;
314 	}
315 
316 	return NULL;
317 }
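
/*
 * Illustrative usage sketch (not part of the library): querying the
 * AES-GCM capability of a device and validating parameter sizes with
 * rte_cryptodev_sym_capability_check_aead() (defined below). 'dev_id'
 * is assumed to be a valid device id.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	int gcm_128_ok = (cap != NULL &&
 *		rte_cryptodev_sym_capability_check_aead(cap,
 *			16, 16, 0, 12) == 0);
 */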
318 
319 static int
320 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
321 {
322 	unsigned int next_size;
323 
324 	/* Check lower/upper bounds */
325 	if (size < range->min)
326 		return -1;
327 
328 	if (size > range->max)
329 		return -1;
330 
331 	/* If range is actually only one value, size is correct */
332 	if (range->increment == 0)
333 		return 0;
334 
335 	/* Check if value is one of the supported sizes */
336 	for (next_size = range->min; next_size <= range->max;
337 			next_size += range->increment)
338 		if (size == next_size)
339 			return 0;
340 
341 	return -1;
342 }
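
/*
 * For example, a range of {.min = 16, .max = 32, .increment = 8}
 * accepts the sizes 16, 24 and 32, while {.min = 12, .max = 12,
 * .increment = 0} describes the single supported size 12.
 */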
343 
344 const struct rte_cryptodev_asymmetric_xform_capability *
345 rte_cryptodev_asym_capability_get(uint8_t dev_id,
346 		const struct rte_cryptodev_asym_capability_idx *idx)
347 {
348 	const struct rte_cryptodev_capabilities *capability;
349 	struct rte_cryptodev_info dev_info;
350 	unsigned int i = 0;
351 
352 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
353 	rte_cryptodev_info_get(dev_id, &dev_info);
354 
355 	while ((capability = &dev_info.capabilities[i++])->op !=
356 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
357 		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
358 			continue;
359 
360 		if (capability->asym.xform_capa.xform_type == idx->type)
361 			return &capability->asym.xform_capa;
362 	}
363 	return NULL;
364 }
365 
366 int
367 rte_cryptodev_sym_capability_check_cipher(
368 		const struct rte_cryptodev_symmetric_capability *capability,
369 		uint16_t key_size, uint16_t iv_size)
370 {
371 	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
372 		return -1;
373 
374 	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
375 		return -1;
376 
377 	return 0;
378 }
379 
380 int
381 rte_cryptodev_sym_capability_check_auth(
382 		const struct rte_cryptodev_symmetric_capability *capability,
383 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
384 {
385 	if (param_range_check(key_size, &capability->auth.key_size) != 0)
386 		return -1;
387 
388 	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
389 		return -1;
390 
391 	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
392 		return -1;
393 
394 	return 0;
395 }
396 
397 int
398 rte_cryptodev_sym_capability_check_aead(
399 		const struct rte_cryptodev_symmetric_capability *capability,
400 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
401 		uint16_t iv_size)
402 {
403 	if (param_range_check(key_size, &capability->aead.key_size) != 0)
404 		return -1;
405 
406 	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
407 		return -1;
408 
409 	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
410 		return -1;
411 
412 	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
413 		return -1;
414 
415 	return 0;
416 }

417 int
418 rte_cryptodev_asym_xform_capability_check_optype(
419 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
420 	enum rte_crypto_asym_op_type op_type)
421 {
422 	if (capability->op_types & (1 << op_type))
423 		return 1;
424 
425 	return 0;
426 }
427 
428 int
429 rte_cryptodev_asym_xform_capability_check_modlen(
430 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
431 	uint16_t modlen)
432 {
433 	/* no need to check the limits if min or max is 0 */
434 	if (capability->modlen.min != 0) {
435 		if (modlen < capability->modlen.min)
436 			return -1;
437 	}
438 
439 	if (capability->modlen.max != 0) {
440 		if (modlen > capability->modlen.max)
441 			return -1;
442 	}
443 
444 	/* in any case, check if the given modlen is a multiple of the increment */
445 	if (capability->modlen.increment != 0) {
446 		if (modlen % (capability->modlen.increment))
447 			return -1;
448 	}
449 
450 	return 0;
451 }
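
/*
 * Illustrative usage sketch (not part of the library): checking that a
 * device can sign with RSA using a 2048-bit (256-byte) modulus.
 * 'dev_id' is assumed to be a valid device id.
 *
 *	struct rte_cryptodev_asym_capability_idx idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *capa =
 *		rte_cryptodev_asym_capability_get(dev_id, &idx);
 *
 *	int rsa_2048_sign_ok = (capa != NULL &&
 *		rte_cryptodev_asym_xform_capability_check_optype(capa,
 *			RTE_CRYPTO_ASYM_OP_SIGN) &&
 *		rte_cryptodev_asym_xform_capability_check_modlen(capa,
 *			256) == 0);
 */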
452 
453 /* spinlock for crypto device enq callbacks */
454 static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
455 
456 static void
457 cryptodev_cb_cleanup(struct rte_cryptodev *dev)
458 {
459 	struct rte_cryptodev_cb_rcu *list;
460 	struct rte_cryptodev_cb *cb, *next;
461 	uint16_t qp_id;
462 
463 	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
464 		return;
465 
466 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
467 		list = &dev->enq_cbs[qp_id];
468 		cb = list->next;
469 		while (cb != NULL) {
470 			next = cb->next;
471 			rte_free(cb);
472 			cb = next;
473 		}
474 
475 		rte_free(list->qsbr);
476 	}
477 
478 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
479 		list = &dev->deq_cbs[qp_id];
480 		cb = list->next;
481 		while (cb != NULL) {
482 			next = cb->next;
483 			rte_free(cb);
484 			cb = next;
485 		}
486 
487 		rte_free(list->qsbr);
488 	}
489 
490 	rte_free(dev->enq_cbs);
491 	dev->enq_cbs = NULL;
492 	rte_free(dev->deq_cbs);
493 	dev->deq_cbs = NULL;
494 }
495 
496 static int
497 cryptodev_cb_init(struct rte_cryptodev *dev)
498 {
499 	struct rte_cryptodev_cb_rcu *list;
500 	struct rte_rcu_qsbr *qsbr;
501 	uint16_t qp_id;
502 	size_t size;
503 
504 	/* Max threads set to 1, as only one DP thread accesses a queue pair */
505 	const uint32_t max_threads = 1;
506 
507 	dev->enq_cbs = rte_zmalloc(NULL,
508 				   sizeof(struct rte_cryptodev_cb_rcu) *
509 				   dev->data->nb_queue_pairs, 0);
510 	if (dev->enq_cbs == NULL) {
511 		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
512 		return -ENOMEM;
513 	}
514 
515 	dev->deq_cbs = rte_zmalloc(NULL,
516 				   sizeof(struct rte_cryptodev_cb_rcu) *
517 				   dev->data->nb_queue_pairs, 0);
518 	if (dev->deq_cbs == NULL) {
519 		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
520 		rte_free(dev->enq_cbs);
521 		return -ENOMEM;
522 	}
523 
524 	/* Create RCU QSBR variable */
525 	size = rte_rcu_qsbr_get_memsize(max_threads);
526 
527 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
528 		list = &dev->enq_cbs[qp_id];
529 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
530 		if (qsbr == NULL) {
531 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
532 				"queue_pair_id=%d", qp_id);
533 			goto cb_init_err;
534 		}
535 
536 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
537 			CDEV_LOG_ERR("Failed to initialize RCU on "
538 				"queue_pair_id=%d", qp_id);
539 			goto cb_init_err;
540 		}
541 
542 		list->qsbr = qsbr;
543 	}
544 
545 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
546 		list = &dev->deq_cbs[qp_id];
547 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
548 		if (qsbr == NULL) {
549 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
550 				"queue_pair_id=%d", qp_id);
551 			goto cb_init_err;
552 		}
553 
554 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
555 			CDEV_LOG_ERR("Failed to initialize RCU on "
556 				"queue_pair_id=%d", qp_id);
557 			goto cb_init_err;
558 		}
559 
560 		list->qsbr = qsbr;
561 	}
562 
563 	return 0;
564 
565 cb_init_err:
566 	cryptodev_cb_cleanup(dev);
567 	return -ENOMEM;
568 }
569 
570 const char *
571 rte_cryptodev_get_feature_name(uint64_t flag)
572 {
573 	switch (flag) {
574 	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
575 		return "SYMMETRIC_CRYPTO";
576 	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
577 		return "ASYMMETRIC_CRYPTO";
578 	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
579 		return "SYM_OPERATION_CHAINING";
580 	case RTE_CRYPTODEV_FF_CPU_SSE:
581 		return "CPU_SSE";
582 	case RTE_CRYPTODEV_FF_CPU_AVX:
583 		return "CPU_AVX";
584 	case RTE_CRYPTODEV_FF_CPU_AVX2:
585 		return "CPU_AVX2";
586 	case RTE_CRYPTODEV_FF_CPU_AVX512:
587 		return "CPU_AVX512";
588 	case RTE_CRYPTODEV_FF_CPU_AESNI:
589 		return "CPU_AESNI";
590 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
591 		return "HW_ACCELERATED";
592 	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
593 		return "IN_PLACE_SGL";
594 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
595 		return "OOP_SGL_IN_SGL_OUT";
596 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
597 		return "OOP_SGL_IN_LB_OUT";
598 	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
599 		return "OOP_LB_IN_SGL_OUT";
600 	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
601 		return "OOP_LB_IN_LB_OUT";
602 	case RTE_CRYPTODEV_FF_CPU_NEON:
603 		return "CPU_NEON";
604 	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
605 		return "CPU_ARM_CE";
606 	case RTE_CRYPTODEV_FF_SECURITY:
607 		return "SECURITY_PROTOCOL";
608 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
609 		return "RSA_PRIV_OP_KEY_EXP";
610 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
611 		return "RSA_PRIV_OP_KEY_QT";
612 	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
613 		return "DIGEST_ENCRYPTED";
614 	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
615 		return "SYM_CPU_CRYPTO";
616 	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
617 		return "ASYM_SESSIONLESS";
618 	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
619 		return "SYM_SESSIONLESS";
620 	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
621 		return "NON_BYTE_ALIGNED_DATA";
622 	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
623 		return "CIPHER_MULTIPLE_DATA_UNITS";
624 	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
625 		return "CIPHER_WRAPPED_KEY";
626 	default:
627 		return NULL;
628 	}
629 }
630 
631 struct rte_cryptodev *
632 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
633 {
634 	return &cryptodev_globals.devs[dev_id];
635 }
636 
637 struct rte_cryptodev *
638 rte_cryptodev_pmd_get_named_dev(const char *name)
639 {
640 	struct rte_cryptodev *dev;
641 	unsigned int i;
642 
643 	if (name == NULL)
644 		return NULL;
645 
646 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
647 		dev = &cryptodev_globals.devs[i];
648 
649 		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
650 				(strcmp(dev->data->name, name) == 0))
651 			return dev;
652 	}
653 
654 	return NULL;
655 }
656 
657 static inline uint8_t
658 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
659 {
660 	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
661 			rte_crypto_devices[dev_id].data == NULL)
662 		return 0;
663 
664 	return 1;
665 }
666 
667 unsigned int
668 rte_cryptodev_is_valid_dev(uint8_t dev_id)
669 {
670 	struct rte_cryptodev *dev = NULL;
671 
672 	if (!rte_cryptodev_is_valid_device_data(dev_id))
673 		return 0;
674 
675 	dev = rte_cryptodev_pmd_get_dev(dev_id);
676 	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
677 		return 0;
678 	else
679 		return 1;
680 }
681 
682 
684 rte_cryptodev_get_dev_id(const char *name)
685 {
686 	unsigned i;
687 
688 	if (name == NULL)
689 		return -1;
690 
691 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
692 		if (!rte_cryptodev_is_valid_device_data(i))
693 			continue;
694 		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
695 				== 0) &&
696 				(cryptodev_globals.devs[i].attached ==
697 						RTE_CRYPTODEV_ATTACHED))
698 			return i;
699 	}
700 
701 	return -1;
702 }
703 
704 uint8_t
705 rte_cryptodev_count(void)
706 {
707 	return cryptodev_globals.nb_devs;
708 }
709 
710 uint8_t
711 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
712 {
713 	uint8_t i, dev_count = 0;
714 
715 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
716 		if (cryptodev_globals.devs[i].driver_id == driver_id &&
717 			cryptodev_globals.devs[i].attached ==
718 					RTE_CRYPTODEV_ATTACHED)
719 			dev_count++;
720 
721 	return dev_count;
722 }
723 
724 uint8_t
725 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
726 	uint8_t nb_devices)
727 {
728 	uint8_t i, count = 0;
729 	struct rte_cryptodev *devs = cryptodev_globals.devs;
730 
731 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
732 		if (!rte_cryptodev_is_valid_device_data(i))
733 			continue;
734 
735 		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
736 			int cmp;
737 
738 			cmp = strncmp(devs[i].device->driver->name,
739 					driver_name,
740 					strlen(driver_name) + 1);
741 
742 			if (cmp == 0)
743 				devices[count++] = devs[i].data->dev_id;
744 		}
745 	}
746 
747 	return count;
748 }
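
/*
 * Illustrative usage sketch (not part of the library): collecting the
 * ids of all attached devices bound to one driver. The driver name is
 * an assumption; any PMD name can be used.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_DIM(ids));
 *
 *	while (nb--)
 *		printf("dev %u on socket %d\n", ids[nb],
 *				rte_cryptodev_socket_id(ids[nb]));
 */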
749 
750 void *
751 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
752 {
753 	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
754 			(rte_crypto_devices[dev_id].feature_flags &
755 			RTE_CRYPTODEV_FF_SECURITY))
756 		return rte_crypto_devices[dev_id].security_ctx;
757 
758 	return NULL;
759 }
760 
761 int
762 rte_cryptodev_socket_id(uint8_t dev_id)
763 {
764 	struct rte_cryptodev *dev;
765 
766 	if (!rte_cryptodev_is_valid_dev(dev_id))
767 		return -1;
768 
769 	dev = rte_cryptodev_pmd_get_dev(dev_id);
770 
771 	return dev->data->socket_id;
772 }
773 
774 static inline int
775 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
776 		int socket_id)
777 {
778 	char mz_name[RTE_MEMZONE_NAMESIZE];
779 	const struct rte_memzone *mz;
780 	int n;
781 
782 	/* generate memzone name */
783 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
784 	if (n >= (int)sizeof(mz_name))
785 		return -EINVAL;
786 
787 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
788 		mz = rte_memzone_reserve(mz_name,
789 				sizeof(struct rte_cryptodev_data),
790 				socket_id, 0);
791 		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
792 				mz_name, mz);
793 	} else {
794 		mz = rte_memzone_lookup(mz_name);
795 		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
796 				mz_name, mz);
797 	}
798 
799 	if (mz == NULL)
800 		return -ENOMEM;
801 
802 	*data = mz->addr;
803 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
804 		memset(*data, 0, sizeof(struct rte_cryptodev_data));
805 
806 	return 0;
807 }
808 
809 static inline int
810 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
811 {
812 	char mz_name[RTE_MEMZONE_NAMESIZE];
813 	const struct rte_memzone *mz;
814 	int n;
815 
816 	/* generate memzone name */
817 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
818 	if (n >= (int)sizeof(mz_name))
819 		return -EINVAL;
820 
821 	mz = rte_memzone_lookup(mz_name);
822 	if (mz == NULL)
823 		return -ENOMEM;
824 
825 	RTE_ASSERT(*data == mz->addr);
826 	*data = NULL;
827 
828 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
829 		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
830 				mz_name, mz);
831 		return rte_memzone_free(mz);
832 	} else {
833 		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
834 				mz_name, mz);
835 	}
836 
837 	return 0;
838 }
839 
840 static uint8_t
841 rte_cryptodev_find_free_device_index(void)
842 {
843 	uint8_t dev_id;
844 
845 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
846 		if (rte_crypto_devices[dev_id].attached ==
847 				RTE_CRYPTODEV_DETACHED)
848 			return dev_id;
849 	}
850 	return RTE_CRYPTO_MAX_DEVS;
851 }
852 
853 struct rte_cryptodev *
854 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
855 {
856 	struct rte_cryptodev *cryptodev;
857 	uint8_t dev_id;
858 
859 	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
860 		CDEV_LOG_ERR("Crypto device with name %s already "
861 				"allocated!", name);
862 		return NULL;
863 	}
864 
865 	dev_id = rte_cryptodev_find_free_device_index();
866 	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
867 		CDEV_LOG_ERR("Reached maximum number of crypto devices");
868 		return NULL;
869 	}
870 
871 	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
872 
873 	if (cryptodev->data == NULL) {
874 		struct rte_cryptodev_data **cryptodev_data =
875 				&cryptodev_globals.data[dev_id];
876 
877 		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
878 				socket_id);
879 
880 		if (retval < 0 || *cryptodev_data == NULL)
881 			return NULL;
882 
883 		cryptodev->data = *cryptodev_data;
884 
885 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
886 			strlcpy(cryptodev->data->name, name,
887 				RTE_CRYPTODEV_NAME_MAX_LEN);
888 
889 			cryptodev->data->dev_id = dev_id;
890 			cryptodev->data->socket_id = socket_id;
891 			cryptodev->data->dev_started = 0;
892 			CDEV_LOG_DEBUG("PRIMARY:init data");
893 		}
894 
895 		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
896 				cryptodev->data->name,
897 				cryptodev->data->dev_id,
898 				cryptodev->data->socket_id,
899 				cryptodev->data->dev_started);
900 
901 		/* init user callbacks */
902 		TAILQ_INIT(&(cryptodev->link_intr_cbs));
903 
904 		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
905 
906 		cryptodev_globals.nb_devs++;
907 	}
908 
909 	return cryptodev;
910 }
911 
912 int
913 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
914 {
915 	int ret;
916 	uint8_t dev_id;
917 
918 	if (cryptodev == NULL)
919 		return -EINVAL;
920 
921 	dev_id = cryptodev->data->dev_id;
922 
923 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
924 
925 	/* Close device only if device operations have been set */
926 	if (cryptodev->dev_ops) {
927 		ret = rte_cryptodev_close(dev_id);
928 		if (ret < 0)
929 			return ret;
930 	}
931 
932 	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
933 	if (ret < 0)
934 		return ret;
935 
936 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
937 	cryptodev_globals.nb_devs--;
938 	return 0;
939 }
940 
941 uint16_t
942 rte_cryptodev_queue_pair_count(uint8_t dev_id)
943 {
944 	struct rte_cryptodev *dev;
945 
946 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
947 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
948 		return 0;
949 	}
950 
951 	dev = &rte_crypto_devices[dev_id];
952 	return dev->data->nb_queue_pairs;
953 }
954 
955 static int
956 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
957 		int socket_id)
958 {
959 	struct rte_cryptodev_info dev_info;
960 	void **qp;
961 	unsigned i;
962 
963 	if ((dev == NULL) || (nb_qpairs < 1)) {
964 		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
965 							dev, nb_qpairs);
966 		return -EINVAL;
967 	}
968 
969 	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
970 			nb_qpairs, dev->data->dev_id);
971 
972 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
973 
974 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
975 	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
976 
977 	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
978 		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
979 				nb_qpairs, dev->data->dev_id);
980 		return -EINVAL;
981 	}
982 
983 	if (dev->data->queue_pairs == NULL) { /* first time configuration */
984 		dev->data->queue_pairs = rte_zmalloc_socket(
985 				"cryptodev->queue_pairs",
986 				sizeof(dev->data->queue_pairs[0]) *
987 				dev_info.max_nb_queue_pairs,
988 				RTE_CACHE_LINE_SIZE, socket_id);
989 
990 		if (dev->data->queue_pairs == NULL) {
991 			dev->data->nb_queue_pairs = 0;
992 			CDEV_LOG_ERR("failed to get memory for qp meta data, "
993 							"nb_queues %u",
994 							nb_qpairs);
995 			return -(ENOMEM);
996 		}
997 	} else { /* re-configure */
998 		int ret;
999 		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
1000 
1001 		qp = dev->data->queue_pairs;
1002 
1003 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
1004 				-ENOTSUP);
1005 
1006 		for (i = nb_qpairs; i < old_nb_queues; i++) {
1007 			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
1008 			if (ret < 0)
1009 				return ret;
1010 			qp[i] = NULL;
1011 		}
1012 
1013 	}
1014 	dev->data->nb_queue_pairs = nb_qpairs;
1015 	return 0;
1016 }
1017 
1018 int
1019 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
1020 {
1021 	struct rte_cryptodev *dev;
1022 	int diag;
1023 
1024 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1025 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1026 		return -EINVAL;
1027 	}
1028 
1029 	dev = &rte_crypto_devices[dev_id];
1030 
1031 	if (dev->data->dev_started) {
1032 		CDEV_LOG_ERR(
1033 		    "device %d must be stopped to allow configuration", dev_id);
1034 		return -EBUSY;
1035 	}
1036 
1037 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1038 
1039 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1040 	cryptodev_cb_cleanup(dev);
1041 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1042 
1043 	/* Setup new number of queue pairs and reconfigure device. */
1044 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
1045 			config->socket_id);
1046 	if (diag != 0) {
1047 		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
1048 				dev_id, diag);
1049 		return diag;
1050 	}
1051 
1052 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1053 	diag = cryptodev_cb_init(dev);
1054 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1055 	if (diag) {
1056 		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
1057 		return diag;
1058 	}
1059 
1060 	rte_cryptodev_trace_configure(dev_id, config);
1061 	return (*dev->dev_ops->dev_configure)(dev, config);
1062 }
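
/*
 * Illustrative control-path sketch (not part of the library): the
 * usual order is configure, queue pair setup, then start. Error
 * handling is omitted; 'dev_id', 'sess_pool' and 'sess_priv_pool' are
 * assumptions.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_cryptodev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_pool,
 *		.mp_session_private = sess_priv_pool,
 *	};
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, conf.socket_id);
 *	rte_cryptodev_start(dev_id);
 */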
1063 
1064 int
1065 rte_cryptodev_start(uint8_t dev_id)
1066 {
1067 	struct rte_cryptodev *dev;
1068 	int diag;
1069 
1070 	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1071 
1072 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1073 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1074 		return -EINVAL;
1075 	}
1076 
1077 	dev = &rte_crypto_devices[dev_id];
1078 
1079 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1080 
1081 	if (dev->data->dev_started != 0) {
1082 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1083 			dev_id);
1084 		return 0;
1085 	}
1086 
1087 	diag = (*dev->dev_ops->dev_start)(dev);
1088 	/* expose selection of PMD fast-path functions */
1089 	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
1090 
1091 	rte_cryptodev_trace_start(dev_id, diag);
1092 	if (diag == 0)
1093 		dev->data->dev_started = 1;
1094 	else
1095 		return diag;
1096 
1097 	return 0;
1098 }
1099 
1100 void
1101 rte_cryptodev_stop(uint8_t dev_id)
1102 {
1103 	struct rte_cryptodev *dev;
1104 
1105 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1106 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1107 		return;
1108 	}
1109 
1110 	dev = &rte_crypto_devices[dev_id];
1111 
1112 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1113 
1114 	if (dev->data->dev_started == 0) {
1115 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1116 			dev_id);
1117 		return;
1118 	}
1119 
1120 	/* point fast-path functions to dummy ones */
1121 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
1122 
1123 	(*dev->dev_ops->dev_stop)(dev);
1124 	rte_cryptodev_trace_stop(dev_id);
1125 	dev->data->dev_started = 0;
1126 }
1127 
1128 int
1129 rte_cryptodev_close(uint8_t dev_id)
1130 {
1131 	struct rte_cryptodev *dev;
1132 	int retval;
1133 
1134 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1135 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1136 		return -1;
1137 	}
1138 
1139 	dev = &rte_crypto_devices[dev_id];
1140 
1141 	/* Device must be stopped before it can be closed */
1142 	if (dev->data->dev_started == 1) {
1143 		CDEV_LOG_ERR("Device %u must be stopped before closing",
1144 				dev_id);
1145 		return -EBUSY;
1146 	}
1147 
1148 	/* We can't close the device if there are outstanding sessions in use */
1149 	if (dev->data->session_pool != NULL) {
1150 		if (!rte_mempool_full(dev->data->session_pool)) {
1151 			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1152 					"has sessions still in use, free "
1153 					"all sessions before calling close",
1154 					(unsigned)dev_id);
1155 			return -EBUSY;
1156 		}
1157 	}
1158 
1159 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1160 	retval = (*dev->dev_ops->dev_close)(dev);
1161 	rte_cryptodev_trace_close(dev_id, retval);
1162 
1163 	if (retval < 0)
1164 		return retval;
1165 
1166 	return 0;
1167 }
1168 
1169 int
1170 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1171 {
1172 	struct rte_cryptodev *dev;
1173 
1174 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1175 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1176 		return -EINVAL;
1177 	}
1178 
1179 	dev = &rte_crypto_devices[dev_id];
1180 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1181 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1182 		return -EINVAL;
1183 	}
1184 	void **qps = dev->data->queue_pairs;
1185 
1186 	if (qps[queue_pair_id]) {
1187 		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1188 			queue_pair_id, dev_id);
1189 		return 1;
1190 	}
1191 
1192 	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1193 		queue_pair_id, dev_id);
1194 
1195 	return 0;
1196 }
1197 
1198 int
1199 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1200 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1202 {
1203 	struct rte_cryptodev *dev;
1204 
1205 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1206 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1207 		return -EINVAL;
1208 	}
1209 
1210 	dev = &rte_crypto_devices[dev_id];
1211 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1212 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1213 		return -EINVAL;
1214 	}
1215 
1216 	if (!qp_conf) {
1217 		CDEV_LOG_ERR("qp_conf cannot be NULL");
1218 		return -EINVAL;
1219 	}
1220 
1221 	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1222 			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
1223 		CDEV_LOG_ERR("Invalid mempools");
1224 		return -EINVAL;
1225 	}
1226 
1227 	if (qp_conf->mp_session) {
1228 		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1229 		uint32_t obj_size = qp_conf->mp_session->elt_size;
1230 		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1231 		struct rte_cryptodev_sym_session s = {0};
1232 
1233 		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1234 		if (!pool_priv || qp_conf->mp_session->private_data_size <
1235 				sizeof(*pool_priv)) {
1236 			CDEV_LOG_ERR("Invalid mempool");
1237 			return -EINVAL;
1238 		}
1239 
1240 		s.nb_drivers = pool_priv->nb_drivers;
1241 		s.user_data_sz = pool_priv->user_data_sz;
1242 
1243 		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1244 			obj_size) || (s.nb_drivers <= dev->driver_id) ||
1245 			rte_cryptodev_sym_get_private_session_size(dev_id) >
1246 				obj_priv_size) {
1247 			CDEV_LOG_ERR("Invalid mempool");
1248 			return -EINVAL;
1249 		}
1250 	}
1251 
1252 	if (dev->data->dev_started) {
1253 		CDEV_LOG_ERR(
1254 		    "device %d must be stopped to allow configuration", dev_id);
1255 		return -EBUSY;
1256 	}
1257 
1258 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1259 
1260 	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1261 	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1262 			socket_id);
1263 }
1264 
1265 struct rte_cryptodev_cb *
1266 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1267 			       uint16_t qp_id,
1268 			       rte_cryptodev_callback_fn cb_fn,
1269 			       void *cb_arg)
1270 {
1271 	struct rte_cryptodev *dev;
1272 	struct rte_cryptodev_cb_rcu *list;
1273 	struct rte_cryptodev_cb *cb, *tail;
1274 
1275 	if (!cb_fn) {
1276 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1277 		rte_errno = EINVAL;
1278 		return NULL;
1279 	}
1280 
1281 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1282 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1283 		rte_errno = ENODEV;
1284 		return NULL;
1285 	}
1286 
1287 	dev = &rte_crypto_devices[dev_id];
1288 	if (qp_id >= dev->data->nb_queue_pairs) {
1289 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1290 		rte_errno = ENODEV;
1291 		return NULL;
1292 	}
1293 
1294 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1295 	if (cb == NULL) {
1296 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1297 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1298 		rte_errno = ENOMEM;
1299 		return NULL;
1300 	}
1301 
1302 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1303 
1304 	cb->fn = cb_fn;
1305 	cb->arg = cb_arg;
1306 
1307 	/* Add the callbacks in FIFO order. */
1308 	list = &dev->enq_cbs[qp_id];
1309 	tail = list->next;
1310 
1311 	if (tail) {
1312 		while (tail->next)
1313 			tail = tail->next;
1314 		/* Stores to cb->fn and cb->arg should complete before
1315 		 * cb is visible to data plane.
1316 		 */
1317 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1318 	} else {
1319 		/* Stores to cb->fn and cb->arg should complete before
1320 		 * cb is visible to data plane.
1321 		 */
1322 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1323 	}
1324 
1325 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1326 
1327 	return cb;
1328 }
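
/*
 * Illustrative usage sketch (not part of the library): an enqueue
 * callback that only counts operations. The function and variable
 * names are hypothetical.
 *
 *	static uint16_t
 *	count_ops(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static uint64_t total;
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_ops, &total);
 */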
1329 
1330 int
1331 rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1332 				  uint16_t qp_id,
1333 				  struct rte_cryptodev_cb *cb)
1334 {
1335 	struct rte_cryptodev *dev;
1336 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1337 	struct rte_cryptodev_cb_rcu *list;
1338 	int ret;
1339 
1340 	ret = -EINVAL;
1341 
1342 	if (!cb) {
1343 		CDEV_LOG_ERR("Callback is NULL");
1344 		return -EINVAL;
1345 	}
1346 
1347 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1348 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1349 		return -ENODEV;
1350 	}
1351 
1352 	dev = &rte_crypto_devices[dev_id];
1353 	if (qp_id >= dev->data->nb_queue_pairs) {
1354 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1355 		return -ENODEV;
1356 	}
1357 
1358 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1359 	if (dev->enq_cbs == NULL) {
1360 		CDEV_LOG_ERR("Callback not initialized");
1361 		goto cb_err;
1362 	}
1363 
1364 	list = &dev->enq_cbs[qp_id];
1365 	if (list == NULL) {
1366 		CDEV_LOG_ERR("Callback list is NULL");
1367 		goto cb_err;
1368 	}
1369 
1370 	if (list->qsbr == NULL) {
1371 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1372 		goto cb_err;
1373 	}
1374 
1375 	prev_cb = &list->next;
1376 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1377 		curr_cb = *prev_cb;
1378 		if (curr_cb == cb) {
1379 			/* Remove the user cb from the callback list. */
1380 			__atomic_store_n(prev_cb, curr_cb->next,
1381 				__ATOMIC_RELAXED);
1382 			ret = 0;
1383 			break;
1384 		}
1385 	}
1386 
1387 	if (!ret) {
1388 		/* Call sync with invalid thread id as this is part of
1389 		 * control plane API
1390 		 */
1391 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1392 		rte_free(cb);
1393 	}
1394 
1395 cb_err:
1396 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1397 	return ret;
1398 }
1399 
1400 struct rte_cryptodev_cb *
1401 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1402 			       uint16_t qp_id,
1403 			       rte_cryptodev_callback_fn cb_fn,
1404 			       void *cb_arg)
1405 {
1406 	struct rte_cryptodev *dev;
1407 	struct rte_cryptodev_cb_rcu *list;
1408 	struct rte_cryptodev_cb *cb, *tail;
1409 
1410 	if (!cb_fn) {
1411 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1412 		rte_errno = EINVAL;
1413 		return NULL;
1414 	}
1415 
1416 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1417 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1418 		rte_errno = ENODEV;
1419 		return NULL;
1420 	}
1421 
1422 	dev = &rte_crypto_devices[dev_id];
1423 	if (qp_id >= dev->data->nb_queue_pairs) {
1424 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1425 		rte_errno = ENODEV;
1426 		return NULL;
1427 	}
1428 
1429 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1430 	if (cb == NULL) {
1431 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1432 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1433 		rte_errno = ENOMEM;
1434 		return NULL;
1435 	}
1436 
1437 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1438 
1439 	cb->fn = cb_fn;
1440 	cb->arg = cb_arg;
1441 
1442 	/* Add the callbacks in FIFO order. */
1443 	list = &dev->deq_cbs[qp_id];
1444 	tail = list->next;
1445 
1446 	if (tail) {
1447 		while (tail->next)
1448 			tail = tail->next;
1449 		/* Stores to cb->fn and cb->arg should complete before
1450 		 * cb is visible to data plane.
1451 		 */
1452 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1453 	} else {
1454 		/* Stores to cb->fn and cb->arg should complete before
1455 		 * cb is visible to data plane.
1456 		 */
1457 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1458 	}
1459 
1460 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1461 
1462 	return cb;
1463 }
1464 
1465 int
1466 rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1467 				  uint16_t qp_id,
1468 				  struct rte_cryptodev_cb *cb)
1469 {
1470 	struct rte_cryptodev *dev;
1471 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1472 	struct rte_cryptodev_cb_rcu *list;
1473 	int ret;
1474 
1475 	ret = -EINVAL;
1476 
1477 	if (!cb) {
1478 		CDEV_LOG_ERR("Callback is NULL");
1479 		return -EINVAL;
1480 	}
1481 
1482 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1483 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1484 		return -ENODEV;
1485 	}
1486 
1487 	dev = &rte_crypto_devices[dev_id];
1488 	if (qp_id >= dev->data->nb_queue_pairs) {
1489 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1490 		return -ENODEV;
1491 	}
1492 
1493 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1494 	if (dev->deq_cbs == NULL) {
1495 		CDEV_LOG_ERR("Callback not initialized");
1496 		goto cb_err;
1497 	}
1498 
1499 	list = &dev->deq_cbs[qp_id];
1500 	if (list == NULL) {
1501 		CDEV_LOG_ERR("Callback list is NULL");
1502 		goto cb_err;
1503 	}
1504 
1505 	if (list->qsbr == NULL) {
1506 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1507 		goto cb_err;
1508 	}
1509 
1510 	prev_cb = &list->next;
1511 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1512 		curr_cb = *prev_cb;
1513 		if (curr_cb == cb) {
1514 			/* Remove the user cb from the callback list. */
1515 			__atomic_store_n(prev_cb, curr_cb->next,
1516 				__ATOMIC_RELAXED);
1517 			ret = 0;
1518 			break;
1519 		}
1520 	}
1521 
1522 	if (!ret) {
1523 		/* Call sync with invalid thread id as this is part of
1524 		 * control plane API
1525 		 */
1526 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1527 		rte_free(cb);
1528 	}
1529 
1530 cb_err:
1531 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1532 	return ret;
1533 }
1534 
1535 int
1536 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1537 {
1538 	struct rte_cryptodev *dev;
1539 
1540 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1541 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1542 		return -ENODEV;
1543 	}
1544 
1545 	if (stats == NULL) {
1546 		CDEV_LOG_ERR("Invalid stats ptr");
1547 		return -EINVAL;
1548 	}
1549 
1550 	dev = &rte_crypto_devices[dev_id];
1551 	memset(stats, 0, sizeof(*stats));
1552 
1553 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1554 	(*dev->dev_ops->stats_get)(dev, stats);
1555 	return 0;
1556 }
1557 
1558 void
1559 rte_cryptodev_stats_reset(uint8_t dev_id)
1560 {
1561 	struct rte_cryptodev *dev;
1562 
1563 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1564 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1565 		return;
1566 	}
1567 
1568 	dev = &rte_crypto_devices[dev_id];
1569 
1570 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1571 	(*dev->dev_ops->stats_reset)(dev);
1572 }
1573 
1574 void
1575 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1576 {
1577 	struct rte_cryptodev *dev;
1578 
1579 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1580 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1581 		return;
1582 	}
1583 
1584 	dev = &rte_crypto_devices[dev_id];
1585 
1586 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1587 
1588 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1589 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1590 
1591 	dev_info->driver_name = dev->device->driver->name;
1592 	dev_info->device = dev->device;
1593 }
1594 
1595 int
1596 rte_cryptodev_callback_register(uint8_t dev_id,
1597 			enum rte_cryptodev_event_type event,
1598 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1599 {
1600 	struct rte_cryptodev *dev;
1601 	struct rte_cryptodev_callback *user_cb;
1602 
1603 	if (!cb_fn)
1604 		return -EINVAL;
1605 
1606 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1607 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1608 		return -EINVAL;
1609 	}
1610 
1611 	dev = &rte_crypto_devices[dev_id];
1612 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1613 
1614 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1615 		if (user_cb->cb_fn == cb_fn &&
1616 			user_cb->cb_arg == cb_arg &&
1617 			user_cb->event == event) {
1618 			break;
1619 		}
1620 	}
1621 
1622 	/* create a new callback. */
1623 	if (user_cb == NULL) {
1624 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1625 				sizeof(struct rte_cryptodev_callback), 0);
1626 		if (user_cb != NULL) {
1627 			user_cb->cb_fn = cb_fn;
1628 			user_cb->cb_arg = cb_arg;
1629 			user_cb->event = event;
1630 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1631 		}
1632 	}
1633 
1634 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1635 	return (user_cb == NULL) ? -ENOMEM : 0;
1636 }
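
/*
 * Illustrative usage sketch (not part of the library): registering a
 * handler for device error events. The handler name is hypothetical.
 *
 *	static void
 *	on_cdev_event(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			printf("crypto dev %u reported an error\n", dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			on_cdev_event, NULL);
 */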
1637 
1638 int
1639 rte_cryptodev_callback_unregister(uint8_t dev_id,
1640 			enum rte_cryptodev_event_type event,
1641 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1642 {
1643 	int ret;
1644 	struct rte_cryptodev *dev;
1645 	struct rte_cryptodev_callback *cb, *next;
1646 
1647 	if (!cb_fn)
1648 		return -EINVAL;
1649 
1650 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1651 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1652 		return -EINVAL;
1653 	}
1654 
1655 	dev = &rte_crypto_devices[dev_id];
1656 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1657 
1658 	ret = 0;
1659 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1660 
1661 		next = TAILQ_NEXT(cb, next);
1662 
1663 		if (cb->cb_fn != cb_fn || cb->event != event ||
1664 				(cb->cb_arg != (void *)-1 &&
1665 				cb->cb_arg != cb_arg))
1666 			continue;
1667 
1668 		/*
1669 		 * if this callback is not executing right now,
1670 		 * then remove it.
1671 		 */
1672 		if (cb->active == 0) {
1673 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1674 			rte_free(cb);
1675 		} else {
1676 			ret = -EAGAIN;
1677 		}
1678 	}
1679 
1680 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1681 	return ret;
1682 }
1683 
1684 void
1685 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1686 	enum rte_cryptodev_event_type event)
1687 {
1688 	struct rte_cryptodev_callback *cb_lst;
1689 	struct rte_cryptodev_callback dev_cb;
1690 
1691 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1692 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1693 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1694 			continue;
1695 		dev_cb = *cb_lst;
1696 		cb_lst->active = 1;
1697 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1698 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1699 						dev_cb.cb_arg);
1700 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
1701 		cb_lst->active = 0;
1702 	}
1703 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1704 }
1705 
1706 int
1707 rte_cryptodev_sym_session_init(uint8_t dev_id,
1708 		struct rte_cryptodev_sym_session *sess,
1709 		struct rte_crypto_sym_xform *xforms,
1710 		struct rte_mempool *mp)
1711 {
1712 	struct rte_cryptodev *dev;
1713 	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1714 			dev_id);
1715 	uint8_t index;
1716 	int ret;
1717 
1718 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1719 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1720 		return -EINVAL;
1721 	}
1722 
1723 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1724 
1725 	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
1726 		return -EINVAL;
1727 
1728 	if (mp->elt_size < sess_priv_sz)
1729 		return -EINVAL;
1730 
1731 	index = dev->driver_id;
1732 	if (index >= sess->nb_drivers)
1733 		return -EINVAL;
1734 
1735 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1736 
1737 	if (sess->sess_data[index].refcnt == 0) {
1738 		ret = dev->dev_ops->sym_session_configure(dev, xforms,
1739 							sess, mp);
1740 		if (ret < 0) {
1741 			CDEV_LOG_ERR(
1742 				"dev_id %d failed to configure session details",
1743 				dev_id);
1744 			return ret;
1745 		}
1746 	}
1747 
1748 	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1749 	sess->sess_data[index].refcnt++;
1750 	return 0;
1751 }
1752 
1753 struct rte_mempool *
1754 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1755 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1756 	int socket_id)
1757 {
1758 	struct rte_mempool *mp;
1759 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1760 	uint32_t obj_sz;
1761 
1762 	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1763 	if (obj_sz > elt_size)
1764 		CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
1765 				obj_sz);
1766 	else
1767 		obj_sz = elt_size;
1768 
1769 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1770 			(uint32_t)(sizeof(*pool_priv)),
1771 			NULL, NULL, NULL, NULL,
1772 			socket_id, 0);
1773 	if (mp == NULL) {
1774 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
1775 			__func__, name, rte_errno);
1776 		return NULL;
1777 	}
1778 
1779 	pool_priv = rte_mempool_get_priv(mp);
1780 	if (!pool_priv) {
1781 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
1782 			__func__, name);
1783 		rte_mempool_free(mp);
1784 		return NULL;
1785 	}
1786 
1787 	pool_priv->nb_drivers = nb_drivers;
1788 	pool_priv->user_data_sz = user_data_size;
1789 
1790 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1791 		elt_size, cache_size, user_data_size, mp);
1792 	return mp;
1793 }
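
/*
 * Illustrative usage sketch (not part of the library): creating a
 * session header pool; the name and sizing values are assumptions.
 * Passing the header size as elt_size lets the helper expand it if
 * more room is needed for user data.
 *
 *	struct rte_mempool *sess_pool = rte_cryptodev_sym_session_pool_create(
 *		"sess_hdr_pool", 1024,
 *		rte_cryptodev_sym_get_header_session_size(),
 *		32, 0, rte_socket_id());
 */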
1794 
1795 struct rte_mempool *
1796 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1797 	uint32_t cache_size, uint16_t user_data_size, int socket_id)
1798 {
1799 	struct rte_mempool *mp;
1800 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
1801 	uint32_t obj_sz, obj_sz_aligned;
1802 	uint8_t dev_id;
1803 	unsigned int priv_sz, max_priv_sz = 0;
1804 
1805 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
1806 		if (rte_cryptodev_is_valid_dev(dev_id)) {
1807 			priv_sz = rte_cryptodev_asym_get_private_session_size(dev_id);
1808 			if (priv_sz > max_priv_sz)
1809 				max_priv_sz = priv_sz;
1810 		}
1811 	if (max_priv_sz == 0) {
1812 		CDEV_LOG_INFO("Could not set max private session size");
1813 		return NULL;
1814 	}
1815 
1816 	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
1817 			user_data_size;
1818 	obj_sz_aligned =  RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);
1819 
1820 	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
1821 			(uint32_t)(sizeof(*pool_priv)),
1822 			NULL, NULL, NULL, NULL,
1823 			socket_id, 0);
1824 	if (mp == NULL) {
1825 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
1826 			__func__, name, rte_errno);
1827 		return NULL;
1828 	}
1829 
1830 	pool_priv = rte_mempool_get_priv(mp);
1831 	if (!pool_priv) {
1832 		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
1833 			__func__, name);
1834 		rte_mempool_free(mp);
1835 		return NULL;
1836 	}
1837 	pool_priv->max_priv_session_sz = max_priv_sz;
1838 	pool_priv->user_data_sz = user_data_size;
1839 
1840 	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
1841 		user_data_size, cache_size, mp);
1842 	return mp;
1843 }
1844 
1845 static unsigned int
1846 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1847 {
1848 	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1849 			sess->user_data_sz;
1850 }
1851 
1852 static uint8_t
1853 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1854 {
1855 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1856 
1857 	if (!mp)
1858 		return 0;
1859 
1860 	pool_priv = rte_mempool_get_priv(mp);
1861 
1862 	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1863 			pool_priv->nb_drivers != nb_drivers ||
1864 			mp->elt_size <
1865 				rte_cryptodev_sym_get_header_session_size()
1866 				+ pool_priv->user_data_sz)
1867 		return 0;
1868 
1869 	return 1;
1870 }
1871 
1872 struct rte_cryptodev_sym_session *
1873 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1874 {
1875 	struct rte_cryptodev_sym_session *sess;
1876 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1877 
1878 	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
1879 		CDEV_LOG_ERR("Invalid mempool");
1880 		return NULL;
1881 	}
1882 
1883 	pool_priv = rte_mempool_get_priv(mp);
1884 
1885 	/* Allocate a session structure from the session pool */
1886 	if (rte_mempool_get(mp, (void **)&sess)) {
1887 		CDEV_LOG_ERR("couldn't get object from session mempool");
1888 		return NULL;
1889 	}
1890 
1891 	sess->nb_drivers = pool_priv->nb_drivers;
1892 	sess->user_data_sz = pool_priv->user_data_sz;
1893 	sess->opaque_data = 0;
1894 
1895 	/* Clear device session pointer.
1896 	 * Include the flag indicating presence of user data
1897 	 */
1898 	memset(sess->sess_data, 0,
1899 			rte_cryptodev_sym_session_data_size(sess));
1900 
1901 	rte_cryptodev_trace_sym_session_create(mp, sess);
1902 	return sess;
1903 }
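
/*
 * Illustrative lifecycle sketch (not part of the library): a session
 * is created from the header pool, initialized per device with a
 * private-data pool, then cleared and freed in reverse order.
 * 'dev_id', 'xform', 'sess_pool' and 'sess_priv_pool' are assumptions.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_pool);
 *
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_pool);
 *	... attach 'sess' to rte_crypto_ops and enqueue them ...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */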
1904 
1905 int
1906 rte_cryptodev_asym_session_create(uint8_t dev_id,
1907 		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1908 		void **session)
1909 {
1910 	struct rte_cryptodev_asym_session *sess;
1911 	uint32_t session_priv_data_sz;
1912 	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
1913 	unsigned int session_header_size =
1914 			rte_cryptodev_asym_get_header_session_size();
1915 	struct rte_cryptodev *dev;
1916 	int ret;
1917 
1918 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1919 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1920 		return -EINVAL;
1921 	}
1922 
1923 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1924 
1925 	if (dev == NULL)
1926 		return -EINVAL;
1927 
1928 	if (!mp) {
1929 		CDEV_LOG_ERR("Invalid mempool");
1930 		return -EINVAL;
1931 	}
1932 
1933 	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
1934 			dev_id);
1935 	pool_priv = rte_mempool_get_priv(mp);
1936 
1937 	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
1938 		CDEV_LOG_DEBUG(
1939 			"The private session data size used when creating the mempool is smaller than this device's private session data.");
1940 		return -EINVAL;
1941 	}
1942 
1943 	/* Verify if provided mempool can hold elements big enough. */
1944 	if (mp->elt_size < session_header_size + session_priv_data_sz) {
1945 		CDEV_LOG_ERR(
1946 			"mempool elements too small to hold session objects");
1947 		return -EINVAL;
1948 	}
1949 
1950 	/* Allocate a session structure from the session pool */
1951 	if (rte_mempool_get(mp, session)) {
1952 		CDEV_LOG_ERR("couldn't get object from session mempool");
1953 		return -ENOMEM;
1954 	}
1955 
1956 	sess = *session;
1957 	sess->driver_id = dev->driver_id;
1958 	sess->user_data_sz = pool_priv->user_data_sz;
1959 	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;
1960 
1961 	/* Clear device session pointer. */
1962 	memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
1963 
1964 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure, -ENOTSUP);
1965 
1966 	if (sess->sess_private_data[0] == 0) {
1967 		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
1968 		if (ret < 0) {
1969 			CDEV_LOG_ERR(
1970 				"dev_id %d failed to configure session details",
1971 				dev_id);
1972 			return ret;
1973 		}
1974 	}
1975 
1976 	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
1977 	return 0;
1978 }
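
/*
 * Usage sketch (illustrative only, not compiled into the library): create an
 * asymmetric session from a pool built with
 * rte_cryptodev_asym_session_pool_create(). The pool name, element count,
 * user data size and xform type are hypothetical.
 *
 *	struct rte_mempool *mp = rte_cryptodev_asym_session_pool_create(
 *			"asym_sess_pool", 128, 0, 16, rte_socket_id());
 *	struct rte_crypto_asym_xform xform = {
 *		.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
 *	};
 *	void *sess;
 *	if (rte_cryptodev_asym_session_create(dev_id, &xform, mp, &sess) < 0)
 *		... handle error ...
 */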
1979 
1980 int
1981 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1982 		struct rte_cryptodev_sym_session *sess)
1983 {
1984 	struct rte_cryptodev *dev;
1985 	uint8_t driver_id;
1986 
1987 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1988 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1989 		return -EINVAL;
1990 	}
1991 
1992 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1993 
1994 	if (dev == NULL || sess == NULL)
1995 		return -EINVAL;
1996 
1997 	driver_id = dev->driver_id;
1998 	if (sess->sess_data[driver_id].refcnt == 0)
1999 		return 0;
2000 	if (--sess->sess_data[driver_id].refcnt != 0)
2001 		return -EBUSY;
2002 
2003 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
2004 
2005 	dev->dev_ops->sym_session_clear(dev, sess);
2006 
2007 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
2008 	return 0;
2009 }
2010 
2011 int
2012 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
2013 {
2014 	uint8_t i;
2015 	struct rte_mempool *sess_mp;
2016 
2017 	if (sess == NULL)
2018 		return -EINVAL;
2019 
2020 	/* Check that all device private data has been freed */
2021 	for (i = 0; i < sess->nb_drivers; i++) {
2022 		if (sess->sess_data[i].refcnt != 0)
2023 			return -EBUSY;
2024 	}
2025 
2026 	/* Return session to mempool */
2027 	sess_mp = rte_mempool_from_obj(sess);
2028 	rte_mempool_put(sess_mp, sess);
2029 
2030 	rte_cryptodev_trace_sym_session_free(sess);
2031 	return 0;
2032 }
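
/*
 * Teardown ordering sketch (illustrative): every device that initialised
 * private data in a session must clear it before the header is freed,
 * otherwise the refcnt check above makes the free fail with -EBUSY.
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */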
2033 
2034 int
2035 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
2036 {
2037 	struct rte_mempool *sess_mp;
2038 	struct rte_cryptodev *dev;
2039 
2040 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2041 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2042 		return -EINVAL;
2043 	}
2044 
2045 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2046 
2047 	if (dev == NULL || sess == NULL)
2048 		return -EINVAL;
2049 
2050 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
2051 
2052 	dev->dev_ops->asym_session_clear(dev, sess);
2053 
2054 	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);
2055 
2056 	/* Return session to mempool */
2057 	sess_mp = rte_mempool_from_obj(sess);
2058 	rte_mempool_put(sess_mp, sess);
2059 
2060 	rte_cryptodev_trace_asym_session_free(dev_id, sess);
2061 	return 0;
2062 }
2063 
2064 unsigned int
2065 rte_cryptodev_sym_get_header_session_size(void)
2066 {
	/*
	 * The header contains pointers to the private data of all registered
	 * drivers, plus the information needed to safely clear or free the
	 * session.
	 */
2072 	struct rte_cryptodev_sym_session s = {0};
2073 
2074 	s.nb_drivers = nb_drivers;
2075 
2076 	return (unsigned int)(sizeof(s) +
2077 			rte_cryptodev_sym_session_data_size(&s));
2078 }
2079 
2080 unsigned int
2081 rte_cryptodev_sym_get_existing_header_session_size(
2082 		struct rte_cryptodev_sym_session *sess)
2083 {
	if (!sess)
		return 0;

	return (unsigned int)(sizeof(*sess) +
			rte_cryptodev_sym_session_data_size(sess));
2089 }
2090 
2091 unsigned int
2092 rte_cryptodev_asym_get_header_session_size(void)
2093 {
2094 	return sizeof(struct rte_cryptodev_asym_session);
2095 }
2096 
2097 unsigned int
2098 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2099 {
2100 	struct rte_cryptodev *dev;
2101 	unsigned int priv_sess_size;
2102 
2103 	if (!rte_cryptodev_is_valid_dev(dev_id))
2104 		return 0;
2105 
2106 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2107 
2108 	if (*dev->dev_ops->sym_session_get_size == NULL)
2109 		return 0;
2110 
2111 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2112 
2113 	return priv_sess_size;
2114 }
2115 
2116 unsigned int
2117 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2118 {
2119 	struct rte_cryptodev *dev;
2120 	unsigned int priv_sess_size;
2121 
2122 	if (!rte_cryptodev_is_valid_dev(dev_id))
2123 		return 0;
2124 
2125 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2126 
2127 	if (*dev->dev_ops->asym_session_get_size == NULL)
2128 		return 0;
2129 
2130 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2131 
2132 	return priv_sess_size;
2133 }
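
/*
 * Sizing sketch (illustrative): the helpers above give applications the
 * figures needed to reason about mempool element sizes, mirroring the
 * checks in rte_cryptodev_asym_session_create(). The user_data_sz value
 * below is hypothetical.
 *
 *	uint32_t hdr = rte_cryptodev_asym_get_header_session_size();
 *	uint32_t priv = rte_cryptodev_asym_get_private_session_size(dev_id);
 *	uint32_t min_elt_size = hdr + priv + user_data_sz;
 */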
2134 
2135 int
2136 rte_cryptodev_sym_session_set_user_data(
2137 					struct rte_cryptodev_sym_session *sess,
2138 					void *data,
2139 					uint16_t size)
2140 {
2141 	if (sess == NULL)
2142 		return -EINVAL;
2143 
2144 	if (sess->user_data_sz < size)
2145 		return -ENOMEM;
2146 
2147 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2148 	return 0;
2149 }
2150 
2151 void *
2152 rte_cryptodev_sym_session_get_user_data(
2153 					struct rte_cryptodev_sym_session *sess)
2154 {
2155 	if (sess == NULL || sess->user_data_sz == 0)
2156 		return NULL;
2157 
2158 	return (void *)(sess->sess_data + sess->nb_drivers);
2159 }
2160 
2161 int
2162 rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
2163 {
2164 	struct rte_cryptodev_asym_session *sess = session;
2165 	if (sess == NULL)
2166 		return -EINVAL;
2167 
2168 	if (sess->user_data_sz < size)
2169 		return -ENOMEM;
2170 
2171 	rte_memcpy(sess->sess_private_data +
2172 			sess->max_priv_data_sz,
2173 			data, size);
2174 	return 0;
2175 }
2176 
2177 void *
2178 rte_cryptodev_asym_session_get_user_data(void *session)
2179 {
2180 	struct rte_cryptodev_asym_session *sess = session;
2181 	if (sess == NULL || sess->user_data_sz == 0)
2182 		return NULL;
2183 
2184 	return (void *)(sess->sess_private_data +
2185 			sess->max_priv_data_sz);
2186 }
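
/*
 * User-data round trip sketch (illustrative): this only works if the
 * session pool was created with a user data size of at least sizeof(ctx).
 * The struct and its contents are hypothetical.
 *
 *	struct app_sess_ctx { uint64_t flow_id; } ctx = { .flow_id = 7 };
 *	rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
 *	struct app_sess_ctx *p =
 *			rte_cryptodev_sym_session_get_user_data(sess);
 */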
2187 
2188 static inline void
2189 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2190 {
2191 	uint32_t i;
2192 	for (i = 0; i < vec->num; i++)
2193 		vec->status[i] = errnum;
2194 }
2195 
2196 uint32_t
2197 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2198 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2199 	struct rte_crypto_sym_vec *vec)
2200 {
2201 	struct rte_cryptodev *dev;
2202 
2203 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2204 		sym_crypto_fill_status(vec, EINVAL);
2205 		return 0;
2206 	}
2207 
2208 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2209 
2210 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2211 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2212 		sym_crypto_fill_status(vec, ENOTSUP);
2213 		return 0;
2214 	}
2215 
2216 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2217 }
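
/*
 * CPU crypto sketch (illustrative): applications should gate use of this
 * path on the RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO feature flag; the vec and
 * ofs arguments below are assumed to be filled in by the caller.
 *
 *	struct rte_cryptodev_info info;
 *	rte_cryptodev_info_get(dev_id, &info);
 *	if (info.feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)
 *		n = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
 *				ofs, &vec);
 */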
2218 
2219 int
2220 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2221 {
2222 	struct rte_cryptodev *dev;
2223 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2224 	int32_t priv_size;
2225 
2226 	if (!rte_cryptodev_is_valid_dev(dev_id))
2227 		return -EINVAL;
2228 
2229 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2230 
2231 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2232 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2233 		return -ENOTSUP;
2234 	}
2235 
2236 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2237 	if (priv_size < 0)
2238 		return -ENOTSUP;
2239 
2240 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2241 }
2242 
2243 int
2244 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2245 	struct rte_crypto_raw_dp_ctx *ctx,
2246 	enum rte_crypto_op_sess_type sess_type,
2247 	union rte_cryptodev_session_ctx session_ctx,
2248 	uint8_t is_update)
2249 {
2250 	struct rte_cryptodev *dev;
2251 
2252 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2253 		return -EINVAL;
2254 
2255 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2256 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2257 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2258 		return -ENOTSUP;
2259 
2260 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2261 			sess_type, session_ctx, is_update);
2262 }
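
/*
 * Raw data-path setup sketch (illustrative): the context lives in
 * application-provided memory sized by rte_cryptodev_get_raw_dp_ctx_size().
 * The sess variable is assumed to be an already-initialised session.
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	if (sz < 0)
 *		... raw data-path not supported ...
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 8);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	if (ctx == NULL ||
 *			rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *				RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
 *		... handle error ...
 */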
2263 
2264 int
2265 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
2266 	enum rte_crypto_op_type op_type,
2267 	enum rte_crypto_op_sess_type sess_type,
2268 	void *ev_mdata,
2269 	uint16_t size)
2270 {
2271 	struct rte_cryptodev *dev;
2272 
2273 	if (sess == NULL || ev_mdata == NULL)
2274 		return -EINVAL;
2275 
2276 	if (!rte_cryptodev_is_valid_dev(dev_id))
2277 		goto skip_pmd_op;
2278 
2279 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2280 	if (dev->dev_ops->session_ev_mdata_set == NULL)
2281 		goto skip_pmd_op;
2282 
2283 	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
2284 			sess_type, ev_mdata);
2285 
2286 skip_pmd_op:
2287 	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
2288 		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
2289 				size);
2290 	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2291 		struct rte_cryptodev_asym_session *s = sess;
2292 
2293 		if (s->event_mdata == NULL) {
2294 			s->event_mdata = rte_malloc(NULL, size, 0);
2295 			if (s->event_mdata == NULL)
2296 				return -ENOMEM;
2297 		}
2298 		rte_memcpy(s->event_mdata, ev_mdata, size);
2299 
2300 		return 0;
2301 	} else
2302 		return -ENOTSUP;
2303 }
2304 
2305 uint32_t
2306 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2307 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2308 	void **user_data, int *enqueue_status)
2309 {
2310 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2311 			ofs, user_data, enqueue_status);
2312 }
2313 
2314 int
2315 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2316 		uint32_t n)
2317 {
2318 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2319 }
2320 
2321 uint32_t
2322 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2323 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2324 	uint32_t max_nb_to_dequeue,
2325 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2326 	void **out_user_data, uint8_t is_user_data_array,
2327 	uint32_t *n_success_jobs, int *status)
2328 {
2329 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2330 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2331 		out_user_data, is_user_data_array, n_success_jobs, status);
2332 }
2333 
2334 int
2335 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2336 		uint32_t n)
2337 {
2338 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2339 }
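
/*
 * Raw data-path flow sketch (illustrative): the burst calls only stage
 * work; a *_done() call commits it to the queue pair. The vec, ofs,
 * user_data, post_cb and out_ud variables are assumed to be prepared by
 * the caller; passing a NULL get_dequeue_count callback makes the driver
 * use max_nb_to_dequeue instead.
 *
 *	int st;
 *	uint32_t ok;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			user_data, &st);
 *	if (n > 0)
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 *	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, max_nb_to_dequeue,
 *			post_cb, out_ud, 1, &ok, &st);
 *	if (n > 0)
 *		rte_cryptodev_raw_dequeue_done(ctx, n);
 */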
2340 
2341 /** Initialise rte_crypto_op mempool element */
2342 static void
2343 rte_crypto_op_init(struct rte_mempool *mempool,
2344 		void *opaque_arg,
2345 		void *_op_data,
2346 		__rte_unused unsigned i)
2347 {
2348 	struct rte_crypto_op *op = _op_data;
2349 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2350 
2351 	memset(_op_data, 0, mempool->elt_size);
2352 
2353 	__rte_crypto_op_reset(op, type);
2354 
2355 	op->phys_addr = rte_mem_virt2iova(_op_data);
2356 	op->mempool = mempool;
2357 }
2358 
2359 
2360 struct rte_mempool *
2361 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2362 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2363 		int socket_id)
2364 {
2365 	struct rte_crypto_op_pool_private *priv;
2366 
2367 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2368 			priv_size;
2369 
2370 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2371 		elt_size += sizeof(struct rte_crypto_sym_op);
2372 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2373 		elt_size += sizeof(struct rte_crypto_asym_op);
2374 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
2377 	} else {
		CDEV_LOG_ERR("Invalid op_type");
2379 		return NULL;
2380 	}
2381 
2382 	/* lookup mempool in case already allocated */
2383 	struct rte_mempool *mp = rte_mempool_lookup(name);
2384 
2385 	if (mp != NULL) {
2386 		priv = (struct rte_crypto_op_pool_private *)
2387 				rte_mempool_get_priv(mp);
2388 
		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
2398 		return mp;
2399 	}
2400 
2401 	mp = rte_mempool_create(
2402 			name,
2403 			nb_elts,
2404 			elt_size,
2405 			cache_size,
2406 			sizeof(struct rte_crypto_op_pool_private),
2407 			NULL,
2408 			NULL,
2409 			rte_crypto_op_init,
2410 			&type,
2411 			socket_id,
2412 			0);
2413 
2414 	if (mp == NULL) {
2415 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2416 		return NULL;
2417 	}
2418 
2419 	priv = (struct rte_crypto_op_pool_private *)
2420 			rte_mempool_get_priv(mp);
2421 
2422 	priv->priv_size = priv_size;
2423 	priv->type = type;
2424 
2425 	return mp;
2426 }
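
/*
 * Op pool usage sketch (illustrative): the per-op private area requested
 * here is commonly used to hold IVs or per-op xforms; the pool name and
 * all sizes are hypothetical.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 16, rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */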
2427 
2428 int
2429 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2430 {
2431 	struct rte_cryptodev *dev = NULL;
2432 	uint32_t i = 0;
2433 
2434 	if (name == NULL)
2435 		return -EINVAL;
2436 
2437 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2438 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2439 				"%s_%u", dev_name_prefix, i);
2440 
2441 		if (ret < 0)
2442 			return ret;
2443 
2444 		dev = rte_cryptodev_pmd_get_named_dev(name);
2445 		if (!dev)
2446 			return 0;
2447 	}
2448 
2449 	return -1;
2450 }
2451 
2452 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2453 
2454 static struct cryptodev_driver_list cryptodev_driver_list =
2455 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2456 
2457 int
2458 rte_cryptodev_driver_id_get(const char *name)
2459 {
2460 	struct cryptodev_driver *driver;
2461 	const char *driver_name;
2462 
2463 	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2465 		return -1;
2466 	}
2467 
2468 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2469 		driver_name = driver->driver->name;
2470 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2471 			return driver->id;
2472 	}
2473 	return -1;
2474 }
2475 
2476 const char *
2477 rte_cryptodev_name_get(uint8_t dev_id)
2478 {
2479 	struct rte_cryptodev *dev;
2480 
2481 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2482 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2483 		return NULL;
2484 	}
2485 
2486 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2487 	if (dev == NULL)
2488 		return NULL;
2489 
2490 	return dev->data->name;
2491 }
2492 
2493 const char *
2494 rte_cryptodev_driver_name_get(uint8_t driver_id)
2495 {
2496 	struct cryptodev_driver *driver;
2497 
2498 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2499 		if (driver->id == driver_id)
2500 			return driver->driver->name;
2501 	return NULL;
2502 }
2503 
2504 uint8_t
2505 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2506 		const struct rte_driver *drv)
2507 {
2508 	crypto_drv->driver = drv;
2509 	crypto_drv->id = nb_drivers;
2510 
2511 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2512 
2513 	return nb_drivers++;
2514 }
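
/*
 * PMDs do not normally call rte_cryptodev_allocate_driver() directly; the
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() macro from cryptodev_pmd.h wraps it in
 * an RTE_INIT constructor. A typical registration (names hypothetical):
 *
 *	static struct cryptodev_driver my_crypto_drv;
 *	uint8_t my_driver_id;
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(my_crypto_drv,
 *			my_pci_driver.driver, my_driver_id);
 */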
2515 
2516 RTE_INIT(cryptodev_init_fp_ops)
2517 {
2518 	uint32_t i;
2519 
2520 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2521 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2522 }
2523 
2524 static int
2525 cryptodev_handle_dev_list(const char *cmd __rte_unused,
2526 		const char *params __rte_unused,
2527 		struct rte_tel_data *d)
2528 {
2529 	int dev_id;
2530 
2531 	if (rte_cryptodev_count() < 1)
2532 		return -EINVAL;
2533 
2534 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
2535 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
2536 		if (rte_cryptodev_is_valid_dev(dev_id))
2537 			rte_tel_data_add_array_int(d, dev_id);
2538 
2539 	return 0;
2540 }
2541 
2542 static int
2543 cryptodev_handle_dev_info(const char *cmd __rte_unused,
2544 		const char *params, struct rte_tel_data *d)
2545 {
2546 	struct rte_cryptodev_info cryptodev_info;
2547 	int dev_id;
2548 	char *end_param;
2549 
2550 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2551 		return -EINVAL;
2552 
2553 	dev_id = strtoul(params, &end_param, 0);
2554 	if (*end_param != '\0')
2555 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2556 	if (!rte_cryptodev_is_valid_dev(dev_id))
2557 		return -EINVAL;
2558 
2559 	rte_cryptodev_info_get(dev_id, &cryptodev_info);
2560 
2561 	rte_tel_data_start_dict(d);
2562 	rte_tel_data_add_dict_string(d, "device_name",
2563 		cryptodev_info.device->name);
2564 	rte_tel_data_add_dict_int(d, "max_nb_queue_pairs",
2565 		cryptodev_info.max_nb_queue_pairs);
2566 
2567 	return 0;
2568 }
2569 
2570 #define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)
2571 
2572 static int
2573 cryptodev_handle_dev_stats(const char *cmd __rte_unused,
2574 		const char *params,
2575 		struct rte_tel_data *d)
2576 {
2577 	struct rte_cryptodev_stats cryptodev_stats;
2578 	int dev_id, ret;
2579 	char *end_param;
2580 
2581 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
2582 		return -EINVAL;
2583 
2584 	dev_id = strtoul(params, &end_param, 0);
2585 	if (*end_param != '\0')
2586 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2587 	if (!rte_cryptodev_is_valid_dev(dev_id))
2588 		return -EINVAL;
2589 
2590 	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
2591 	if (ret < 0)
2592 		return ret;
2593 
2594 	rte_tel_data_start_dict(d);
2595 	ADD_DICT_STAT(enqueued_count);
2596 	ADD_DICT_STAT(dequeued_count);
2597 	ADD_DICT_STAT(enqueue_err_count);
2598 	ADD_DICT_STAT(dequeue_err_count);
2599 
2600 	return 0;
2601 }
2602 
2603 #define CRYPTO_CAPS_SZ                                             \
2604 	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
2605 					sizeof(uint64_t)) /        \
2606 	 sizeof(uint64_t))
2607 
2608 static int
2609 crypto_caps_array(struct rte_tel_data *d,
2610 		  const struct rte_cryptodev_capabilities *capabilities)
2611 {
2612 	const struct rte_cryptodev_capabilities *dev_caps;
2613 	uint64_t caps_val[CRYPTO_CAPS_SZ];
2614 	unsigned int i = 0, j;
2615 
2616 	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);
2617 
2618 	while ((dev_caps = &capabilities[i++])->op !=
2619 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2620 		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
2621 		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
2622 		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
2623 			rte_tel_data_add_array_u64(d, caps_val[j]);
2624 	}
2625 
	/* i was advanced past the terminating entry; count valid caps only */
	return i - 1;
2627 }
2628 
2629 static int
2630 cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
2631 			  struct rte_tel_data *d)
2632 {
2633 	struct rte_cryptodev_info dev_info;
2634 	struct rte_tel_data *crypto_caps;
2635 	int crypto_caps_n;
2636 	char *end_param;
2637 	int dev_id;
2638 
2639 	if (!params || strlen(params) == 0 || !isdigit(*params))
2640 		return -EINVAL;
2641 
2642 	dev_id = strtoul(params, &end_param, 0);
2643 	if (*end_param != '\0')
2644 		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
2645 	if (!rte_cryptodev_is_valid_dev(dev_id))
2646 		return -EINVAL;
2647 
2648 	rte_tel_data_start_dict(d);
2649 	crypto_caps = rte_tel_data_alloc();
2650 	if (!crypto_caps)
2651 		return -ENOMEM;
2652 
2653 	rte_cryptodev_info_get(dev_id, &dev_info);
2654 	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
2655 	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
2656 	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);
2657 
2658 	return 0;
2659 }
2660 
2661 RTE_INIT(cryptodev_init_telemetry)
2662 {
2663 	rte_telemetry_register_cmd("/cryptodev/info", cryptodev_handle_dev_info,
2664 			"Returns information for a cryptodev. Parameters: int dev_id");
2665 	rte_telemetry_register_cmd("/cryptodev/list",
2666 			cryptodev_handle_dev_list,
2667 			"Returns list of available crypto devices by IDs. No parameters.");
2668 	rte_telemetry_register_cmd("/cryptodev/stats",
2669 			cryptodev_handle_dev_stats,
2670 			"Returns the stats for a cryptodev. Parameters: int dev_id");
2671 	rte_telemetry_register_cmd("/cryptodev/caps",
2672 			cryptodev_handle_dev_caps,
2673 			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
2674 }
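
/*
 * Example telemetry interaction (illustrative): with a DPDK process
 * running, the commands registered above can be queried over the telemetry
 * socket, e.g. from the usertools/dpdk-telemetry.py client:
 *
 *	--> /cryptodev/list
 *	--> /cryptodev/stats,0
 *
 * The second form passes dev_id 0 as the parameter.
 */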
2675