xref: /dpdk/lib/cryptodev/rte_cryptodev.c (revision fd390896f4a3dd27ebdf551673960bece8aff966)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4 
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 
16 #include <rte_byteorder.h>
17 #include <rte_log.h>
18 #include <rte_debug.h>
19 #include <rte_dev.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_tailq.h>
26 #include <rte_eal.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_common.h>
32 #include <rte_mempool.h>
33 #include <rte_malloc.h>
34 #include <rte_mbuf.h>
35 #include <rte_errno.h>
36 #include <rte_spinlock.h>
37 #include <rte_string_fns.h>
38 
39 #include "rte_crypto.h"
40 #include "rte_cryptodev.h"
41 #include "cryptodev_pmd.h"
42 #include "rte_cryptodev_trace.h"
43 
44 static uint8_t nb_drivers;
45 
46 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
47 
48 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
49 
50 static struct rte_cryptodev_global cryptodev_globals = {
51 		.devs			= rte_crypto_devices,
52 		.data			= { NULL },
53 		.nb_devs		= 0
54 };
55 
56 /* Public fastpath APIs. */
57 struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
58 
59 /* spinlock for crypto device callbacks */
60 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
61 
62 /**
63  * The user application callback description.
64  *
65  * It contains the callback address to be registered by the user
66  * application, a pointer to the callback's parameter, and the event type.
67  */
68 struct rte_cryptodev_callback {
69 	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
70 	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
71 	void *cb_arg;				/**< Parameter for callback */
72 	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
73 	uint32_t active;			/**< Callback is executing */
74 };
75 
76 /**
77  * The crypto cipher algorithm string identifiers.
78  * They can be used in the application command line.
79  */
80 const char *
81 rte_crypto_cipher_algorithm_strings[] = {
82 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
83 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
84 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
85 
86 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
87 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
88 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
89 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
90 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
91 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
92 
93 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
94 
95 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
96 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
97 
98 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
99 
100 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
101 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
102 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
103 };
104 
105 /**
106  * The crypto cipher operation string identifiers.
107  * They can be used in the application command line.
108  */
109 const char *
110 rte_crypto_cipher_operation_strings[] = {
111 		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
112 		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
113 };
114 
115 /**
116  * The crypto auth algorithm string identifiers.
117  * They can be used in the application command line.
118  */
119 const char *
120 rte_crypto_auth_algorithm_strings[] = {
121 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
122 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
123 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
124 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
125 
126 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
127 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
128 
129 	[RTE_CRYPTO_AUTH_NULL]		= "null",
130 
131 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
132 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
133 
134 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
135 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
136 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
137 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
138 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
139 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
140 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
141 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
142 
143 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
144 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
145 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
146 };
147 
148 /**
149  * The crypto AEAD algorithm string identifiers.
150  * They can be used in the application command line.
151  */
152 const char *
153 rte_crypto_aead_algorithm_strings[] = {
154 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
155 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
156 	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
157 };
158 
159 /**
160  * The crypto AEAD operation string identifiers.
161  * They can be used in the application command line.
162  */
163 const char *
164 rte_crypto_aead_operation_strings[] = {
165 	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
166 	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
167 };
168 
169 /**
170  * Asymmetric crypto transform operation string identifiers.
171  */
172 const char *rte_crypto_asym_xform_strings[] = {
173 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
174 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
175 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
176 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
177 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
178 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
179 	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
180 	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
181 };
182 
183 /**
184  * Asymmetric crypto operation string identifiers.
185  */
186 const char *rte_crypto_asym_op_strings[] = {
187 	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
188 	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
189 	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
190 	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
191 	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
192 	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
193 	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
194 };
195 
196 /**
197  * The private data structure stored in the session mempool's private data area.
198  */
199 struct rte_cryptodev_sym_session_pool_private_data {
200 	uint16_t nb_drivers;
201 	/**< number of elements in sess_data array */
202 	uint16_t user_data_sz;
203 	/**< session user data will be placed after sess_data */
204 };
205 
206 int
207 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
208 		const char *algo_string)
209 {
210 	unsigned int i;
211 
212 	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
213 		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
214 			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
215 			return 0;
216 		}
217 	}
218 
219 	/* Invalid string */
220 	return -1;
221 }
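
/*
 * Usage sketch (illustrative, not taken from this file): mapping a
 * command-line string such as "aes-cbc" back to its enum value. The
 * lookup loops above start at index 1 because enum value 0 is unused
 * in the symmetric algorithm enums.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") != 0)
 *		rte_exit(EXIT_FAILURE, "Unknown cipher algorithm\n");
 */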
222 
223 int
224 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
225 		const char *algo_string)
226 {
227 	unsigned int i;
228 
229 	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
230 		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
231 			*algo_enum = (enum rte_crypto_auth_algorithm) i;
232 			return 0;
233 		}
234 	}
235 
236 	/* Invalid string */
237 	return -1;
238 }
239 
240 int
241 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
242 		const char *algo_string)
243 {
244 	unsigned int i;
245 
246 	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
247 		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
248 			*algo_enum = (enum rte_crypto_aead_algorithm) i;
249 			return 0;
250 		}
251 	}
252 
253 	/* Invalid string */
254 	return -1;
255 }
256 
257 int
258 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
259 		const char *xform_string)
260 {
261 	unsigned int i;
262 
263 	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
264 		if (strcmp(xform_string,
265 			rte_crypto_asym_xform_strings[i]) == 0) {
266 			*xform_enum = (enum rte_crypto_asym_xform_type) i;
267 			return 0;
268 		}
269 	}
270 
271 	/* Invalid string */
272 	return -1;
273 }
274 
275 /**
276  * The crypto auth operation string identifiers.
277  * They can be used in the application command line.
278  */
279 const char *
280 rte_crypto_auth_operation_strings[] = {
281 		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
282 		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
283 };
284 
285 const struct rte_cryptodev_symmetric_capability *
286 rte_cryptodev_sym_capability_get(uint8_t dev_id,
287 		const struct rte_cryptodev_sym_capability_idx *idx)
288 {
289 	const struct rte_cryptodev_capabilities *capability;
290 	struct rte_cryptodev_info dev_info;
291 	int i = 0;
292 
293 	rte_cryptodev_info_get(dev_id, &dev_info);
294 
295 	while ((capability = &dev_info.capabilities[i++])->op !=
296 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
297 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
298 			continue;
299 
300 		if (capability->sym.xform_type != idx->type)
301 			continue;
302 
303 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
304 			capability->sym.auth.algo == idx->algo.auth)
305 			return &capability->sym;
306 
307 		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
308 			capability->sym.cipher.algo == idx->algo.cipher)
309 			return &capability->sym;
310 
311 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
312 				capability->sym.aead.algo == idx->algo.aead)
313 			return &capability->sym;
314 	}
315 
316 	return NULL;
317 }
318 
319 static int
320 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
321 {
322 	unsigned int next_size;
323 
324 	/* Check lower/upper bounds */
325 	if (size < range->min)
326 		return -1;
327 
328 	if (size > range->max)
329 		return -1;
330 
331 	/* If range is actually only one value, size is correct */
332 	if (range->increment == 0)
333 		return 0;
334 
335 	/* Check if value is one of the supported sizes */
336 	for (next_size = range->min; next_size <= range->max;
337 			next_size += range->increment)
338 		if (size == next_size)
339 			return 0;
340 
341 	return -1;
342 }
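
/*
 * Worked example (illustrative note): for a range of {min = 16, max = 32,
 * increment = 8}, the sizes 16, 24 and 32 pass the check above, while 20
 * fails. An increment of 0 means the range collapses to the single value
 * min == max.
 */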
343 
344 const struct rte_cryptodev_asymmetric_xform_capability *
345 rte_cryptodev_asym_capability_get(uint8_t dev_id,
346 		const struct rte_cryptodev_asym_capability_idx *idx)
347 {
348 	const struct rte_cryptodev_capabilities *capability;
349 	struct rte_cryptodev_info dev_info;
350 	unsigned int i = 0;
351 
352 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
353 	rte_cryptodev_info_get(dev_id, &dev_info);
354 
355 	while ((capability = &dev_info.capabilities[i++])->op !=
356 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
357 		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
358 			continue;
359 
360 		if (capability->asym.xform_capa.xform_type == idx->type)
361 			return &capability->asym.xform_capa;
362 	}
363 	return NULL;
364 }
365 
366 int
367 rte_cryptodev_sym_capability_check_cipher(
368 		const struct rte_cryptodev_symmetric_capability *capability,
369 		uint16_t key_size, uint16_t iv_size)
370 {
371 	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
372 		return -1;
373 
374 	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
375 		return -1;
376 
377 	return 0;
378 }
379 
380 int
381 rte_cryptodev_sym_capability_check_auth(
382 		const struct rte_cryptodev_symmetric_capability *capability,
383 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
384 {
385 	if (param_range_check(key_size, &capability->auth.key_size) != 0)
386 		return -1;
387 
388 	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
389 		return -1;
390 
391 	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
392 		return -1;
393 
394 	return 0;
395 }
396 
397 int
398 rte_cryptodev_sym_capability_check_aead(
399 		const struct rte_cryptodev_symmetric_capability *capability,
400 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
401 		uint16_t iv_size)
402 {
403 	if (param_range_check(key_size, &capability->aead.key_size) != 0)
404 		return -1;
405 
406 	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
407 		return -1;
408 
409 	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
410 		return -1;
411 
412 	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
413 		return -1;
414 
415 	return 0;
416 }
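
/*
 * Usage sketch (illustrative, assuming a valid dev_id): query the
 * symmetric capability for AES-CBC and validate the key/IV sizes before
 * building a transform.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) != 0)
 *		rte_exit(EXIT_FAILURE, "AES-128-CBC not supported\n");
 */
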
417 int
418 rte_cryptodev_asym_xform_capability_check_optype(
419 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
420 	enum rte_crypto_asym_op_type op_type)
421 {
422 	if (capability->op_types & (1 << op_type))
423 		return 1;
424 
425 	return 0;
426 }
427 
428 int
429 rte_cryptodev_asym_xform_capability_check_modlen(
430 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
431 	uint16_t modlen)
432 {
433 	/* No need to check the limits if min or max is 0 */
434 	if (capability->modlen.min != 0) {
435 		if (modlen < capability->modlen.min)
436 			return -1;
437 	}
438 
439 	if (capability->modlen.max != 0) {
440 		if (modlen > capability->modlen.max)
441 			return -1;
442 	}
443 
444 	/* In any case, check if the given modlen is a multiple of the increment */
445 	if (capability->modlen.increment != 0) {
446 		if (modlen % (capability->modlen.increment))
447 			return -1;
448 	}
449 
450 	return 0;
451 }
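
/*
 * Usage sketch (illustrative, assuming a valid dev_id): check that a
 * device can sign with RSA using a 2048-bit (256-byte) modulus.
 *
 *	struct rte_cryptodev_asym_capability_idx idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *cap =
 *		rte_cryptodev_asym_capability_get(dev_id, &idx);
 *
 *	if (cap == NULL ||
 *	    !rte_cryptodev_asym_xform_capability_check_optype(cap,
 *			RTE_CRYPTO_ASYM_OP_SIGN) ||
 *	    rte_cryptodev_asym_xform_capability_check_modlen(cap, 256) != 0)
 *		rte_exit(EXIT_FAILURE, "RSA-2048 sign not supported\n");
 */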
452 
453 /* spinlock for crypto device enq callbacks */
454 static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
455 
456 static void
457 cryptodev_cb_cleanup(struct rte_cryptodev *dev)
458 {
459 	struct rte_cryptodev_cb_rcu *list;
460 	struct rte_cryptodev_cb *cb, *next;
461 	uint16_t qp_id;
462 
463 	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
464 		return;
465 
466 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
467 		list = &dev->enq_cbs[qp_id];
468 		cb = list->next;
469 		while (cb != NULL) {
470 			next = cb->next;
471 			rte_free(cb);
472 			cb = next;
473 		}
474 
475 		rte_free(list->qsbr);
476 	}
477 
478 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
479 		list = &dev->deq_cbs[qp_id];
480 		cb = list->next;
481 		while (cb != NULL) {
482 			next = cb->next;
483 			rte_free(cb);
484 			cb = next;
485 		}
486 
487 		rte_free(list->qsbr);
488 	}
489 
490 	rte_free(dev->enq_cbs);
491 	dev->enq_cbs = NULL;
492 	rte_free(dev->deq_cbs);
493 	dev->deq_cbs = NULL;
494 }
495 
496 static int
497 cryptodev_cb_init(struct rte_cryptodev *dev)
498 {
499 	struct rte_cryptodev_cb_rcu *list;
500 	struct rte_rcu_qsbr *qsbr;
501 	uint16_t qp_id;
502 	size_t size;
503 
504 	/* Max threads set to 1, as only one data-plane thread accesses a queue pair */
505 	const uint32_t max_threads = 1;
506 
507 	dev->enq_cbs = rte_zmalloc(NULL,
508 				   sizeof(struct rte_cryptodev_cb_rcu) *
509 				   dev->data->nb_queue_pairs, 0);
510 	if (dev->enq_cbs == NULL) {
511 		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
512 		return -ENOMEM;
513 	}
514 
515 	dev->deq_cbs = rte_zmalloc(NULL,
516 				   sizeof(struct rte_cryptodev_cb_rcu) *
517 				   dev->data->nb_queue_pairs, 0);
518 	if (dev->deq_cbs == NULL) {
519 		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
520 		rte_free(dev->enq_cbs);
521 		return -ENOMEM;
522 	}
523 
524 	/* Create RCU QSBR variable */
525 	size = rte_rcu_qsbr_get_memsize(max_threads);
526 
527 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
528 		list = &dev->enq_cbs[qp_id];
529 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
530 		if (qsbr == NULL) {
531 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
532 				"queue_pair_id=%d", qp_id);
533 			goto cb_init_err;
534 		}
535 
536 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
537 			CDEV_LOG_ERR("Failed to initialize for RCU on "
538 				"queue_pair_id=%d", qp_id);
539 			goto cb_init_err;
540 		}
541 
542 		list->qsbr = qsbr;
543 	}
544 
545 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
546 		list = &dev->deq_cbs[qp_id];
547 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
548 		if (qsbr == NULL) {
549 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
550 				"queue_pair_id=%d", qp_id);
551 			goto cb_init_err;
552 		}
553 
554 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
555 			CDEV_LOG_ERR("Failed to initialize for RCU on "
556 				"queue_pair_id=%d", qp_id);
557 			goto cb_init_err;
558 		}
559 
560 		list->qsbr = qsbr;
561 	}
562 
563 	return 0;
564 
565 cb_init_err:
566 	cryptodev_cb_cleanup(dev);
567 	return -ENOMEM;
568 }
569 
570 const char *
571 rte_cryptodev_get_feature_name(uint64_t flag)
572 {
573 	switch (flag) {
574 	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
575 		return "SYMMETRIC_CRYPTO";
576 	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
577 		return "ASYMMETRIC_CRYPTO";
578 	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
579 		return "SYM_OPERATION_CHAINING";
580 	case RTE_CRYPTODEV_FF_CPU_SSE:
581 		return "CPU_SSE";
582 	case RTE_CRYPTODEV_FF_CPU_AVX:
583 		return "CPU_AVX";
584 	case RTE_CRYPTODEV_FF_CPU_AVX2:
585 		return "CPU_AVX2";
586 	case RTE_CRYPTODEV_FF_CPU_AVX512:
587 		return "CPU_AVX512";
588 	case RTE_CRYPTODEV_FF_CPU_AESNI:
589 		return "CPU_AESNI";
590 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
591 		return "HW_ACCELERATED";
592 	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
593 		return "IN_PLACE_SGL";
594 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
595 		return "OOP_SGL_IN_SGL_OUT";
596 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
597 		return "OOP_SGL_IN_LB_OUT";
598 	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
599 		return "OOP_LB_IN_SGL_OUT";
600 	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
601 		return "OOP_LB_IN_LB_OUT";
602 	case RTE_CRYPTODEV_FF_CPU_NEON:
603 		return "CPU_NEON";
604 	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
605 		return "CPU_ARM_CE";
606 	case RTE_CRYPTODEV_FF_SECURITY:
607 		return "SECURITY_PROTOCOL";
608 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
609 		return "RSA_PRIV_OP_KEY_EXP";
610 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
611 		return "RSA_PRIV_OP_KEY_QT";
612 	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
613 		return "DIGEST_ENCRYPTED";
614 	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
615 		return "SYM_CPU_CRYPTO";
616 	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
617 		return "ASYM_SESSIONLESS";
618 	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
619 		return "SYM_SESSIONLESS";
620 	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
621 		return "NON_BYTE_ALIGNED_DATA";
622 	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
623 		return "CIPHER_MULTIPLE_DATA_UNITS";
624 	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
625 		return "CIPHER_WRAPPED_KEY";
626 	default:
627 		return NULL;
628 	}
629 }
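
/*
 * Usage sketch (illustrative, assuming a valid dev_id): print every
 * feature-flag name a device advertises by walking the 64 flag bits.
 *
 *	struct rte_cryptodev_info info;
 *	uint64_t flag;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if (name != NULL && (info.feature_flags & flag))
 *			printf("  %s\n", name);
 *	}
 */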
630 
631 struct rte_cryptodev *
632 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
633 {
634 	return &cryptodev_globals.devs[dev_id];
635 }
636 
637 struct rte_cryptodev *
638 rte_cryptodev_pmd_get_named_dev(const char *name)
639 {
640 	struct rte_cryptodev *dev;
641 	unsigned int i;
642 
643 	if (name == NULL)
644 		return NULL;
645 
646 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
647 		dev = &cryptodev_globals.devs[i];
648 
649 		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
650 				(strcmp(dev->data->name, name) == 0))
651 			return dev;
652 	}
653 
654 	return NULL;
655 }
656 
657 static inline uint8_t
658 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
659 {
660 	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
661 			rte_crypto_devices[dev_id].data == NULL)
662 		return 0;
663 
664 	return 1;
665 }
666 
667 unsigned int
668 rte_cryptodev_is_valid_dev(uint8_t dev_id)
669 {
670 	struct rte_cryptodev *dev = NULL;
671 
672 	if (!rte_cryptodev_is_valid_device_data(dev_id))
673 		return 0;
674 
675 	dev = rte_cryptodev_pmd_get_dev(dev_id);
676 	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
677 		return 0;
678 	else
679 		return 1;
680 }
681 
682 
683 int
684 rte_cryptodev_get_dev_id(const char *name)
685 {
686 	unsigned i;
687 
688 	if (name == NULL)
689 		return -1;
690 
691 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
692 		if (!rte_cryptodev_is_valid_device_data(i))
693 			continue;
694 		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
695 				== 0) &&
696 				(cryptodev_globals.devs[i].attached ==
697 						RTE_CRYPTODEV_ATTACHED))
698 			return i;
699 	}
700 
701 	return -1;
702 }
703 
704 uint8_t
705 rte_cryptodev_count(void)
706 {
707 	return cryptodev_globals.nb_devs;
708 }
709 
710 uint8_t
711 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
712 {
713 	uint8_t i, dev_count = 0;
714 
715 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
716 		if (cryptodev_globals.devs[i].driver_id == driver_id &&
717 			cryptodev_globals.devs[i].attached ==
718 					RTE_CRYPTODEV_ATTACHED)
719 			dev_count++;
720 
721 	return dev_count;
722 }
723 
724 uint8_t
725 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
726 	uint8_t nb_devices)
727 {
728 	uint8_t i, count = 0;
729 	struct rte_cryptodev *devs = cryptodev_globals.devs;
730 
731 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
732 		if (!rte_cryptodev_is_valid_device_data(i))
733 			continue;
734 
735 		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
736 			int cmp;
737 
738 			cmp = strncmp(devs[i].device->driver->name,
739 					driver_name,
740 					strlen(driver_name) + 1);
741 
742 			if (cmp == 0)
743 				devices[count++] = devs[i].data->dev_id;
744 		}
745 	}
746 
747 	return count;
748 }
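
/*
 * Usage sketch (illustrative; the driver name "crypto_aesni_mb" is just
 * an example): list the IDs of all attached devices bound to a given PMD.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t i, n;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_CRYPTO_MAX_DEVS);
 *	for (i = 0; i < n; i++)
 *		printf("dev %u on socket %d\n", ids[i],
 *				rte_cryptodev_socket_id(ids[i]));
 */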
749 
750 void *
751 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
752 {
753 	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
754 			(rte_crypto_devices[dev_id].feature_flags &
755 			RTE_CRYPTODEV_FF_SECURITY))
756 		return rte_crypto_devices[dev_id].security_ctx;
757 
758 	return NULL;
759 }
760 
761 int
762 rte_cryptodev_socket_id(uint8_t dev_id)
763 {
764 	struct rte_cryptodev *dev;
765 
766 	if (!rte_cryptodev_is_valid_dev(dev_id))
767 		return -1;
768 
769 	dev = rte_cryptodev_pmd_get_dev(dev_id);
770 
771 	return dev->data->socket_id;
772 }
773 
774 static inline int
775 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
776 		int socket_id)
777 {
778 	char mz_name[RTE_MEMZONE_NAMESIZE];
779 	const struct rte_memzone *mz;
780 	int n;
781 
782 	/* generate memzone name */
783 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
784 	if (n >= (int)sizeof(mz_name))
785 		return -EINVAL;
786 
787 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
788 		mz = rte_memzone_reserve(mz_name,
789 				sizeof(struct rte_cryptodev_data),
790 				socket_id, 0);
791 		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
792 				mz_name, mz);
793 	} else {
794 		mz = rte_memzone_lookup(mz_name);
795 		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
796 				mz_name, mz);
797 	}
798 
799 	if (mz == NULL)
800 		return -ENOMEM;
801 
802 	*data = mz->addr;
803 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
804 		memset(*data, 0, sizeof(struct rte_cryptodev_data));
805 
806 	return 0;
807 }
808 
809 static inline int
810 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
811 {
812 	char mz_name[RTE_MEMZONE_NAMESIZE];
813 	const struct rte_memzone *mz;
814 	int n;
815 
816 	/* generate memzone name */
817 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
818 	if (n >= (int)sizeof(mz_name))
819 		return -EINVAL;
820 
821 	mz = rte_memzone_lookup(mz_name);
822 	if (mz == NULL)
823 		return -ENOMEM;
824 
825 	RTE_ASSERT(*data == mz->addr);
826 	*data = NULL;
827 
828 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
829 		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
830 				mz_name, mz);
831 		return rte_memzone_free(mz);
832 	} else {
833 		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
834 				mz_name, mz);
835 	}
836 
837 	return 0;
838 }
839 
840 static uint8_t
841 rte_cryptodev_find_free_device_index(void)
842 {
843 	uint8_t dev_id;
844 
845 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
846 		if (rte_crypto_devices[dev_id].attached ==
847 				RTE_CRYPTODEV_DETACHED)
848 			return dev_id;
849 	}
850 	return RTE_CRYPTO_MAX_DEVS;
851 }
852 
853 struct rte_cryptodev *
854 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
855 {
856 	struct rte_cryptodev *cryptodev;
857 	uint8_t dev_id;
858 
859 	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
860 		CDEV_LOG_ERR("Crypto device with name %s already "
861 				"allocated!", name);
862 		return NULL;
863 	}
864 
865 	dev_id = rte_cryptodev_find_free_device_index();
866 	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
867 		CDEV_LOG_ERR("Reached maximum number of crypto devices");
868 		return NULL;
869 	}
870 
871 	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
872 
873 	if (cryptodev->data == NULL) {
874 		struct rte_cryptodev_data **cryptodev_data =
875 				&cryptodev_globals.data[dev_id];
876 
877 		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
878 				socket_id);
879 
880 		if (retval < 0 || *cryptodev_data == NULL)
881 			return NULL;
882 
883 		cryptodev->data = *cryptodev_data;
884 
885 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
886 			strlcpy(cryptodev->data->name, name,
887 				RTE_CRYPTODEV_NAME_MAX_LEN);
888 
889 			cryptodev->data->dev_id = dev_id;
890 			cryptodev->data->socket_id = socket_id;
891 			cryptodev->data->dev_started = 0;
892 			CDEV_LOG_DEBUG("PRIMARY:init data");
893 		}
894 
895 		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
896 				cryptodev->data->name,
897 				cryptodev->data->dev_id,
898 				cryptodev->data->socket_id,
899 				cryptodev->data->dev_started);
900 
901 		/* init user callbacks */
902 		TAILQ_INIT(&(cryptodev->link_intr_cbs));
903 
904 		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
905 
906 		cryptodev_globals.nb_devs++;
907 	}
908 
909 	return cryptodev;
910 }
911 
912 int
913 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
914 {
915 	int ret;
916 	uint8_t dev_id;
917 
918 	if (cryptodev == NULL)
919 		return -EINVAL;
920 
921 	dev_id = cryptodev->data->dev_id;
922 
923 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
924 
925 	/* Close device only if device operations have been set */
926 	if (cryptodev->dev_ops) {
927 		ret = rte_cryptodev_close(dev_id);
928 		if (ret < 0)
929 			return ret;
930 	}
931 
932 	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
933 	if (ret < 0)
934 		return ret;
935 
936 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
937 	cryptodev_globals.nb_devs--;
938 	return 0;
939 }
940 
941 uint16_t
942 rte_cryptodev_queue_pair_count(uint8_t dev_id)
943 {
944 	struct rte_cryptodev *dev;
945 
946 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
947 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
948 		return 0;
949 	}
950 
951 	dev = &rte_crypto_devices[dev_id];
952 	return dev->data->nb_queue_pairs;
953 }
954 
955 static int
956 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
957 		int socket_id)
958 {
959 	struct rte_cryptodev_info dev_info;
960 	void **qp;
961 	unsigned i;
962 
963 	if ((dev == NULL) || (nb_qpairs < 1)) {
964 		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
965 							dev, nb_qpairs);
966 		return -EINVAL;
967 	}
968 
969 	CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
970 			nb_qpairs, dev->data->dev_id);
971 
972 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
973 
974 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
975 	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
976 
977 	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
978 		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
979 				nb_qpairs, dev->data->dev_id);
980 		return -EINVAL;
981 	}
982 
983 	if (dev->data->queue_pairs == NULL) { /* first time configuration */
984 		dev->data->queue_pairs = rte_zmalloc_socket(
985 				"cryptodev->queue_pairs",
986 				sizeof(dev->data->queue_pairs[0]) *
987 				dev_info.max_nb_queue_pairs,
988 				RTE_CACHE_LINE_SIZE, socket_id);
989 
990 		if (dev->data->queue_pairs == NULL) {
991 			dev->data->nb_queue_pairs = 0;
992 			CDEV_LOG_ERR("failed to get memory for qp meta data, "
993 							"nb_queues %u",
994 							nb_qpairs);
995 			return -(ENOMEM);
996 		}
997 	} else { /* re-configure */
998 		int ret;
999 		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
1000 
1001 		qp = dev->data->queue_pairs;
1002 
1003 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
1004 				-ENOTSUP);
1005 
1006 		for (i = nb_qpairs; i < old_nb_queues; i++) {
1007 			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
1008 			if (ret < 0)
1009 				return ret;
1010 			qp[i] = NULL;
1011 		}
1012 
1013 	}
1014 	dev->data->nb_queue_pairs = nb_qpairs;
1015 	return 0;
1016 }
1017 
1018 int
1019 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
1020 {
1021 	struct rte_cryptodev *dev;
1022 	int diag;
1023 
1024 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1025 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1026 		return -EINVAL;
1027 	}
1028 
1029 	dev = &rte_crypto_devices[dev_id];
1030 
1031 	if (dev->data->dev_started) {
1032 		CDEV_LOG_ERR(
1033 		    "device %d must be stopped to allow configuration", dev_id);
1034 		return -EBUSY;
1035 	}
1036 
1037 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1038 
1039 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1040 	cryptodev_cb_cleanup(dev);
1041 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1042 
1043 	/* Setup new number of queue pairs and reconfigure device. */
1044 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
1045 			config->socket_id);
1046 	if (diag != 0) {
1047 		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
1048 				dev_id, diag);
1049 		return diag;
1050 	}
1051 
1052 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1053 	diag = cryptodev_cb_init(dev);
1054 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1055 	if (diag) {
1056 		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
1057 		return diag;
1058 	}
1059 
1060 	rte_cryptodev_trace_configure(dev_id, config);
1061 	return (*dev->dev_ops->dev_configure)(dev, config);
1062 }
1063 
1064 int
1065 rte_cryptodev_start(uint8_t dev_id)
1066 {
1067 	struct rte_cryptodev *dev;
1068 	int diag;
1069 
1070 	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1071 
1072 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1073 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1074 		return -EINVAL;
1075 	}
1076 
1077 	dev = &rte_crypto_devices[dev_id];
1078 
1079 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1080 
1081 	if (dev->data->dev_started != 0) {
1082 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1083 			dev_id);
1084 		return 0;
1085 	}
1086 
1087 	diag = (*dev->dev_ops->dev_start)(dev);
1088 	/* expose selection of PMD fast-path functions */
1089 	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
1090 
1091 	rte_cryptodev_trace_start(dev_id, diag);
1092 	if (diag == 0)
1093 		dev->data->dev_started = 1;
1094 	else
1095 		return diag;
1096 
1097 	return 0;
1098 }
1099 
1100 void
1101 rte_cryptodev_stop(uint8_t dev_id)
1102 {
1103 	struct rte_cryptodev *dev;
1104 
1105 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1106 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1107 		return;
1108 	}
1109 
1110 	dev = &rte_crypto_devices[dev_id];
1111 
1112 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1113 
1114 	if (dev->data->dev_started == 0) {
1115 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1116 			dev_id);
1117 		return;
1118 	}
1119 
1120 	/* point fast-path functions to dummy ones */
1121 	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
1122 
1123 	(*dev->dev_ops->dev_stop)(dev);
1124 	rte_cryptodev_trace_stop(dev_id);
1125 	dev->data->dev_started = 0;
1126 }
1127 
1128 int
1129 rte_cryptodev_close(uint8_t dev_id)
1130 {
1131 	struct rte_cryptodev *dev;
1132 	int retval;
1133 
1134 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1135 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1136 		return -1;
1137 	}
1138 
1139 	dev = &rte_crypto_devices[dev_id];
1140 
1141 	/* Device must be stopped before it can be closed */
1142 	if (dev->data->dev_started == 1) {
1143 		CDEV_LOG_ERR("Device %u must be stopped before closing",
1144 				dev_id);
1145 		return -EBUSY;
1146 	}
1147 
1148 	/* We can't close the device if there are outstanding sessions in use */
1149 	if (dev->data->session_pool != NULL) {
1150 		if (!rte_mempool_full(dev->data->session_pool)) {
1151 			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1152 					"has sessions still in use, free "
1153 					"all sessions before calling close",
1154 					(unsigned)dev_id);
1155 			return -EBUSY;
1156 		}
1157 	}
1158 
1159 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1160 	retval = (*dev->dev_ops->dev_close)(dev);
1161 	rte_cryptodev_trace_close(dev_id, retval);
1162 
1163 	if (retval < 0)
1164 		return retval;
1165 
1166 	return 0;
1167 }
1168 
1169 int
1170 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1171 {
1172 	struct rte_cryptodev *dev;
1173 
1174 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1175 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1176 		return -EINVAL;
1177 	}
1178 
1179 	dev = &rte_crypto_devices[dev_id];
1180 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1181 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1182 		return -EINVAL;
1183 	}
1184 	void **qps = dev->data->queue_pairs;
1185 
1186 	if (qps[queue_pair_id])	{
1187 		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1188 			queue_pair_id, dev_id);
1189 		return 1;
1190 	}
1191 
1192 	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1193 		queue_pair_id, dev_id);
1194 
1195 	return 0;
1196 }
1197 
1198 int
1199 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1200 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1201 
1202 {
1203 	struct rte_cryptodev *dev;
1204 
1205 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1206 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1207 		return -EINVAL;
1208 	}
1209 
1210 	dev = &rte_crypto_devices[dev_id];
1211 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1212 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1213 		return -EINVAL;
1214 	}
1215 
1216 	if (!qp_conf) {
1217 		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
1218 		return -EINVAL;
1219 	}
1220 
1221 	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1222 			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
1223 		CDEV_LOG_ERR("Invalid mempools\n");
1224 		return -EINVAL;
1225 	}
1226 
1227 	if (qp_conf->mp_session) {
1228 		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1229 		uint32_t obj_size = qp_conf->mp_session->elt_size;
1230 		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1231 		struct rte_cryptodev_sym_session s = {0};
1232 
1233 		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1234 		if (!pool_priv || qp_conf->mp_session->private_data_size <
1235 				sizeof(*pool_priv)) {
1236 			CDEV_LOG_ERR("Invalid mempool\n");
1237 			return -EINVAL;
1238 		}
1239 
1240 		s.nb_drivers = pool_priv->nb_drivers;
1241 		s.user_data_sz = pool_priv->user_data_sz;
1242 
1243 		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1244 			obj_size) || (s.nb_drivers <= dev->driver_id) ||
1245 			rte_cryptodev_sym_get_private_session_size(dev_id) >
1246 				obj_priv_size) {
1247 			CDEV_LOG_ERR("Invalid mempool\n");
1248 			return -EINVAL;
1249 		}
1250 	}
1251 
1252 	if (dev->data->dev_started) {
1253 		CDEV_LOG_ERR(
1254 		    "device %d must be stopped to allow configuration", dev_id);
1255 		return -EBUSY;
1256 	}
1257 
1258 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1259 
1260 	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1261 	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1262 			socket_id);
1263 }
1264 
1265 struct rte_cryptodev_cb *
1266 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1267 			       uint16_t qp_id,
1268 			       rte_cryptodev_callback_fn cb_fn,
1269 			       void *cb_arg)
1270 {
1271 	struct rte_cryptodev *dev;
1272 	struct rte_cryptodev_cb_rcu *list;
1273 	struct rte_cryptodev_cb *cb, *tail;
1274 
1275 	if (!cb_fn) {
1276 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1277 		rte_errno = EINVAL;
1278 		return NULL;
1279 	}
1280 
1281 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1282 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1283 		rte_errno = ENODEV;
1284 		return NULL;
1285 	}
1286 
1287 	dev = &rte_crypto_devices[dev_id];
1288 	if (qp_id >= dev->data->nb_queue_pairs) {
1289 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1290 		rte_errno = ENODEV;
1291 		return NULL;
1292 	}
1293 
1294 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1295 	if (cb == NULL) {
1296 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1297 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1298 		rte_errno = ENOMEM;
1299 		return NULL;
1300 	}
1301 
1302 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1303 
1304 	cb->fn = cb_fn;
1305 	cb->arg = cb_arg;
1306 
1307 	/* Add the callbacks in fifo order. */
1308 	list = &dev->enq_cbs[qp_id];
1309 	tail = list->next;
1310 
1311 	if (tail) {
1312 		while (tail->next)
1313 			tail = tail->next;
1314 		/* Stores to cb->fn and cb->arg should complete before
1315 		 * cb is visible to data plane.
1316 		 */
1317 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1318 	} else {
1319 		/* Stores to cb->fn and cb->arg should complete before
1320 		 * cb is visible to data plane.
1321 		 */
1322 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1323 	}
1324 
1325 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1326 
1327 	return cb;
1328 }
1329 
1330 int
1331 rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1332 				  uint16_t qp_id,
1333 				  struct rte_cryptodev_cb *cb)
1334 {
1335 	struct rte_cryptodev *dev;
1336 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1337 	struct rte_cryptodev_cb_rcu *list;
1338 	int ret;
1339 
1340 	ret = -EINVAL;
1341 
1342 	if (!cb) {
1343 		CDEV_LOG_ERR("Callback is NULL");
1344 		return -EINVAL;
1345 	}
1346 
1347 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1348 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1349 		return -ENODEV;
1350 	}
1351 
1352 	dev = &rte_crypto_devices[dev_id];
1353 	if (qp_id >= dev->data->nb_queue_pairs) {
1354 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1355 		return -ENODEV;
1356 	}
1357 
1358 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1359 	if (dev->enq_cbs == NULL) {
1360 		CDEV_LOG_ERR("Callback not initialized");
1361 		goto cb_err;
1362 	}
1363 
1364 	list = &dev->enq_cbs[qp_id];
1365 	if (list == NULL) {
1366 		CDEV_LOG_ERR("Callback list is NULL");
1367 		goto cb_err;
1368 	}
1369 
1370 	if (list->qsbr == NULL) {
1371 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1372 		goto cb_err;
1373 	}
1374 
1375 	prev_cb = &list->next;
1376 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1377 		curr_cb = *prev_cb;
1378 		if (curr_cb == cb) {
1379 			/* Remove the user cb from the callback list. */
1380 			__atomic_store_n(prev_cb, curr_cb->next,
1381 				__ATOMIC_RELAXED);
1382 			ret = 0;
1383 			break;
1384 		}
1385 	}
1386 
1387 	if (!ret) {
1388 		/* Call sync with invalid thread id as this is part of
1389 		 * control plane API
1390 		 */
1391 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1392 		rte_free(cb);
1393 	}
1394 
1395 cb_err:
1396 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1397 	return ret;
1398 }
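
/*
 * Usage sketch (illustrative): install a per-queue-pair enqueue callback
 * and later remove it. The callback runs in the data-plane enqueue path;
 * removal synchronizes through the per-queue RCU QSBR before freeing, as
 * implemented above. The counter variable is an assumed application
 * object.
 *
 *	static uint16_t
 *	count_enq(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *arg)
 *	{
 *		uint64_t *ctr = arg;
 *
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		*ctr += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_enq, &counter);
 *	...
 *	rte_cryptodev_remove_enq_callback(dev_id, 0, cb);
 */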
1399 
1400 struct rte_cryptodev_cb *
1401 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1402 			       uint16_t qp_id,
1403 			       rte_cryptodev_callback_fn cb_fn,
1404 			       void *cb_arg)
1405 {
1406 	struct rte_cryptodev *dev;
1407 	struct rte_cryptodev_cb_rcu *list;
1408 	struct rte_cryptodev_cb *cb, *tail;
1409 
1410 	if (!cb_fn) {
1411 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1412 		rte_errno = EINVAL;
1413 		return NULL;
1414 	}
1415 
1416 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1417 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1418 		rte_errno = ENODEV;
1419 		return NULL;
1420 	}
1421 
1422 	dev = &rte_crypto_devices[dev_id];
1423 	if (qp_id >= dev->data->nb_queue_pairs) {
1424 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1425 		rte_errno = ENODEV;
1426 		return NULL;
1427 	}
1428 
1429 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1430 	if (cb == NULL) {
1431 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1432 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1433 		rte_errno = ENOMEM;
1434 		return NULL;
1435 	}
1436 
1437 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1438 
1439 	cb->fn = cb_fn;
1440 	cb->arg = cb_arg;
1441 
1442 	/* Add the callbacks in fifo order. */
1443 	list = &dev->deq_cbs[qp_id];
1444 	tail = list->next;
1445 
1446 	if (tail) {
1447 		while (tail->next)
1448 			tail = tail->next;
1449 		/* Stores to cb->fn and cb->arg should complete before
1450 		 * cb is visible to data plane.
1451 		 */
1452 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1453 	} else {
1454 		/* Stores to cb->fn and cb->arg should complete before
1455 		 * cb is visible to data plane.
1456 		 */
1457 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1458 	}
1459 
1460 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1461 
1462 	return cb;
1463 }
1464 
1465 int
1466 rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1467 				  uint16_t qp_id,
1468 				  struct rte_cryptodev_cb *cb)
1469 {
1470 	struct rte_cryptodev *dev;
1471 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1472 	struct rte_cryptodev_cb_rcu *list;
1473 	int ret;
1474 
1475 	ret = -EINVAL;
1476 
1477 	if (!cb) {
1478 		CDEV_LOG_ERR("Callback is NULL");
1479 		return -EINVAL;
1480 	}
1481 
1482 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1483 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1484 		return -ENODEV;
1485 	}
1486 
1487 	dev = &rte_crypto_devices[dev_id];
1488 	if (qp_id >= dev->data->nb_queue_pairs) {
1489 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1490 		return -ENODEV;
1491 	}
1492 
1493 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1494 	if (dev->deq_cbs == NULL) {
1495 		CDEV_LOG_ERR("Callback not initialized");
1496 		goto cb_err;
1497 	}
1498 
1499 	list = &dev->deq_cbs[qp_id];
1500 	if (list == NULL) {
1501 		CDEV_LOG_ERR("Callback list is NULL");
1502 		goto cb_err;
1503 	}
1504 
1505 	if (list->qsbr == NULL) {
1506 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1507 		goto cb_err;
1508 	}
1509 
1510 	prev_cb = &list->next;
1511 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1512 		curr_cb = *prev_cb;
1513 		if (curr_cb == cb) {
1514 			/* Remove the user cb from the callback list. */
1515 			__atomic_store_n(prev_cb, curr_cb->next,
1516 				__ATOMIC_RELAXED);
1517 			ret = 0;
1518 			break;
1519 		}
1520 	}
1521 
1522 	if (!ret) {
1523 		/* Call sync with invalid thread id as this is part of
1524 		 * control plane API
1525 		 */
1526 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1527 		rte_free(cb);
1528 	}
1529 
1530 cb_err:
1531 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1532 	return ret;
1533 }
1534 
1535 int
1536 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1537 {
1538 	struct rte_cryptodev *dev;
1539 
1540 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1541 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1542 		return -ENODEV;
1543 	}
1544 
1545 	if (stats == NULL) {
1546 		CDEV_LOG_ERR("Invalid stats ptr");
1547 		return -EINVAL;
1548 	}
1549 
1550 	dev = &rte_crypto_devices[dev_id];
1551 	memset(stats, 0, sizeof(*stats));
1552 
1553 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1554 	(*dev->dev_ops->stats_get)(dev, stats);
1555 	return 0;
1556 }
1557 
1558 void
1559 rte_cryptodev_stats_reset(uint8_t dev_id)
1560 {
1561 	struct rte_cryptodev *dev;
1562 
1563 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1564 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1565 		return;
1566 	}
1567 
1568 	dev = &rte_crypto_devices[dev_id];
1569 
1570 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1571 	(*dev->dev_ops->stats_reset)(dev);
1572 }
1573 
1574 void
1575 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1576 {
1577 	struct rte_cryptodev *dev;
1578 
1579 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1580 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1581 		return;
1582 	}
1583 
1584 	dev = &rte_crypto_devices[dev_id];
1585 
1586 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1587 
1588 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1589 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1590 
1591 	dev_info->driver_name = dev->device->driver->name;
1592 	dev_info->device = dev->device;
1593 }
1594 
1595 int
1596 rte_cryptodev_callback_register(uint8_t dev_id,
1597 			enum rte_cryptodev_event_type event,
1598 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1599 {
1600 	struct rte_cryptodev *dev;
1601 	struct rte_cryptodev_callback *user_cb;
1602 
1603 	if (!cb_fn)
1604 		return -EINVAL;
1605 
1606 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1607 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1608 		return -EINVAL;
1609 	}
1610 
1611 	dev = &rte_crypto_devices[dev_id];
1612 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1613 
1614 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1615 		if (user_cb->cb_fn == cb_fn &&
1616 			user_cb->cb_arg == cb_arg &&
1617 			user_cb->event == event) {
1618 			break;
1619 		}
1620 	}
1621 
1622 	/* create a new callback. */
1623 	if (user_cb == NULL) {
1624 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1625 				sizeof(struct rte_cryptodev_callback), 0);
1626 		if (user_cb != NULL) {
1627 			user_cb->cb_fn = cb_fn;
1628 			user_cb->cb_arg = cb_arg;
1629 			user_cb->event = event;
1630 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1631 		}
1632 	}
1633 
1634 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1635 	return (user_cb == NULL) ? -ENOMEM : 0;
1636 }
1637 
1638 int
1639 rte_cryptodev_callback_unregister(uint8_t dev_id,
1640 			enum rte_cryptodev_event_type event,
1641 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1642 {
1643 	int ret;
1644 	struct rte_cryptodev *dev;
1645 	struct rte_cryptodev_callback *cb, *next;
1646 
1647 	if (!cb_fn)
1648 		return -EINVAL;
1649 
1650 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1651 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1652 		return -EINVAL;
1653 	}
1654 
1655 	dev = &rte_crypto_devices[dev_id];
1656 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1657 
1658 	ret = 0;
1659 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1660 
1661 		next = TAILQ_NEXT(cb, next);
1662 
1663 		if (cb->cb_fn != cb_fn || cb->event != event ||
1664 				(cb->cb_arg != (void *)-1 &&
1665 				cb->cb_arg != cb_arg))
1666 			continue;
1667 
1668 		/*
1669 		 * if this callback is not executing right now,
1670 		 * then remove it.
1671 		 */
1672 		if (cb->active == 0) {
1673 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1674 			rte_free(cb);
1675 		} else {
1676 			ret = -EAGAIN;
1677 		}
1678 	}
1679 
1680 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1681 	return ret;
1682 }
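
/*
 * Usage sketch (illustrative): register for device error events. The
 * handler runs from rte_cryptodev_pmd_callback_process() below with the
 * callback marked active, so unregistering it from inside the handler
 * returns -EAGAIN.
 *
 *	static void
 *	on_dev_event(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *arg)
 *	{
 *		RTE_SET_USED(arg);
 *		printf("dev %u event %d\n", dev_id, event);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			on_dev_event, NULL);
 */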
1683 
1684 void
1685 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1686 	enum rte_cryptodev_event_type event)
1687 {
1688 	struct rte_cryptodev_callback *cb_lst;
1689 	struct rte_cryptodev_callback dev_cb;
1690 
1691 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1692 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1693 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1694 			continue;
1695 		dev_cb = *cb_lst;
1696 		cb_lst->active = 1;
1697 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1698 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1699 						dev_cb.cb_arg);
1700 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
1701 		cb_lst->active = 0;
1702 	}
1703 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1704 }
1705 
1706 int
1707 rte_cryptodev_sym_session_init(uint8_t dev_id,
1708 		struct rte_cryptodev_sym_session *sess,
1709 		struct rte_crypto_sym_xform *xforms,
1710 		struct rte_mempool *mp)
1711 {
1712 	struct rte_cryptodev *dev;
1713 	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1714 			dev_id);
1715 	uint8_t index;
1716 	int ret;
1717 
1718 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1719 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1720 		return -EINVAL;
1721 	}
1722 
1723 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1724 
1725 	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
1726 		return -EINVAL;
1727 
1728 	if (mp->elt_size < sess_priv_sz)
1729 		return -EINVAL;
1730 
1731 	index = dev->driver_id;
1732 	if (index >= sess->nb_drivers)
1733 		return -EINVAL;
1734 
1735 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1736 
1737 	if (sess->sess_data[index].refcnt == 0) {
1738 		ret = dev->dev_ops->sym_session_configure(dev, xforms,
1739 							sess, mp);
1740 		if (ret < 0) {
1741 			CDEV_LOG_ERR(
1742 				"dev_id %d failed to configure session details",
1743 				dev_id);
1744 			return ret;
1745 		}
1746 	}
1747 
1748 	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1749 	sess->sess_data[index].refcnt++;
1750 	return 0;
1751 }
1752 
1753 int
1754 rte_cryptodev_asym_session_init(uint8_t dev_id,
1755 		struct rte_cryptodev_asym_session *sess,
1756 		struct rte_crypto_asym_xform *xforms,
1757 		struct rte_mempool *mp)
1758 {
1759 	struct rte_cryptodev *dev;
1760 	uint8_t index;
1761 	int ret;
1762 
1763 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1764 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1765 		return -EINVAL;
1766 	}
1767 
1768 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1769 
1770 	if (sess == NULL || xforms == NULL || dev == NULL)
1771 		return -EINVAL;
1772 
1773 	index = dev->driver_id;
1774 
1775 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1776 				-ENOTSUP);
1777 
1778 	if (sess->sess_private_data[index] == NULL) {
1779 		ret = dev->dev_ops->asym_session_configure(dev,
1780 							xforms,
1781 							sess, mp);
1782 		if (ret < 0) {
1783 			CDEV_LOG_ERR(
1784 				"dev_id %d failed to configure session details",
1785 				dev_id);
1786 			return ret;
1787 		}
1788 	}
1789 
1790 	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1791 	return 0;
1792 }
1793 
1794 struct rte_mempool *
1795 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1796 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1797 	int socket_id)
1798 {
1799 	struct rte_mempool *mp;
1800 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1801 	uint32_t obj_sz;
1802 
1803 	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1804 	if (obj_sz > elt_size)
1805 		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1806 				obj_sz);
1807 	else
1808 		obj_sz = elt_size;
1809 
1810 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1811 			(uint32_t)(sizeof(*pool_priv)),
1812 			NULL, NULL, NULL, NULL,
1813 			socket_id, 0);
1814 	if (mp == NULL) {
1815 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1816 			__func__, name, rte_errno);
1817 		return NULL;
1818 	}
1819 
1820 	pool_priv = rte_mempool_get_priv(mp);
1821 	if (!pool_priv) {
1822 		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1823 			__func__, name);
1824 		rte_mempool_free(mp);
1825 		return NULL;
1826 	}
1827 
1828 	pool_priv->nb_drivers = nb_drivers;
1829 	pool_priv->user_data_sz = user_data_size;
1830 
1831 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1832 		elt_size, cache_size, user_data_size, mp);
1833 	return mp;
1834 }
1835 
1836 static unsigned int
1837 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1838 {
1839 	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1840 			sess->user_data_sz;
1841 }
1842 
1843 static uint8_t
1844 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1845 {
1846 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1847 
1848 	if (!mp)
1849 		return 0;
1850 
1851 	pool_priv = rte_mempool_get_priv(mp);
1852 
1853 	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1854 			pool_priv->nb_drivers != nb_drivers ||
1855 			mp->elt_size <
1856 				rte_cryptodev_sym_get_header_session_size()
1857 				+ pool_priv->user_data_sz)
1858 		return 0;
1859 
1860 	return 1;
1861 }
1862 
1863 struct rte_cryptodev_sym_session *
1864 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1865 {
1866 	struct rte_cryptodev_sym_session *sess;
1867 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1868 
1869 	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
1870 		CDEV_LOG_ERR("Invalid mempool\n");
1871 		return NULL;
1872 	}
1873 
1874 	pool_priv = rte_mempool_get_priv(mp);
1875 
1876 	/* Allocate a session structure from the session pool */
1877 	if (rte_mempool_get(mp, (void **)&sess)) {
1878 		CDEV_LOG_ERR("couldn't get object from session mempool");
1879 		return NULL;
1880 	}
1881 
1882 	sess->nb_drivers = pool_priv->nb_drivers;
1883 	sess->user_data_sz = pool_priv->user_data_sz;
1884 	sess->opaque_data = 0;
1885 
1886 	/* Clear device session pointer.
1887 	 * Include the flag indicating presence of user data
1888 	 */
1889 	memset(sess->sess_data, 0,
1890 			rte_cryptodev_sym_session_data_size(sess));
1891 
1892 	rte_cryptodev_trace_sym_session_create(mp, sess);
1893 	return sess;
1894 }
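
/*
 * Usage sketch (illustrative): full symmetric session lifecycle using
 * the functions in this file. The pool names/sizes are placeholders and
 * xform is an assumed, pre-built rte_crypto_sym_xform chain; the private
 * pool element size must cover the device's private session size.
 *
 *	struct rte_mempool *hdr_mp = rte_cryptodev_sym_session_pool_create(
 *		"sess_hdr", 1024, 0, 64, 0, rte_socket_id());
 *	struct rte_mempool *priv_mp = rte_mempool_create("sess_priv", 1024,
 *		rte_cryptodev_sym_get_private_session_size(dev_id), 64, 0,
 *		NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(hdr_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 *	...
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */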
1895 
1896 struct rte_cryptodev_asym_session *
1897 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1898 {
1899 	struct rte_cryptodev_asym_session *sess;
1900 	unsigned int session_size =
1901 			rte_cryptodev_asym_get_header_session_size();
1902 
1903 	if (!mp) {
1904 		CDEV_LOG_ERR("invalid mempool\n");
1905 		return NULL;
1906 	}
1907 
1908 	/* Verify that the provided mempool's elements are big enough. */
1909 	if (mp->elt_size < session_size) {
1910 		CDEV_LOG_ERR(
1911 			"mempool elements too small to hold session objects");
1912 		return NULL;
1913 	}
1914 
1915 	/* Allocate a session structure from the session pool */
1916 	if (rte_mempool_get(mp, (void **)&sess)) {
1917 		CDEV_LOG_ERR("couldn't get object from session mempool");
1918 		return NULL;
1919 	}
1920 
1921 	/* Clear device session pointer.
1922 	 * Include the flag indicating presence of private data
1923 	 */
1924 	memset(sess, 0, session_size);
1925 
1926 	rte_cryptodev_trace_asym_session_create(mp, sess);
1927 	return sess;
1928 }
1929 
1930 int
1931 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1932 		struct rte_cryptodev_sym_session *sess)
1933 {
1934 	struct rte_cryptodev *dev;
1935 	uint8_t driver_id;
1936 
1937 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1938 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1939 		return -EINVAL;
1940 	}
1941 
1942 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1943 
1944 	if (dev == NULL || sess == NULL)
1945 		return -EINVAL;
1946 
1947 	driver_id = dev->driver_id;
1948 	if (sess->sess_data[driver_id].refcnt == 0)
1949 		return 0;
1950 	if (--sess->sess_data[driver_id].refcnt != 0)
1951 		return -EBUSY;
1952 
1953 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1954 
1955 	dev->dev_ops->sym_session_clear(dev, sess);
1956 
1957 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1958 	return 0;
1959 }
1960 
1961 int
1962 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1963 		struct rte_cryptodev_asym_session *sess)
1964 {
1965 	struct rte_cryptodev *dev;
1966 
1967 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
1968 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1969 		return -EINVAL;
1970 	}
1971 
1972 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1973 
1974 	if (dev == NULL || sess == NULL)
1975 		return -EINVAL;
1976 
1977 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1978 
1979 	dev->dev_ops->asym_session_clear(dev, sess);
1980 
1981 	rte_cryptodev_trace_asym_session_clear(dev_id, sess);
1982 	return 0;
1983 }
1984 
1985 int
1986 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1987 {
1988 	uint8_t i;
1989 	struct rte_mempool *sess_mp;
1990 
1991 	if (sess == NULL)
1992 		return -EINVAL;
1993 
1994 	/* Check that all device private data has been freed */
1995 	for (i = 0; i < sess->nb_drivers; i++) {
1996 		if (sess->sess_data[i].refcnt != 0)
1997 			return -EBUSY;
1998 	}
1999 
2000 	/* Return session to mempool */
2001 	sess_mp = rte_mempool_from_obj(sess);
2002 	rte_mempool_put(sess_mp, sess);
2003 
2004 	rte_cryptodev_trace_sym_session_free(sess);
2005 	return 0;
2006 }
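/*
 * Teardown sketch: a session initialised on several devices must be
 * cleared on each of them before free() succeeds; -EBUSY means a
 * per-driver reference is still held. dev_id is a placeholder for each
 * device the session was initialised on.
 *
 *	(void)rte_cryptodev_sym_session_clear(dev_id, sess);
 *	if (rte_cryptodev_sym_session_free(sess) == -EBUSY)
 *		printf("session still initialised on another device\n");
 */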
2007 
2008 int
2009 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
2010 {
2011 	uint8_t i;
2012 	void *sess_priv;
2013 	struct rte_mempool *sess_mp;
2014 
2015 	if (sess == NULL)
2016 		return -EINVAL;
2017 
2018 	/* Check that all device private data has been freed */
2019 	for (i = 0; i < nb_drivers; i++) {
2020 		sess_priv = get_asym_session_private_data(sess, i);
2021 		if (sess_priv != NULL)
2022 			return -EBUSY;
2023 	}
2024 
2025 	/* Return session to mempool */
2026 	sess_mp = rte_mempool_from_obj(sess);
2027 	rte_mempool_put(sess_mp, sess);
2028 
2029 	rte_cryptodev_trace_asym_session_free(sess);
2030 	return 0;
2031 }
2032 
2033 unsigned int
2034 rte_cryptodev_sym_get_header_session_size(void)
2035 {
2036 	/*
2037 	 * Header contains pointers to the private data of all registered
2038 	 * drivers and all the information needed to safely clear
2039 	 * or free the session.
2040 	 */
2041 	struct rte_cryptodev_sym_session s = {0};
2042 
2043 	s.nb_drivers = nb_drivers;
2044 
2045 	return (unsigned int)(sizeof(s) +
2046 			rte_cryptodev_sym_session_data_size(&s));
2047 }
2048 
2049 unsigned int
2050 rte_cryptodev_sym_get_existing_header_session_size(
2051 		struct rte_cryptodev_sym_session *sess)
2052 {
2053 	if (!sess)
2054 		return 0;
2055 	else
2056 		return (unsigned int)(sizeof(*sess) +
2057 				rte_cryptodev_sym_session_data_size(sess));
2058 }
2059 
2060 unsigned int
2061 rte_cryptodev_asym_get_header_session_size(void)
2062 {
2063 	/*
2064 	 * Header contains pointers to the private data
2065 	 * of all registered drivers, and a flag which
2066 	 * indicates presence of private data
2067 	 */
2068 	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
2069 }
2070 
2071 unsigned int
2072 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2073 {
2074 	struct rte_cryptodev *dev;
2075 	unsigned int priv_sess_size;
2076 
2077 	if (!rte_cryptodev_is_valid_dev(dev_id))
2078 		return 0;
2079 
2080 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2081 
2082 	if (*dev->dev_ops->sym_session_get_size == NULL)
2083 		return 0;
2084 
2085 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2086 
2087 	return priv_sess_size;
2088 }
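/*
 * Sizing sketch (assumed application code, arbitrary pool parameters):
 * the header pool element size comes from the header-size query, the
 * private-data pool element size from the per-device query above.
 *
 *	unsigned int hdr_sz = rte_cryptodev_sym_get_header_session_size();
 *	unsigned int priv_sz =
 *		rte_cryptodev_sym_get_private_session_size(dev_id);
 *	struct rte_mempool *hdr_mp = rte_cryptodev_sym_session_pool_create(
 *		"sess_hdr", 1024, hdr_sz, 32, 0, rte_socket_id());
 *	struct rte_mempool *priv_mp = rte_mempool_create("sess_priv", 1024,
 *		priv_sz, 32, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 */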
2089 
2090 unsigned int
2091 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2092 {
2093 	struct rte_cryptodev *dev;
2094 	unsigned int header_size = sizeof(void *) * nb_drivers;
2095 	unsigned int priv_sess_size;
2096 
2097 	if (!rte_cryptodev_is_valid_dev(dev_id))
2098 		return 0;
2099 
2100 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2101 
2102 	if (*dev->dev_ops->asym_session_get_size == NULL)
2103 		return 0;
2104 
2105 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2106 	if (priv_sess_size < header_size)
2107 		return header_size;
2108 
2109 	return priv_sess_size;
2111 }
2112 
2113 int
2114 rte_cryptodev_sym_session_set_user_data(
2115 					struct rte_cryptodev_sym_session *sess,
2116 					void *data,
2117 					uint16_t size)
2118 {
2119 	if (sess == NULL)
2120 		return -EINVAL;
2121 
2122 	if (sess->user_data_sz < size)
2123 		return -ENOMEM;
2124 
2125 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2126 	return 0;
2127 }
2128 
2129 void *
2130 rte_cryptodev_sym_session_get_user_data(
2131 					struct rte_cryptodev_sym_session *sess)
2132 {
2133 	if (sess == NULL || sess->user_data_sz == 0)
2134 		return NULL;
2135 
2136 	return (void *)(sess->sess_data + sess->nb_drivers);
2137 }
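/*
 * Sketch: attaching per-session application state. struct app_ctx is a
 * hypothetical application type; set() fails with -ENOMEM when the
 * session pool was created with a smaller user_data_sz.
 *
 *	struct app_ctx ctx = { 0 };
 *	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
 *			sizeof(ctx)) != 0)
 *		printf("user data area too small\n");
 *	struct app_ctx *p =
 *		rte_cryptodev_sym_session_get_user_data(sess);
 */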
2138 
2139 static inline void
2140 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2141 {
2142 	uint32_t i;
2143 	for (i = 0; i < vec->num; i++)
2144 		vec->status[i] = errnum;
2145 }
2146 
2147 uint32_t
2148 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2149 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2150 	struct rte_crypto_sym_vec *vec)
2151 {
2152 	struct rte_cryptodev *dev;
2153 
2154 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2155 		sym_crypto_fill_status(vec, EINVAL);
2156 		return 0;
2157 	}
2158 
2159 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2160 
2161 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2162 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2163 		sym_crypto_fill_status(vec, ENOTSUP);
2164 		return 0;
2165 	}
2166 
2167 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2168 }
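/*
 * Caller-side sketch: gate the synchronous CPU path on the feature flag,
 * since an unsupported device reports ENOTSUP per element through
 * vec.status rather than via the return value. vec, ofs and sess are
 * placeholders prepared by the application.
 *
 *	struct rte_cryptodev_info info;
 *	uint32_t n_ok = 0;
 *	rte_cryptodev_info_get(dev_id, &info);
 *	if (info.feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)
 *		n_ok = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess,
 *				ofs, &vec);
 */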
2169 
2170 int
2171 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2172 {
2173 	struct rte_cryptodev *dev;
2174 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2175 	int32_t priv_size;
2176 
2177 	if (!rte_cryptodev_is_valid_dev(dev_id))
2178 		return -EINVAL;
2179 
2180 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2181 
2182 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2183 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2184 		return -ENOTSUP;
2185 	}
2186 
2187 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2188 	if (priv_size < 0)
2189 		return -ENOTSUP;
2190 
2191 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2192 }
2193 
2194 int
2195 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2196 	struct rte_crypto_raw_dp_ctx *ctx,
2197 	enum rte_crypto_op_sess_type sess_type,
2198 	union rte_cryptodev_session_ctx session_ctx,
2199 	uint8_t is_update)
2200 {
2201 	struct rte_cryptodev *dev;
2202 
2203 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2204 		return -EINVAL;
2205 
2206 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2207 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2208 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2209 		return -ENOTSUP;
2210 
2211 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2212 			sess_type, session_ctx, is_update);
2213 }
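/*
 * Raw data-path setup sketch: the caller owns the context memory, sized
 * by the query above; dev_id, qp_id and sess are placeholders.
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx =
 *		(sz > 0) ? rte_zmalloc(NULL, sz, 8) : NULL;
 *	union rte_cryptodev_session_ctx sctx = { .crypto_sess = sess };
 *	if (ctx == NULL ||
 *	    rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sctx, 0) < 0)
 *		printf("raw data-path not usable on this device\n");
 */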
2214 
2215 uint32_t
2216 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2217 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2218 	void **user_data, int *enqueue_status)
2219 {
2220 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2221 			ofs, user_data, enqueue_status);
2222 }
2223 
2224 int
2225 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2226 		uint32_t n)
2227 {
2228 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2229 }
2230 
2231 uint32_t
2232 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2233 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2234 	uint32_t max_nb_to_dequeue,
2235 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2236 	void **out_user_data, uint8_t is_user_data_array,
2237 	uint32_t *n_success_jobs, int *status)
2238 {
2239 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2240 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2241 		out_user_data, is_user_data_array, n_success_jobs, status);
2242 }
2243 
2244 int
2245 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2246 		uint32_t n)
2247 {
2248 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2249 }
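/*
 * Raw data-path burst sketch: when the PMD defers the doorbell, enqueued
 * descriptors only become visible to hardware after the matching
 * *_done() call. ctx, vec, ofs and udata are caller-prepared
 * placeholders.
 *
 *	int st;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			udata, &st);
 *	if (n > 0 && rte_cryptodev_raw_enqueue_done(ctx, n) < 0)
 *		printf("enqueue doorbell failed\n");
 */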
2250 
2251 /** Initialise rte_crypto_op mempool element */
2252 static void
2253 rte_crypto_op_init(struct rte_mempool *mempool,
2254 		void *opaque_arg,
2255 		void *_op_data,
2256 		__rte_unused unsigned i)
2257 {
2258 	struct rte_crypto_op *op = _op_data;
2259 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2260 
2261 	memset(_op_data, 0, mempool->elt_size);
2262 
2263 	__rte_crypto_op_reset(op, type);
2264 
2265 	op->phys_addr = rte_mem_virt2iova(_op_data);
2266 	op->mempool = mempool;
2267 }
2268 
2270 struct rte_mempool *
2271 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2272 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2273 		int socket_id)
2274 {
2275 	struct rte_crypto_op_pool_private *priv;
2276 
2277 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2278 			priv_size;
2279 
2280 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2281 		elt_size += sizeof(struct rte_crypto_sym_op);
2282 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2283 		elt_size += sizeof(struct rte_crypto_asym_op);
2284 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2285 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2286 		                    sizeof(struct rte_crypto_asym_op));
2287 	} else {
2288 		CDEV_LOG_ERR("Invalid op_type");
2289 		return NULL;
2290 	}
2291 
2292 	/* lookup mempool in case already allocated */
2293 	struct rte_mempool *mp = rte_mempool_lookup(name);
2294 
2295 	if (mp != NULL) {
2296 		priv = (struct rte_crypto_op_pool_private *)
2297 				rte_mempool_get_priv(mp);
2298 
2299 		if (mp->elt_size != elt_size ||
2300 				mp->cache_size < cache_size ||
2301 				mp->size < nb_elts ||
2302 				priv->priv_size < priv_size) {
2303 			mp = NULL;
2304 			CDEV_LOG_ERR("Mempool %s already exists but with "
2305 					"incompatible parameters", name);
2306 			return NULL;
2307 		}
2308 		return mp;
2309 	}
2310 
2311 	mp = rte_mempool_create(
2312 			name,
2313 			nb_elts,
2314 			elt_size,
2315 			cache_size,
2316 			sizeof(struct rte_crypto_op_pool_private),
2317 			NULL,
2318 			NULL,
2319 			rte_crypto_op_init,
2320 			&type,
2321 			socket_id,
2322 			0);
2323 
2324 	if (mp == NULL) {
2325 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2326 		return NULL;
2327 	}
2328 
2329 	priv = (struct rte_crypto_op_pool_private *)
2330 			rte_mempool_get_priv(mp);
2331 
2332 	priv->priv_size = priv_size;
2333 	priv->type = type;
2334 
2335 	return mp;
2336 }
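/*
 * Usage sketch (arbitrary sizes): create a symmetric op pool and draw a
 * burst of operations from it.
 *
 *	struct rte_mempool *op_mp = rte_crypto_op_pool_create("sym_op_pool",
 *		RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0, rte_socket_id());
 *	struct rte_crypto_op *ops[32];
 *	if (op_mp == NULL || rte_crypto_op_bulk_alloc(op_mp,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, 32) == 0)
 *		printf("op pool unavailable or exhausted\n");
 */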
2337 
2338 int
2339 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2340 {
2341 	struct rte_cryptodev *dev = NULL;
2342 	uint32_t i = 0;
2343 
2344 	if (name == NULL)
2345 		return -EINVAL;
2346 
2347 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2348 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2349 				"%s_%u", dev_name_prefix, i);
2350 
2351 		if (ret < 0)
2352 			return ret;
2353 
2354 		dev = rte_cryptodev_pmd_get_named_dev(name);
2355 		if (!dev)
2356 			return 0;
2357 	}
2358 
2359 	return -1;
2360 }
2361 
2362 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2363 
2364 static struct cryptodev_driver_list cryptodev_driver_list =
2365 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2366 
2367 int
2368 rte_cryptodev_driver_id_get(const char *name)
2369 {
2370 	struct cryptodev_driver *driver;
2371 	const char *driver_name;
2372 
2373 	if (name == NULL) {
2374 		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2375 		return -1;
2376 	}
2377 
2378 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2379 		driver_name = driver->driver->name;
2380 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2381 			return driver->id;
2382 	}
2383 	return -1;
2384 }
2385 
2386 const char *
2387 rte_cryptodev_name_get(uint8_t dev_id)
2388 {
2389 	struct rte_cryptodev *dev;
2390 
2391 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2392 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2393 		return NULL;
2394 	}
2395 
2396 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2397 	if (dev == NULL)
2398 		return NULL;
2399 
2400 	return dev->data->name;
2401 }
2402 
2403 const char *
2404 rte_cryptodev_driver_name_get(uint8_t driver_id)
2405 {
2406 	struct cryptodev_driver *driver;
2407 
2408 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2409 		if (driver->id == driver_id)
2410 			return driver->driver->name;
2411 	return NULL;
2412 }
2413 
2414 uint8_t
2415 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2416 		const struct rte_driver *drv)
2417 {
2418 	crypto_drv->driver = drv;
2419 	crypto_drv->id = nb_drivers;
2420 
2421 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2422 
2423 	return nb_drivers++;
2424 }
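/*
 * Registration sketch: PMDs normally obtain their driver id through the
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() constructor macro rather than by
 * calling this function directly; names below are placeholders.
 *
 *	static struct cryptodev_driver example_crypto_drv;
 *	static uint8_t example_driver_id;
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(example_crypto_drv,
 *		example_pmd.driver, example_driver_id);
 */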
2425 
2426 RTE_INIT(cryptodev_init_fp_ops)
2427 {
2428 	uint32_t i;
2429 
2430 	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
2431 		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
2432 }
2433