/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= rte_crypto_devices,
		.data			= { NULL },
		.nb_devs		= 0
};

/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback's parameters, and the event type.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};

/**
 * String identifiers for the crypto cipher algorithms.
 * They can be used on the application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};

/**
 * String identifiers for the crypto cipher operations.
 * They can be used on the application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the crypto auth algorithms.
 * They can be used on the application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};

/**
 * String identifiers for the crypto AEAD algorithms.
 * They can be used on the application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

/**
 * String identifiers for the crypto AEAD operations.
 * They can be used on the application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};

/**
 * String identifiers for the asymmetric crypto transforms.
 */
const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
};

/**
 * String identifiers for the asymmetric crypto operations.
 */
const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
};

/**
 * The private data structure stored in the session mempool's private data
 * area.
 */
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	/**< number of elements in sess_data array */
	uint16_t user_data_sz;
	/**< session user data will be placed after sess_data */
};

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
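
/*
 * Usage sketch (illustrative only, not part of the library): resolving a
 * command-line string such as "aes-cbc" back to its enum value with the
 * lookup above. The string literal is just an example input.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") == 0)
 *		printf("cipher algo enum: %d\n", (int)algo);
 *	else
 *		printf("unknown cipher algorithm string\n");
 */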

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
			rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}

/**
 * String identifiers for the crypto auth operations.
 * They can be used on the application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	/* Check lower/upper bounds */
	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	/* If range is actually only one value, size is correct */
	if (range->increment == 0)
		return 0;

	/* Check if value is one of the supported sizes */
	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}
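
/*
 * Worked example for param_range_check() (illustrative only, values
 * invented): a range of { .min = 16, .max = 32, .increment = 8 } accepts
 * exactly 16, 24 and 32; an increment of 0 means the range collapses to the
 * single value min == max.
 *
 *	static const struct rte_crypto_param_range key_range = {
 *		.min = 16, .max = 32, .increment = 8,
 *	};
 *
 *	param_range_check(24, &key_range);	// 0: on the 8-byte step
 *	param_range_check(20, &key_range);	// -1: inside bounds, off-step
 */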

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}
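
/*
 * Usage sketch (illustrative only): query a device for AES-CBC support and
 * validate a 16-byte key and 16-byte IV against the advertised ranges.
 * dev_id is assumed to identify a valid, configured device.
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *		printf("AES-CBC with these key/IV sizes is supported\n");
 */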

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen)
{
	/* no need to check the limits if min or max is 0 */
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	/* in any case, check that the given modlen is a multiple of the
	 * increment
	 */
	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}
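
/*
 * Usage sketch (illustrative only): check whether a device's RSA capability
 * covers 256-byte (2048-bit) moduli. dev_id is assumed valid.
 *
 *	struct rte_cryptodev_asym_capability_idx asym_idx = {
 *		.type = RTE_CRYPTO_ASYM_XFORM_RSA,
 *	};
 *	const struct rte_cryptodev_asymmetric_xform_capability *asym_cap;
 *
 *	asym_cap = rte_cryptodev_asym_capability_get(dev_id, &asym_idx);
 *	if (asym_cap != NULL &&
 *	    rte_cryptodev_asym_xform_capability_check_modlen(asym_cap,
 *			256) == 0)
 *		printf("2048-bit RSA moduli supported\n");
 */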

/* spinlock for crypto device enq/deq callbacks */
static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max threads is set to 1, as only one data-plane thread accesses a
	 * queue pair at a time.
	 */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
				   sizeof(struct rte_cryptodev_cb_rcu) *
				   dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		return -ENOMEM;
	}

	/* Create RCU QSBR variable */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}
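
/*
 * Usage sketch (illustrative only): print the name of every feature flag a
 * device advertises, walking the 64-bit flag word one bit at a time.
 * dev_info is assumed to have been filled by rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_cryptodev_get_feature_name(flag);
 *
 *		if ((dev_info.feature_flags & flag) && name != NULL)
 *			printf("  %s\n", name);
 *	}
 */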

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
				== 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
			cryptodev_globals.devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
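
/*
 * Usage sketch (illustrative only): collect the ids of all attached devices
 * bound to one driver. "crypto_aesni_mb" is just an example driver name.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t nb, i;
 *
 *	nb = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	for (i = 0; i < nb; i++)
 *		printf("dev %u is a crypto_aesni_mb device\n", ids[i]);
 */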

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
			RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
				RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		/* init user callbacks */
		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
							"nb_queues %u",
							nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
						" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}
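
/*
 * Usage sketch (illustrative only): minimal device configuration. Socket 0
 * and two queue pairs are example choices; error handling is trimmed.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = 0,
 *		.nb_queue_pairs = 2,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure cryptodev %u\n",
 *				dev_id);
 */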

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
			queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
		queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
			obj_size) || (s.nb_drivers <= dev->driver_id) ||
			rte_cryptodev_sym_get_private_session_size(dev_id) >
				obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
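
/*
 * Usage sketch (illustrative only): set up every queue pair after
 * rte_cryptodev_configure() and before rte_cryptodev_start(). conf is the
 * configuration passed to rte_cryptodev_configure(); the mempool variables
 * and descriptor count are assumed application state.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = session_pool,
 *		.mp_session_private = session_priv_pool,
 *	};
 *	uint16_t qp_id;
 *
 *	for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++)
 *		if (rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *				0) < 0)
 *			rte_exit(EXIT_FAILURE, "cannot set up qp %u\n", qp_id);
 */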

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}
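
/*
 * Usage sketch (illustrative only): install a callback that counts the
 * operations passing through enqueue on queue pair 0. count_ops_cb and
 * enq_count are hypothetical application code.
 *
 *	static uint16_t
 *	count_ops_cb(uint16_t dev_id, uint16_t qp_id,
 *			struct rte_crypto_op **ops, uint16_t nb_ops, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	static uint64_t enq_count;
 *	struct rte_cryptodev_cb *cb;
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, 0, count_ops_cb,
 *			&enq_count);
 *	if (cb == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot add enq callback\n");
 */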

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
			       uint16_t qp_id,
			       rte_cryptodev_callback_fn cb_fn,
			       void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	/* Add the callbacks in fifo order. */
	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
				  uint16_t qp_id,
				  struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, curr_cb->next,
				__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		/* Call sync with invalid thread id as this is part of
		 * control plane API
		 */
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
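
/*
 * Usage sketch (illustrative only): register for device error events.
 * on_dev_error is a hypothetical handler matching rte_cryptodev_cb_fn.
 *
 *	static void
 *	on_dev_error(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
 *			printf("cryptodev %u reported an error\n", dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			on_dev_error, NULL);
 */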

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

int
rte_cryptodev_asym_session_init(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess,
		struct rte_crypto_asym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL)
		return -EINVAL;

	index = dev->driver_id;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
				-ENOTSUP);

	if (sess->sess_private_data[index] == NULL) {
		ret = dev->dev_ops->asym_session_configure(dev,
							xforms,
							sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
	return 0;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
	int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
			__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
			__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
		elt_size, cache_size, user_data_size, mp);
	return mp;
}

static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->nb_drivers != nb_drivers ||
			mp->elt_size <
				rte_cryptodev_sym_get_header_session_size()
				+ pool_priv->user_data_sz)
		return 0;

	return 1;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointer.
	 * Include the flag indicating presence of user data
	 */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}
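
/*
 * Usage sketch (illustrative only): the two-mempool symmetric session flow.
 * sess_pool is assumed to come from rte_cryptodev_sym_session_pool_create();
 * sess_priv_pool is an ordinary mempool whose element size covers
 * rte_cryptodev_sym_get_private_session_size(dev_id); xform is an
 * already-filled rte_crypto_sym_xform.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_pool);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			sess_priv_pool) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot create session\n");
 *
 *	// ... attach sess to crypto ops, enqueue/dequeue bursts ...
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */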

struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_asym_session *sess;
	unsigned int session_size =
			rte_cryptodev_asym_get_header_session_size();

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool");
		return NULL;
	}

	/* Verify that the provided mempool's elements are big enough. */
	if (mp->elt_size < session_size) {
		CDEV_LOG_ERR(
			"mempool elements too small to hold session objects");
		return NULL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	/* Clear device session pointer.
	 * Include the flag indicating presence of private data
	 */
	memset(sess, 0, session_size);

	rte_cryptodev_trace_asym_session_create(mp, sess);
	return sess;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_asym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_cryptodev_trace_asym_session_clear(dev_id, sess);
	return 0;
}
1988 
1989 int
1990 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1991 {
1992 	uint8_t i;
1993 	struct rte_mempool *sess_mp;
1994 
1995 	if (sess == NULL)
1996 		return -EINVAL;
1997 
1998 	/* Check that all device private data has been freed */
1999 	for (i = 0; i < sess->nb_drivers; i++) {
2000 		if (sess->sess_data[i].refcnt != 0)
2001 			return -EBUSY;
2002 	}
2003 
2004 	/* Return session to mempool */
2005 	sess_mp = rte_mempool_from_obj(sess);
2006 	rte_mempool_put(sess_mp, sess);
2007 
2008 	rte_cryptodev_trace_sym_session_free(sess);
2009 	return 0;
2010 }
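
/*
 * Teardown sketch (illustrative only): a session must be cleared on every
 * device it was initialised for before it can be freed; the free above
 * fails with -EBUSY while any per-driver refcnt is non-zero. The
 * single-device case is shown; the helper name is hypothetical.
 */
static __rte_unused int
example_sym_session_teardown(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	int ret;

	/* Drop this device's reference and its private data. */
	ret = rte_cryptodev_sym_session_clear(dev_id, sess);
	if (ret != 0)
		return ret;

	/* All references gone: return the header to its mempool. */
	return rte_cryptodev_sym_session_free(sess);
}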
2011 
2012 int
2013 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
2014 {
2015 	uint8_t i;
2016 	void *sess_priv;
2017 	struct rte_mempool *sess_mp;
2018 
2019 	if (sess == NULL)
2020 		return -EINVAL;
2021 
2022 	/* Check that all device private data has been freed */
2023 	for (i = 0; i < nb_drivers; i++) {
2024 		sess_priv = get_asym_session_private_data(sess, i);
2025 		if (sess_priv != NULL)
2026 			return -EBUSY;
2027 	}
2028 
2029 	/* Return session to mempool */
2030 	sess_mp = rte_mempool_from_obj(sess);
2031 	rte_mempool_put(sess_mp, sess);
2032 
2033 	rte_cryptodev_trace_asym_session_free(sess);
2034 	return 0;
2035 }
2036 
2037 unsigned int
2038 rte_cryptodev_sym_get_header_session_size(void)
2039 {
2040 	/*
2041 	 * The header contains pointers to the private data of all registered
2042 	 * drivers and all the information needed to safely clear or free a
2043 	 * session.
2044 	 */
2045 	struct rte_cryptodev_sym_session s = {0};
2046 
2047 	s.nb_drivers = nb_drivers;
2048 
2049 	return (unsigned int)(sizeof(s) +
2050 			rte_cryptodev_sym_session_data_size(&s));
2051 }
2052 
2053 unsigned int
2054 rte_cryptodev_sym_get_existing_header_session_size(
2055 		struct rte_cryptodev_sym_session *sess)
2056 {
2057 	if (!sess)
2058 		return 0;
2059 	else
2060 		return (unsigned int)(sizeof(*sess) +
2061 				rte_cryptodev_sym_session_data_size(sess));
2062 }
2063 
2064 unsigned int
2065 rte_cryptodev_asym_get_header_session_size(void)
2066 {
2067 	/*
2068 	 * The header contains pointers to the private data of all
2069 	 * registered drivers, plus a flag indicating the presence of
2070 	 * private data.
2071 	 */
2072 	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
2073 }
2074 
2075 unsigned int
2076 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2077 {
2078 	struct rte_cryptodev *dev;
2079 	unsigned int priv_sess_size;
2080 
2081 	if (!rte_cryptodev_is_valid_dev(dev_id))
2082 		return 0;
2083 
2084 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2085 
2086 	if (*dev->dev_ops->sym_session_get_size == NULL)
2087 		return 0;
2088 
2089 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2090 
2091 	return priv_sess_size;
2092 }
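
/*
 * Sizing sketch (illustrative only): a private-data mempool shared by
 * several devices must be sized for the largest per-device session. The
 * pool name and parameters below are hypothetical.
 */
static __rte_unused struct rte_mempool *
example_sym_priv_pool_create(const uint8_t *dev_ids, unsigned int nb_devs,
		unsigned int nb_elts, int socket_id)
{
	unsigned int i, max_size = 0;

	for (i = 0; i < nb_devs; i++) {
		unsigned int sz =
			rte_cryptodev_sym_get_private_session_size(dev_ids[i]);
		if (sz > max_size)
			max_size = sz;
	}

	/* No per-object init is needed here; the PMD fills the element
	 * during rte_cryptodev_sym_session_init().
	 */
	return rte_mempool_create("example_sess_priv", nb_elts, max_size,
			0, 0, NULL, NULL, NULL, NULL, socket_id, 0);
}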
2093 
2094 unsigned int
2095 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2096 {
2097 	struct rte_cryptodev *dev;
2098 	unsigned int header_size = sizeof(void *) * nb_drivers;
2099 	unsigned int priv_sess_size;
2100 
2101 	if (!rte_cryptodev_is_valid_dev(dev_id))
2102 		return 0;
2103 
2104 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2105 
2106 	if (*dev->dev_ops->asym_session_get_size == NULL)
2107 		return 0;
2108 
2109 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2110 	if (priv_sess_size < header_size)
2111 		return header_size;
2112 
2113 	return priv_sess_size;
2114 
2116 }
2117 int
2118 rte_cryptodev_sym_session_set_user_data(
2119 					struct rte_cryptodev_sym_session *sess,
2120 					void *data,
2121 					uint16_t size)
2122 {
2123 	if (sess == NULL)
2124 		return -EINVAL;
2125 
2126 	if (sess->user_data_sz < size)
2127 		return -ENOMEM;
2128 
2129 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2130 	return 0;
2131 }
2132 
2133 void *
2134 rte_cryptodev_sym_session_get_user_data(
2135 					struct rte_cryptodev_sym_session *sess)
2136 {
2137 	if (sess == NULL || sess->user_data_sz == 0)
2138 		return NULL;
2139 
2140 	return (void *)(sess->sess_data + sess->nb_drivers);
2141 }
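
/*
 * User data sketch (illustrative only): the trailing user data area
 * exists only if the session pool was created with a non-zero user data
 * size; otherwise set_user_data() fails with -ENOMEM. The struct below
 * is a hypothetical application context.
 */
static __rte_unused uint32_t
example_session_user_data(struct rte_cryptodev_sym_session *sess)
{
	struct { uint32_t flow_id; } ctx = { .flow_id = 42 }, *stored;

	if (rte_cryptodev_sym_session_set_user_data(sess, &ctx,
			sizeof(ctx)) != 0)
		return 0;

	/* Read back the copy stored inside the session. */
	stored = rte_cryptodev_sym_session_get_user_data(sess);
	return stored->flow_id;
}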
2142 
2143 static inline void
2144 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2145 {
2146 	uint32_t i;
2147 	for (i = 0; i < vec->num; i++)
2148 		vec->status[i] = errnum;
2149 }
2150 
2151 uint32_t
2152 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2153 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2154 	struct rte_crypto_sym_vec *vec)
2155 {
2156 	struct rte_cryptodev *dev;
2157 
2158 	if (!rte_cryptodev_is_valid_dev(dev_id)) {
2159 		sym_crypto_fill_status(vec, EINVAL);
2160 		return 0;
2161 	}
2162 
2163 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2164 
2165 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2166 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2167 		sym_crypto_fill_status(vec, ENOTSUP);
2168 		return 0;
2169 	}
2170 
2171 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2172 }
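
/*
 * Capability-check sketch (illustrative only): applications should gate
 * calls to rte_cryptodev_sym_cpu_crypto_process() on the CPU crypto
 * feature flag, since an unsupported device only fills the status array
 * with ENOTSUP. The helper name is hypothetical.
 */
static __rte_unused int
example_cpu_crypto_supported(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	return (info.feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO) != 0;
}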
2173 
2174 int
2175 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2176 {
2177 	struct rte_cryptodev *dev;
2178 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2179 	int32_t priv_size;
2180 
2181 	if (!rte_cryptodev_is_valid_dev(dev_id))
2182 		return -EINVAL;
2183 
2184 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2185 
2186 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2187 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2188 		return -ENOTSUP;
2189 	}
2190 
2191 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2192 	if (priv_size < 0)
2193 		return -ENOTSUP;
2194 
2195 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2196 }
2197 
2198 int
2199 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2200 	struct rte_crypto_raw_dp_ctx *ctx,
2201 	enum rte_crypto_op_sess_type sess_type,
2202 	union rte_cryptodev_session_ctx session_ctx,
2203 	uint8_t is_update)
2204 {
2205 	struct rte_cryptodev *dev;
2206 
2207 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2208 		return -EINVAL;
2209 
2210 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2211 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2212 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2213 		return -ENOTSUP;
2214 
2215 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2216 			sess_type, session_ctx, is_update);
2217 }
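
/*
 * Raw data-path setup sketch (illustrative only): query the context
 * size, allocate it, then bind it to a configured queue pair using a
 * session-based context. Names are hypothetical.
 */
static __rte_unused struct rte_crypto_raw_dp_ctx *
example_raw_dp_ctx_setup(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_crypto_raw_dp_ctx *ctx;
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);

	if (size < 0)
		return NULL;

	ctx = rte_zmalloc("example_raw_dp_ctx", size, 0);
	if (ctx == NULL)
		return NULL;

	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) != 0) {
		rte_free(ctx);
		return NULL;
	}

	return ctx;
}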
2218 
2219 uint32_t
2220 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2221 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2222 	void **user_data, int *enqueue_status)
2223 {
2224 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2225 			ofs, user_data, enqueue_status);
2226 }
2227 
2228 int
2229 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2230 		uint32_t n)
2231 {
2232 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2233 }
2234 
2235 uint32_t
2236 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2237 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2238 	uint32_t max_nb_to_dequeue,
2239 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2240 	void **out_user_data, uint8_t is_user_data_array,
2241 	uint32_t *n_success_jobs, int *status)
2242 {
2243 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2244 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2245 		out_user_data, is_user_data_array, n_success_jobs, status);
2246 }
2247 
2248 int
2249 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2250 		uint32_t n)
2251 {
2252 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2253 }
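
/*
 * Enqueue-flow sketch (illustrative only): some PMDs merely cache the
 * descriptors on enqueue and report that through enqueue_status, in
 * which case the application commits them with
 * rte_cryptodev_raw_enqueue_done(). The status convention assumed below
 * (0 = cached, 1 = submitted, negative = error) is our reading of the
 * API documentation in rte_cryptodev.h.
 */
static __rte_unused uint32_t
example_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
		struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
		void **user_data)
{
	int status = 0;
	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs,
			user_data, &status);

	if (status < 0)
		return 0;

	/* Cached but not yet submitted: kick the queue. */
	if (status == 0 && n > 0 &&
			rte_cryptodev_raw_enqueue_done(ctx, n) != 0)
		return 0;

	return n;
}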
2254 
2255 /** Initialise rte_crypto_op mempool element */
2256 static void
2257 rte_crypto_op_init(struct rte_mempool *mempool,
2258 		void *opaque_arg,
2259 		void *_op_data,
2260 		__rte_unused unsigned i)
2261 {
2262 	struct rte_crypto_op *op = _op_data;
2263 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2264 
2265 	memset(_op_data, 0, mempool->elt_size);
2266 
2267 	__rte_crypto_op_reset(op, type);
2268 
2269 	op->phys_addr = rte_mem_virt2iova(_op_data);
2270 	op->mempool = mempool;
2271 }
2272 
2274 struct rte_mempool *
2275 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2276 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2277 		int socket_id)
2278 {
2279 	struct rte_crypto_op_pool_private *priv;
2280 
2281 	unsigned elt_size = sizeof(struct rte_crypto_op) +
2282 			priv_size;
2283 
2284 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2285 		elt_size += sizeof(struct rte_crypto_sym_op);
2286 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2287 		elt_size += sizeof(struct rte_crypto_asym_op);
2288 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2289 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2290 		                    sizeof(struct rte_crypto_asym_op));
2291 	} else {
2292 		CDEV_LOG_ERR("Invalid op_type");
2293 		return NULL;
2294 	}
2295 
2296 	/* lookup mempool in case already allocated */
2297 	struct rte_mempool *mp = rte_mempool_lookup(name);
2298 
2299 	if (mp != NULL) {
2300 		priv = (struct rte_crypto_op_pool_private *)
2301 				rte_mempool_get_priv(mp);
2302 
2303 		if (mp->elt_size != elt_size ||
2304 				mp->cache_size < cache_size ||
2305 				mp->size < nb_elts ||
2306 				priv->priv_size < priv_size) {
2307 			mp = NULL;
2308 			CDEV_LOG_ERR("Mempool %s already exists but with "
2309 					"incompatible parameters", name);
2310 			return NULL;
2311 		}
2312 		return mp;
2313 	}
2314 
2315 	mp = rte_mempool_create(
2316 			name,
2317 			nb_elts,
2318 			elt_size,
2319 			cache_size,
2320 			sizeof(struct rte_crypto_op_pool_private),
2321 			NULL,
2322 			NULL,
2323 			rte_crypto_op_init,
2324 			&type,
2325 			socket_id,
2326 			0);
2327 
2328 	if (mp == NULL) {
2329 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2330 		return NULL;
2331 	}
2332 
2333 	priv = (struct rte_crypto_op_pool_private *)
2334 			rte_mempool_get_priv(mp);
2335 
2336 	priv->priv_size = priv_size;
2337 	priv->type = type;
2338 
2339 	return mp;
2340 }
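
/*
 * Op pool sketch (illustrative only): create a symmetric op pool and
 * draw an operation from it; each op comes back reset to the pool's type
 * by the element initialiser above. Pool name and sizes are
 * hypothetical.
 */
static __rte_unused struct rte_crypto_op *
example_op_alloc(int socket_id)
{
	struct rte_mempool *pool = rte_crypto_op_pool_create(
			"example_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			4096, 128, 0, socket_id);

	if (pool == NULL)
		return NULL;

	/* Return with rte_crypto_op_free() when done. */
	return rte_crypto_op_alloc(pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
}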
2341 
2342 int
2343 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2344 {
2345 	struct rte_cryptodev *dev = NULL;
2346 	uint32_t i = 0;
2347 
2348 	if (name == NULL)
2349 		return -EINVAL;
2350 
2351 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2352 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2353 				"%s_%u", dev_name_prefix, i);
2354 
2355 		if (ret < 0)
2356 			return ret;
2357 
2358 		dev = rte_cryptodev_pmd_get_named_dev(name);
2359 		if (!dev)
2360 			return 0;
2361 	}
2362 
2363 	return -1;
2364 }
2365 
2366 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2367 
2368 static struct cryptodev_driver_list cryptodev_driver_list =
2369 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2370 
2371 int
2372 rte_cryptodev_driver_id_get(const char *name)
2373 {
2374 	struct cryptodev_driver *driver;
2375 	const char *driver_name;
2376 
2377 	if (name == NULL) {
2378 		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2379 		return -1;
2380 	}
2381 
2382 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2383 		driver_name = driver->driver->name;
2384 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2385 			return driver->id;
2386 	}
2387 	return -1;
2388 }
2389 
2390 const char *
2391 rte_cryptodev_name_get(uint8_t dev_id)
2392 {
2393 	struct rte_cryptodev *dev;
2394 
2395 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2396 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2397 		return NULL;
2398 	}
2399 
2400 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2401 	if (dev == NULL)
2402 		return NULL;
2403 
2404 	return dev->data->name;
2405 }
2406 
2407 const char *
2408 rte_cryptodev_driver_name_get(uint8_t driver_id)
2409 {
2410 	struct cryptodev_driver *driver;
2411 
2412 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2413 		if (driver->id == driver_id)
2414 			return driver->driver->name;
2415 	return NULL;
2416 }
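
/*
 * Lookup sketch (illustrative only): round-trip a driver name to its id
 * and back. "crypto_null" is the null crypto PMD's driver name; the
 * check assumes that PMD is linked in.
 */
static __rte_unused int
example_driver_roundtrip(void)
{
	int id = rte_cryptodev_driver_id_get("crypto_null");

	if (id < 0)
		return -1;

	return strcmp(rte_cryptodev_driver_name_get((uint8_t)id),
			"crypto_null");
}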
2417 
2418 uint8_t
2419 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2420 		const struct rte_driver *drv)
2421 {
2422 	crypto_drv->driver = drv;
2423 	crypto_drv->id = nb_drivers;
2424 
2425 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2426 
2427 	return nb_drivers++;
2428 }
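
/*
 * Registration sketch (illustrative only): PMDs do not normally call
 * rte_cryptodev_allocate_driver() by hand; they register through the
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() macro from cryptodev_pmd.h, which
 * wraps the call in a constructor, roughly:
 *
 *	static struct cryptodev_driver example_crypto_drv;
 *	static uint8_t example_driver_id;
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(example_crypto_drv, pmd.driver,
 *			example_driver_id);
 *
 * where pmd.driver is the rte_driver embedded in the PMD definition.
 * All names above except the macro are hypothetical.
 */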
2429