xref: /dpdk/lib/cryptodev/rte_cryptodev.c (revision 99a2dd955fba6e4cc23b77d590a033650ced9c45)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation
3  */
4 
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16 
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_interrupts.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_atomic.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_common.h>
33 #include <rte_mempool.h>
34 #include <rte_malloc.h>
35 #include <rte_mbuf.h>
36 #include <rte_errno.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39 
40 #include "rte_crypto.h"
41 #include "rte_cryptodev.h"
42 #include "rte_cryptodev_pmd.h"
43 #include "rte_cryptodev_trace.h"
44 
45 static uint8_t nb_drivers;
46 
47 static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
48 
49 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
50 
51 static struct rte_cryptodev_global cryptodev_globals = {
52 		.devs			= rte_crypto_devices,
53 		.data			= { NULL },
54 		.nb_devs		= 0
55 };
56 
57 /* spinlock for crypto device callbacks */
58 static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
59 
60 /**
61  * The user application callback description.
62  *
63  * It contains the callback address registered by the user application,
64  * a pointer to the callback's parameters, and the event type.
65  */
66 struct rte_cryptodev_callback {
67 	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
68 	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
69 	void *cb_arg;				/**< Parameter for callback */
70 	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
71 	uint32_t active;			/**< Callback is executing */
72 };
73 
74 /**
75  * String identifiers for the crypto cipher algorithms.
76  * They can be used on an application command line.
77  */
78 const char *
79 rte_crypto_cipher_algorithm_strings[] = {
80 	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
81 	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
82 	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",
83 
84 	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
85 	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
86 	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
87 	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
88 	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
89 	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",
90 
91 	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",
92 
93 	[RTE_CRYPTO_CIPHER_DES_CBC]     = "des-cbc",
94 	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",
95 
96 	[RTE_CRYPTO_CIPHER_NULL]	= "null",
97 
98 	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
99 	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
100 	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
101 };
102 
103 /**
104  * String identifiers for the crypto cipher operations.
105  * They can be used on an application command line.
106  */
107 const char *
108 rte_crypto_cipher_operation_strings[] = {
109 		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
110 		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
111 };
112 
113 /**
114  * String identifiers for the crypto authentication algorithms.
115  * They can be used on an application command line.
116  */
117 const char *
118 rte_crypto_auth_algorithm_strings[] = {
119 	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
120 	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
121 	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
122 	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",
123 
124 	[RTE_CRYPTO_AUTH_MD5]		= "md5",
125 	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",
126 
127 	[RTE_CRYPTO_AUTH_NULL]		= "null",
128 
129 	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
130 	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",
131 
132 	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
133 	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
134 	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
135 	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
136 	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
137 	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
138 	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
139 	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",
140 
141 	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
142 	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
143 	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
144 };
145 
146 /**
147  * String identifiers for the crypto AEAD algorithms.
148  * They can be used on an application command line.
149  */
150 const char *
151 rte_crypto_aead_algorithm_strings[] = {
152 	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
153 	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
154 	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
155 };
156 
157 /**
158  * String identifiers for the crypto AEAD operations.
159  * They can be used on an application command line.
160  */
161 const char *
162 rte_crypto_aead_operation_strings[] = {
163 	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
164 	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
165 };
166 
167 /**
168  * Asymmetric crypto transform operation strings identifiers.
169  */
170 const char *rte_crypto_asym_xform_strings[] = {
171 	[RTE_CRYPTO_ASYM_XFORM_NONE]	= "none",
172 	[RTE_CRYPTO_ASYM_XFORM_RSA]	= "rsa",
173 	[RTE_CRYPTO_ASYM_XFORM_MODEX]	= "modexp",
174 	[RTE_CRYPTO_ASYM_XFORM_MODINV]	= "modinv",
175 	[RTE_CRYPTO_ASYM_XFORM_DH]	= "dh",
176 	[RTE_CRYPTO_ASYM_XFORM_DSA]	= "dsa",
177 	[RTE_CRYPTO_ASYM_XFORM_ECDSA]	= "ecdsa",
178 	[RTE_CRYPTO_ASYM_XFORM_ECPM]	= "ecpm",
179 };
180 
181 /**
182  * Asymmetric crypto operation strings identifiers.
183  */
184 const char *rte_crypto_asym_op_strings[] = {
185 	[RTE_CRYPTO_ASYM_OP_ENCRYPT]	= "encrypt",
186 	[RTE_CRYPTO_ASYM_OP_DECRYPT]	= "decrypt",
187 	[RTE_CRYPTO_ASYM_OP_SIGN]	= "sign",
188 	[RTE_CRYPTO_ASYM_OP_VERIFY]	= "verify",
189 	[RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE]	= "priv_key_generate",
190 	[RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
191 	[RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
192 };
193 
194 /**
195  * The private data structure stored in the session mempool's private data area.
196  */
197 struct rte_cryptodev_sym_session_pool_private_data {
198 	uint16_t nb_drivers;
199 	/**< number of elements in sess_data array */
200 	uint16_t user_data_sz;
201 	/**< session user data will be placed after sess_data */
202 };
203 
204 int
205 rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
206 		const char *algo_string)
207 {
208 	unsigned int i;
209 
210 	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
211 		if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
212 			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
213 			return 0;
214 		}
215 	}
216 
217 	/* Invalid string */
218 	return -1;
219 }
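
/*
 * Usage sketch (illustrative, not part of the original source): resolving
 * a cipher name taken from an application command line. The "aes-cbc"
 * literal is only an example input.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		rte_exit(EXIT_FAILURE, "unknown cipher algorithm\n");
 */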
220 
221 int
222 rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
223 		const char *algo_string)
224 {
225 	unsigned int i;
226 
227 	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
228 		if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
229 			*algo_enum = (enum rte_crypto_auth_algorithm) i;
230 			return 0;
231 		}
232 	}
233 
234 	/* Invalid string */
235 	return -1;
236 }
237 
238 int
239 rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
240 		const char *algo_string)
241 {
242 	unsigned int i;
243 
244 	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
245 		if (strcmp(algo_string, rte_crypto_aead_algorithm_strings[i]) == 0) {
246 			*algo_enum = (enum rte_crypto_aead_algorithm) i;
247 			return 0;
248 		}
249 	}
250 
251 	/* Invalid string */
252 	return -1;
253 }
254 
255 int
256 rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
257 		const char *xform_string)
258 {
259 	unsigned int i;
260 
261 	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
262 		if (strcmp(xform_string,
263 			rte_crypto_asym_xform_strings[i]) == 0) {
264 			*xform_enum = (enum rte_crypto_asym_xform_type) i;
265 			return 0;
266 		}
267 	}
268 
269 	/* Invalid string */
270 	return -1;
271 }
272 
273 /**
274  * String identifiers for the crypto authentication operations.
275  * They can be used on an application command line.
276  */
277 const char *
278 rte_crypto_auth_operation_strings[] = {
279 		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
280 		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
281 };
282 
283 const struct rte_cryptodev_symmetric_capability *
284 rte_cryptodev_sym_capability_get(uint8_t dev_id,
285 		const struct rte_cryptodev_sym_capability_idx *idx)
286 {
287 	const struct rte_cryptodev_capabilities *capability;
288 	struct rte_cryptodev_info dev_info;
289 	int i = 0;
290 
291 	rte_cryptodev_info_get(dev_id, &dev_info);
292 
293 	while ((capability = &dev_info.capabilities[i++])->op !=
294 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
295 		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
296 			continue;
297 
298 		if (capability->sym.xform_type != idx->type)
299 			continue;
300 
301 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
302 			capability->sym.auth.algo == idx->algo.auth)
303 			return &capability->sym;
304 
305 		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
306 			capability->sym.cipher.algo == idx->algo.cipher)
307 			return &capability->sym;
308 
309 		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
310 				capability->sym.aead.algo == idx->algo.aead)
311 			return &capability->sym;
312 	}
313 
314 	return NULL;
315 }
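
/*
 * Usage sketch (illustrative): probing a device for SNOW3G-UEA2 support.
 * dev_id is assumed to identify a valid crypto device.
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *
 *	if (cap == NULL)
 *		printf("SNOW3G-UEA2 not supported by dev %u\n", dev_id);
 */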
316 
317 static int
318 param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
319 {
320 	unsigned int next_size;
321 
322 	/* Check lower/upper bounds */
323 	if (size < range->min)
324 		return -1;
325 
326 	if (size > range->max)
327 		return -1;
328 
329 	/* If range is actually only one value, size is correct */
330 	if (range->increment == 0)
331 		return 0;
332 
333 	/* Check if value is one of the supported sizes */
334 	for (next_size = range->min; next_size <= range->max;
335 			next_size += range->increment)
336 		if (size == next_size)
337 			return 0;
338 
339 	return -1;
340 }
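
/*
 * Worked example of the range semantics above (illustrative): for a
 * range of {.min = 32, .max = 64, .increment = 16} the accepted sizes
 * are exactly 32, 48 and 64; with .increment == 0 the range collapses
 * to the single value .min (== .max).
 */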
341 
342 const struct rte_cryptodev_asymmetric_xform_capability *
343 rte_cryptodev_asym_capability_get(uint8_t dev_id,
344 		const struct rte_cryptodev_asym_capability_idx *idx)
345 {
346 	const struct rte_cryptodev_capabilities *capability;
347 	struct rte_cryptodev_info dev_info;
348 	unsigned int i = 0;
349 
350 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
351 	rte_cryptodev_info_get(dev_id, &dev_info);
352 
353 	while ((capability = &dev_info.capabilities[i++])->op !=
354 			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
355 		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
356 			continue;
357 
358 		if (capability->asym.xform_capa.xform_type == idx->type)
359 			return &capability->asym.xform_capa;
360 	}
361 	return NULL;
362 }
363 
364 int
365 rte_cryptodev_sym_capability_check_cipher(
366 		const struct rte_cryptodev_symmetric_capability *capability,
367 		uint16_t key_size, uint16_t iv_size)
368 {
369 	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
370 		return -1;
371 
372 	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
373 		return -1;
374 
375 	return 0;
376 }
377 
378 int
379 rte_cryptodev_sym_capability_check_auth(
380 		const struct rte_cryptodev_symmetric_capability *capability,
381 		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
382 {
383 	if (param_range_check(key_size, &capability->auth.key_size) != 0)
384 		return -1;
385 
386 	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
387 		return -1;
388 
389 	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
390 		return -1;
391 
392 	return 0;
393 }
394 
395 int
396 rte_cryptodev_sym_capability_check_aead(
397 		const struct rte_cryptodev_symmetric_capability *capability,
398 		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
399 		uint16_t iv_size)
400 {
401 	if (param_range_check(key_size, &capability->aead.key_size) != 0)
402 		return -1;
403 
404 	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
405 		return -1;
406 
407 	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
408 		return -1;
409 
410 	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
411 		return -1;
412 
413 	return 0;
414 }
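
/*
 * Usage sketch (illustrative): validating AES-GCM parameters against an
 * AEAD capability previously returned by rte_cryptodev_sym_capability_get().
 * The sizes (256-bit key, 16-byte digest, no AAD, 12-byte IV) are examples.
 *
 *	if (rte_cryptodev_sym_capability_check_aead(cap, 32, 16, 0, 12) != 0)
 *		printf("requested AES-GCM parameters not supported\n");
 */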

415 int
416 rte_cryptodev_asym_xform_capability_check_optype(
417 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
418 	enum rte_crypto_asym_op_type op_type)
419 {
420 	if (capability->op_types & (1 << op_type))
421 		return 1;
422 
423 	return 0;
424 }
425 
426 int
427 rte_cryptodev_asym_xform_capability_check_modlen(
428 	const struct rte_cryptodev_asymmetric_xform_capability *capability,
429 	uint16_t modlen)
430 {
431 	/* no need to check against the limits if min or max is 0 */
432 	if (capability->modlen.min != 0) {
433 		if (modlen < capability->modlen.min)
434 			return -1;
435 	}
436 
437 	if (capability->modlen.max != 0) {
438 		if (modlen > capability->modlen.max)
439 			return -1;
440 	}
441 
442 	/* in any case, check that the given modlen is a multiple of the increment */
443 	if (capability->modlen.increment != 0) {
444 		if (modlen % (capability->modlen.increment))
445 			return -1;
446 	}
447 
448 	return 0;
449 }
450 
451 /* spinlock for crypto device enq callbacks */
452 static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
453 
454 static void
455 cryptodev_cb_cleanup(struct rte_cryptodev *dev)
456 {
457 	struct rte_cryptodev_cb_rcu *list;
458 	struct rte_cryptodev_cb *cb, *next;
459 	uint16_t qp_id;
460 
461 	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
462 		return;
463 
464 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
465 		list = &dev->enq_cbs[qp_id];
466 		cb = list->next;
467 		while (cb != NULL) {
468 			next = cb->next;
469 			rte_free(cb);
470 			cb = next;
471 		}
472 
473 		rte_free(list->qsbr);
474 	}
475 
476 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
477 		list = &dev->deq_cbs[qp_id];
478 		cb = list->next;
479 		while (cb != NULL) {
480 			next = cb->next;
481 			rte_free(cb);
482 			cb = next;
483 		}
484 
485 		rte_free(list->qsbr);
486 	}
487 
488 	rte_free(dev->enq_cbs);
489 	dev->enq_cbs = NULL;
490 	rte_free(dev->deq_cbs);
491 	dev->deq_cbs = NULL;
492 }
493 
494 static int
495 cryptodev_cb_init(struct rte_cryptodev *dev)
496 {
497 	struct rte_cryptodev_cb_rcu *list;
498 	struct rte_rcu_qsbr *qsbr;
499 	uint16_t qp_id;
500 	size_t size;
501 
502 	/* Max threads set to 1, as only one data-plane thread accesses a queue pair */
503 	const uint32_t max_threads = 1;
504 
505 	dev->enq_cbs = rte_zmalloc(NULL,
506 				   sizeof(struct rte_cryptodev_cb_rcu) *
507 				   dev->data->nb_queue_pairs, 0);
508 	if (dev->enq_cbs == NULL) {
509 		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
510 		return -ENOMEM;
511 	}
512 
513 	dev->deq_cbs = rte_zmalloc(NULL,
514 				   sizeof(struct rte_cryptodev_cb_rcu) *
515 				   dev->data->nb_queue_pairs, 0);
516 	if (dev->deq_cbs == NULL) {
517 		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
518 		rte_free(dev->enq_cbs);
		dev->enq_cbs = NULL; /* avoid a later double free in cryptodev_cb_cleanup() */
519 		return -ENOMEM;
520 	}
521 
522 	/* Create RCU QSBR variable */
523 	size = rte_rcu_qsbr_get_memsize(max_threads);
524 
525 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
526 		list = &dev->enq_cbs[qp_id];
527 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
528 		if (qsbr == NULL) {
529 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
530 				"queue_pair_id=%d", qp_id);
531 			goto cb_init_err;
532 		}
533 
534 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
535 			CDEV_LOG_ERR("Failed to initialize for RCU on "
536 				"queue_pair_id=%d", qp_id);
537 			goto cb_init_err;
538 		}
539 
540 		list->qsbr = qsbr;
541 	}
542 
543 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
544 		list = &dev->deq_cbs[qp_id];
545 		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
546 		if (qsbr == NULL) {
547 			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
548 				"queue_pair_id=%d", qp_id);
549 			goto cb_init_err;
550 		}
551 
552 		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
553 			CDEV_LOG_ERR("Failed to initialize for RCU on "
554 				"queue_pair_id=%d", qp_id);
555 			goto cb_init_err;
556 		}
557 
558 		list->qsbr = qsbr;
559 	}
560 
561 	return 0;
562 
563 cb_init_err:
564 	cryptodev_cb_cleanup(dev);
565 	return -ENOMEM;
566 }
567 
568 const char *
569 rte_cryptodev_get_feature_name(uint64_t flag)
570 {
571 	switch (flag) {
572 	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
573 		return "SYMMETRIC_CRYPTO";
574 	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
575 		return "ASYMMETRIC_CRYPTO";
576 	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
577 		return "SYM_OPERATION_CHAINING";
578 	case RTE_CRYPTODEV_FF_CPU_SSE:
579 		return "CPU_SSE";
580 	case RTE_CRYPTODEV_FF_CPU_AVX:
581 		return "CPU_AVX";
582 	case RTE_CRYPTODEV_FF_CPU_AVX2:
583 		return "CPU_AVX2";
584 	case RTE_CRYPTODEV_FF_CPU_AVX512:
585 		return "CPU_AVX512";
586 	case RTE_CRYPTODEV_FF_CPU_AESNI:
587 		return "CPU_AESNI";
588 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
589 		return "HW_ACCELERATED";
590 	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
591 		return "IN_PLACE_SGL";
592 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
593 		return "OOP_SGL_IN_SGL_OUT";
594 	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
595 		return "OOP_SGL_IN_LB_OUT";
596 	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
597 		return "OOP_LB_IN_SGL_OUT";
598 	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
599 		return "OOP_LB_IN_LB_OUT";
600 	case RTE_CRYPTODEV_FF_CPU_NEON:
601 		return "CPU_NEON";
602 	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
603 		return "CPU_ARM_CE";
604 	case RTE_CRYPTODEV_FF_SECURITY:
605 		return "SECURITY_PROTOCOL";
606 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
607 		return "RSA_PRIV_OP_KEY_EXP";
608 	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
609 		return "RSA_PRIV_OP_KEY_QT";
610 	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
611 		return "DIGEST_ENCRYPTED";
612 	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
613 		return "SYM_CPU_CRYPTO";
614 	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
615 		return "ASYM_SESSIONLESS";
616 	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
617 		return "SYM_SESSIONLESS";
618 	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
619 		return "NON_BYTE_ALIGNED_DATA";
620 	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
621 		return "CIPHER_MULTIPLE_DATA_UNITS";
622 	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
623 		return "CIPHER_WRAPPED_KEY";
624 	default:
625 		return NULL;
626 	}
627 }
628 
629 struct rte_cryptodev *
630 rte_cryptodev_pmd_get_dev(uint8_t dev_id)
631 {
632 	return &cryptodev_globals.devs[dev_id];
633 }
634 
635 struct rte_cryptodev *
636 rte_cryptodev_pmd_get_named_dev(const char *name)
637 {
638 	struct rte_cryptodev *dev;
639 	unsigned int i;
640 
641 	if (name == NULL)
642 		return NULL;
643 
644 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
645 		dev = &cryptodev_globals.devs[i];
646 
647 		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
648 				(strcmp(dev->data->name, name) == 0))
649 			return dev;
650 	}
651 
652 	return NULL;
653 }
654 
655 static inline uint8_t
656 rte_cryptodev_is_valid_device_data(uint8_t dev_id)
657 {
658 	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
659 			rte_crypto_devices[dev_id].data == NULL)
660 		return 0;
661 
662 	return 1;
663 }
664 
665 unsigned int
666 rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
667 {
668 	struct rte_cryptodev *dev = NULL;
669 
670 	if (!rte_cryptodev_is_valid_device_data(dev_id))
671 		return 0;
672 
673 	dev = rte_cryptodev_pmd_get_dev(dev_id);
674 	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
675 		return 0;
676 	else
677 		return 1;
678 }
679
681 int
682 rte_cryptodev_get_dev_id(const char *name)
683 {
684 	unsigned int i;
685 
686 	if (name == NULL)
687 		return -1;
688 
689 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
690 		if (!rte_cryptodev_is_valid_device_data(i))
691 			continue;
692 		if ((strcmp(cryptodev_globals.devs[i].data->name, name)
693 				== 0) &&
694 				(cryptodev_globals.devs[i].attached ==
695 						RTE_CRYPTODEV_ATTACHED))
696 			return i;
697 	}
698 
699 	return -1;
700 }
701 
702 uint8_t
703 rte_cryptodev_count(void)
704 {
705 	return cryptodev_globals.nb_devs;
706 }
707 
708 uint8_t
709 rte_cryptodev_device_count_by_driver(uint8_t driver_id)
710 {
711 	uint8_t i, dev_count = 0;
712 
713 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
714 		if (cryptodev_globals.devs[i].driver_id == driver_id &&
715 			cryptodev_globals.devs[i].attached ==
716 					RTE_CRYPTODEV_ATTACHED)
717 			dev_count++;
718 
719 	return dev_count;
720 }
721 
722 uint8_t
723 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
724 	uint8_t nb_devices)
725 {
726 	uint8_t i, count = 0;
727 	struct rte_cryptodev *devs = cryptodev_globals.devs;
728 
729 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
730 		if (!rte_cryptodev_is_valid_device_data(i))
731 			continue;
732 
733 		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
734 			int cmp;
735 
736 			cmp = strncmp(devs[i].device->driver->name,
737 					driver_name,
738 					strlen(driver_name) + 1);
739 
740 			if (cmp == 0)
741 				devices[count++] = devs[i].data->dev_id;
742 		}
743 	}
744 
745 	return count;
746 }
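
/*
 * Usage sketch (illustrative): enumerating all attached devices bound to
 * one PMD. "crypto_aesni_mb" is an example driver name.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n = rte_cryptodev_devices_get("crypto_aesni_mb", ids,
 *			RTE_DIM(ids));
 *	uint8_t i;
 *
 *	for (i = 0; i < n; i++)
 *		printf("dev %u uses crypto_aesni_mb\n", ids[i]);
 */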
747 
748 void *
749 rte_cryptodev_get_sec_ctx(uint8_t dev_id)
750 {
751 	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
752 			(rte_crypto_devices[dev_id].feature_flags &
753 			RTE_CRYPTODEV_FF_SECURITY))
754 		return rte_crypto_devices[dev_id].security_ctx;
755 
756 	return NULL;
757 }
758 
759 int
760 rte_cryptodev_socket_id(uint8_t dev_id)
761 {
762 	struct rte_cryptodev *dev;
763 
764 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
765 		return -1;
766 
767 	dev = rte_cryptodev_pmd_get_dev(dev_id);
768 
769 	return dev->data->socket_id;
770 }
771 
772 static inline int
773 rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
774 		int socket_id)
775 {
776 	char mz_name[RTE_MEMZONE_NAMESIZE];
777 	const struct rte_memzone *mz;
778 	int n;
779 
780 	/* generate memzone name */
781 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
782 	if (n >= (int)sizeof(mz_name))
783 		return -EINVAL;
784 
785 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
786 		mz = rte_memzone_reserve(mz_name,
787 				sizeof(struct rte_cryptodev_data),
788 				socket_id, 0);
789 		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
790 				mz_name, mz);
791 	} else {
792 		mz = rte_memzone_lookup(mz_name);
793 		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
794 				mz_name, mz);
795 	}
796 
797 	if (mz == NULL)
798 		return -ENOMEM;
799 
800 	*data = mz->addr;
801 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
802 		memset(*data, 0, sizeof(struct rte_cryptodev_data));
803 
804 	return 0;
805 }
806 
807 static inline int
808 rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
809 {
810 	char mz_name[RTE_MEMZONE_NAMESIZE];
811 	const struct rte_memzone *mz;
812 	int n;
813 
814 	/* generate memzone name */
815 	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
816 	if (n >= (int)sizeof(mz_name))
817 		return -EINVAL;
818 
819 	mz = rte_memzone_lookup(mz_name);
820 	if (mz == NULL)
821 		return -ENOMEM;
822 
823 	RTE_ASSERT(*data == mz->addr);
824 	*data = NULL;
825 
826 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
827 		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
828 				mz_name, mz);
829 		return rte_memzone_free(mz);
830 	} else {
831 		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
832 				mz_name, mz);
833 	}
834 
835 	return 0;
836 }
837 
838 static uint8_t
839 rte_cryptodev_find_free_device_index(void)
840 {
841 	uint8_t dev_id;
842 
843 	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
844 		if (rte_crypto_devices[dev_id].attached ==
845 				RTE_CRYPTODEV_DETACHED)
846 			return dev_id;
847 	}
848 	return RTE_CRYPTO_MAX_DEVS;
849 }
850 
851 struct rte_cryptodev *
852 rte_cryptodev_pmd_allocate(const char *name, int socket_id)
853 {
854 	struct rte_cryptodev *cryptodev;
855 	uint8_t dev_id;
856 
857 	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
858 		CDEV_LOG_ERR("Crypto device with name %s already "
859 				"allocated!", name);
860 		return NULL;
861 	}
862 
863 	dev_id = rte_cryptodev_find_free_device_index();
864 	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
865 		CDEV_LOG_ERR("Reached maximum number of crypto devices");
866 		return NULL;
867 	}
868 
869 	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);
870 
871 	if (cryptodev->data == NULL) {
872 		struct rte_cryptodev_data **cryptodev_data =
873 				&cryptodev_globals.data[dev_id];
874 
875 		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
876 				socket_id);
877 
878 		if (retval < 0 || *cryptodev_data == NULL)
879 			return NULL;
880 
881 		cryptodev->data = *cryptodev_data;
882 
883 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
884 			strlcpy(cryptodev->data->name, name,
885 				RTE_CRYPTODEV_NAME_MAX_LEN);
886 
887 			cryptodev->data->dev_id = dev_id;
888 			cryptodev->data->socket_id = socket_id;
889 			cryptodev->data->dev_started = 0;
890 			CDEV_LOG_DEBUG("PRIMARY:init data");
891 		}
892 
893 		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
894 				cryptodev->data->name,
895 				cryptodev->data->dev_id,
896 				cryptodev->data->socket_id,
897 				cryptodev->data->dev_started);
898 
899 		/* init user callbacks */
900 		TAILQ_INIT(&(cryptodev->link_intr_cbs));
901 
902 		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
903 
904 		cryptodev_globals.nb_devs++;
905 	}
906 
907 	return cryptodev;
908 }
909 
910 int
911 rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
912 {
913 	int ret;
914 	uint8_t dev_id;
915 
916 	if (cryptodev == NULL)
917 		return -EINVAL;
918 
919 	dev_id = cryptodev->data->dev_id;
920 
921 	/* Close device only if device operations have been set */
922 	if (cryptodev->dev_ops) {
923 		ret = rte_cryptodev_close(dev_id);
924 		if (ret < 0)
925 			return ret;
926 	}
927 
928 	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
929 	if (ret < 0)
930 		return ret;
931 
932 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
933 	cryptodev_globals.nb_devs--;
934 	return 0;
935 }
936 
937 uint16_t
938 rte_cryptodev_queue_pair_count(uint8_t dev_id)
939 {
940 	struct rte_cryptodev *dev;
941 
942 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
943 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
944 		return 0;
945 	}
946 
947 	dev = &rte_crypto_devices[dev_id];
948 	return dev->data->nb_queue_pairs;
949 }
950 
951 static int
952 rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
953 		int socket_id)
954 {
955 	struct rte_cryptodev_info dev_info;
956 	void **qp;
957 	unsigned int i;
958 
959 	if ((dev == NULL) || (nb_qpairs < 1)) {
960 		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
961 							dev, nb_qpairs);
962 		return -EINVAL;
963 	}
964 
965 	CDEV_LOG_DEBUG("Setup %d queues pairs on device %u",
966 			nb_qpairs, dev->data->dev_id);
967 
968 	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
969 
970 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
971 	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
972 
973 	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
974 		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
975 				nb_qpairs, dev->data->dev_id);
976 		return -EINVAL;
977 	}
978 
979 	if (dev->data->queue_pairs == NULL) { /* first time configuration */
980 		dev->data->queue_pairs = rte_zmalloc_socket(
981 				"cryptodev->queue_pairs",
982 				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
983 				RTE_CACHE_LINE_SIZE, socket_id);
984 
985 		if (dev->data->queue_pairs == NULL) {
986 			dev->data->nb_queue_pairs = 0;
987 			CDEV_LOG_ERR("failed to get memory for qp meta data, "
988 							"nb_queues %u",
989 							nb_qpairs);
990 			return -(ENOMEM);
991 		}
992 	} else { /* re-configure */
993 		int ret;
994 		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
995 
996 		qp = dev->data->queue_pairs;
997 
998 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
999 				-ENOTSUP);
1000 
1001 		for (i = nb_qpairs; i < old_nb_queues; i++) {
1002 			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
1003 			if (ret < 0)
1004 				return ret;
1005 		}
1006 
1007 		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
1008 				RTE_CACHE_LINE_SIZE);
1009 		if (qp == NULL) {
1010 			CDEV_LOG_ERR("failed to realloc qp meta data,"
1011 						" nb_queues %u", nb_qpairs);
1012 			return -(ENOMEM);
1013 		}
1014 
1015 		if (nb_qpairs > old_nb_queues) {
1016 			uint16_t new_qs = nb_qpairs - old_nb_queues;
1017 
1018 			memset(qp + old_nb_queues, 0,
1019 				sizeof(qp[0]) * new_qs);
1020 		}
1021 
1022 		dev->data->queue_pairs = qp;
1023 
1024 	}
1025 	dev->data->nb_queue_pairs = nb_qpairs;
1026 	return 0;
1027 }
1028 
1029 int
1030 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
1031 {
1032 	struct rte_cryptodev *dev;
1033 	int diag;
1034 
1035 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1036 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1037 		return -EINVAL;
1038 	}
1039 
1040 	dev = &rte_crypto_devices[dev_id];
1041 
1042 	if (dev->data->dev_started) {
1043 		CDEV_LOG_ERR(
1044 		    "device %d must be stopped to allow configuration", dev_id);
1045 		return -EBUSY;
1046 	}
1047 
1048 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1049 
1050 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1051 	cryptodev_cb_cleanup(dev);
1052 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1053 
1054 	/* Setup new number of queue pairs and reconfigure device. */
1055 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
1056 			config->socket_id);
1057 	if (diag != 0) {
1058 		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
1059 				dev_id, diag);
1060 		return diag;
1061 	}
1062 
1063 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1064 	diag = cryptodev_cb_init(dev);
1065 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1066 	if (diag) {
1067 		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
1068 		return diag;
1069 	}
1070 
1071 	rte_cryptodev_trace_configure(dev_id, config);
1072 	return (*dev->dev_ops->dev_configure)(dev, config);
1073 }
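
/*
 * Usage sketch (illustrative): a minimal configure call. The values are
 * examples; nb_queue_pairs must not exceed the device's reported
 * max_nb_queue_pairs.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure dev %u\n", dev_id);
 */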
1074 
1075 int
1076 rte_cryptodev_start(uint8_t dev_id)
1077 {
1078 	struct rte_cryptodev *dev;
1079 	int diag;
1080 
1081 	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1082 
1083 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1084 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1085 		return -EINVAL;
1086 	}
1087 
1088 	dev = &rte_crypto_devices[dev_id];
1089 
1090 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1091 
1092 	if (dev->data->dev_started != 0) {
1093 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1094 			dev_id);
1095 		return 0;
1096 	}
1097 
1098 	diag = (*dev->dev_ops->dev_start)(dev);
1099 	rte_cryptodev_trace_start(dev_id, diag);
1100 	if (diag == 0)
1101 		dev->data->dev_started = 1;
1102 	else
1103 		return diag;
1104 
1105 	return 0;
1106 }
1107 
1108 void
1109 rte_cryptodev_stop(uint8_t dev_id)
1110 {
1111 	struct rte_cryptodev *dev;
1112 
1113 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1114 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1115 		return;
1116 	}
1117 
1118 	dev = &rte_crypto_devices[dev_id];
1119 
1120 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1121 
1122 	if (dev->data->dev_started == 0) {
1123 		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1124 			dev_id);
1125 		return;
1126 	}
1127 
1128 	(*dev->dev_ops->dev_stop)(dev);
1129 	rte_cryptodev_trace_stop(dev_id);
1130 	dev->data->dev_started = 0;
1131 }
1132 
1133 int
1134 rte_cryptodev_close(uint8_t dev_id)
1135 {
1136 	struct rte_cryptodev *dev;
1137 	int retval;
1138 
1139 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1140 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1141 		return -1;
1142 	}
1143 
1144 	dev = &rte_crypto_devices[dev_id];
1145 
1146 	/* Device must be stopped before it can be closed */
1147 	if (dev->data->dev_started == 1) {
1148 		CDEV_LOG_ERR("Device %u must be stopped before closing",
1149 				dev_id);
1150 		return -EBUSY;
1151 	}
1152 
1153 	/* We can't close the device if there are outstanding sessions in use */
1154 	if (dev->data->session_pool != NULL) {
1155 		if (!rte_mempool_full(dev->data->session_pool)) {
1156 			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
1157 					"has sessions still in use, free "
1158 					"all sessions before calling close",
1159 					(unsigned)dev_id);
1160 			return -EBUSY;
1161 		}
1162 	}
1163 
1164 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1165 	retval = (*dev->dev_ops->dev_close)(dev);
1166 	rte_cryptodev_trace_close(dev_id, retval);
1167 
1168 	if (retval < 0)
1169 		return retval;
1170 
1171 	return 0;
1172 }
1173 
1174 int
1175 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
1176 {
1177 	struct rte_cryptodev *dev;
1178 
1179 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1180 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1181 		return -EINVAL;
1182 	}
1183 
1184 	dev = &rte_crypto_devices[dev_id];
1185 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1186 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1187 		return -EINVAL;
1188 	}
1189 	void **qps = dev->data->queue_pairs;
1190 
1191 	if (qps[queue_pair_id]) {
1192 		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
1193 			queue_pair_id, dev_id);
1194 		return 1;
1195 	}
1196 
1197 	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
1198 		queue_pair_id, dev_id);
1199 
1200 	return 0;
1201 }
1202 
1203 int
1204 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
1205 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
1207 {
1208 	struct rte_cryptodev *dev;
1209 
1210 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1211 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1212 		return -EINVAL;
1213 	}
1214 
1215 	dev = &rte_crypto_devices[dev_id];
1216 	if (queue_pair_id >= dev->data->nb_queue_pairs) {
1217 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
1218 		return -EINVAL;
1219 	}
1220 
1221 	if (!qp_conf) {
1222 		CDEV_LOG_ERR("qp_conf cannot be NULL\n");
1223 		return -EINVAL;
1224 	}
1225 
1226 	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
1227 			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
1228 		CDEV_LOG_ERR("Invalid mempools\n");
1229 		return -EINVAL;
1230 	}
1231 
1232 	if (qp_conf->mp_session) {
1233 		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1234 		uint32_t obj_size = qp_conf->mp_session->elt_size;
1235 		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
1236 		struct rte_cryptodev_sym_session s = {0};
1237 
1238 		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
1239 		if (!pool_priv || qp_conf->mp_session->private_data_size <
1240 				sizeof(*pool_priv)) {
1241 			CDEV_LOG_ERR("Invalid mempool\n");
1242 			return -EINVAL;
1243 		}
1244 
1245 		s.nb_drivers = pool_priv->nb_drivers;
1246 		s.user_data_sz = pool_priv->user_data_sz;
1247 
1248 		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
1249 			obj_size) || (s.nb_drivers <= dev->driver_id) ||
1250 			rte_cryptodev_sym_get_private_session_size(dev_id) >
1251 				obj_priv_size) {
1252 			CDEV_LOG_ERR("Invalid mempool\n");
1253 			return -EINVAL;
1254 		}
1255 	}
1256 
1257 	if (dev->data->dev_started) {
1258 		CDEV_LOG_ERR(
1259 		    "device %d must be stopped to allow configuration", dev_id);
1260 		return -EBUSY;
1261 	}
1262 
1263 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
1264 
1265 	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
1266 	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
1267 			socket_id);
1268 }
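
/*
 * Usage sketch (illustrative): setting up queue pair 0 after
 * rte_cryptodev_configure(). sess_mp is assumed to come from
 * rte_cryptodev_sym_session_pool_create() and sess_priv_mp from
 * rte_mempool_create() with elements at least as large as the driver's
 * private session size; 2048 descriptors is an example depth.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot set up queue pair\n");
 */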
1269 
1270 struct rte_cryptodev_cb *
1271 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1272 			       uint16_t qp_id,
1273 			       rte_cryptodev_callback_fn cb_fn,
1274 			       void *cb_arg)
1275 {
1276 	struct rte_cryptodev *dev;
1277 	struct rte_cryptodev_cb_rcu *list;
1278 	struct rte_cryptodev_cb *cb, *tail;
1279 
1280 	if (!cb_fn) {
1281 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1282 		rte_errno = EINVAL;
1283 		return NULL;
1284 	}
1285 
1286 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1287 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1288 		rte_errno = ENODEV;
1289 		return NULL;
1290 	}
1291 
1292 	dev = &rte_crypto_devices[dev_id];
1293 	if (qp_id >= dev->data->nb_queue_pairs) {
1294 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1295 		rte_errno = ENODEV;
1296 		return NULL;
1297 	}
1298 
1299 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1300 	if (cb == NULL) {
1301 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1302 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1303 		rte_errno = ENOMEM;
1304 		return NULL;
1305 	}
1306 
1307 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1308 
1309 	cb->fn = cb_fn;
1310 	cb->arg = cb_arg;
1311 
1312 	/* Add the callbacks in fifo order. */
1313 	list = &dev->enq_cbs[qp_id];
1314 	tail = list->next;
1315 
1316 	if (tail) {
1317 		while (tail->next)
1318 			tail = tail->next;
1319 		/* Stores to cb->fn and cb->arg should complete before
1320 		 * cb is visible to data plane.
1321 		 */
1322 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1323 	} else {
1324 		/* Stores to cb->fn and cb->arg should complete before
1325 		 * cb is visible to data plane.
1326 		 */
1327 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1328 	}
1329 
1330 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1331 
1332 	return cb;
1333 }
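
/*
 * Usage sketch (illustrative): a pass-through enqueue callback that only
 * counts operations. It runs in the data-plane thread inside
 * rte_cryptodev_enqueue_burst() on this queue pair.
 *
 *	static uint64_t enq_count;
 *
 *	static uint16_t
 *	count_cb(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops,
 *			uint16_t nb_ops, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(qp_id);
 *		RTE_SET_USED(ops);
 *		RTE_SET_USED(arg);
 *		enq_count += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	struct rte_cryptodev_cb *cb =
 *		rte_cryptodev_add_enq_callback(dev_id, 0, count_cb, NULL);
 */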
1334 
1335 int
1336 rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1337 				  uint16_t qp_id,
1338 				  struct rte_cryptodev_cb *cb)
1339 {
1340 	struct rte_cryptodev *dev;
1341 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1342 	struct rte_cryptodev_cb_rcu *list;
1343 	int ret;
1344 
1345 	ret = -EINVAL;
1346 
1347 	if (!cb) {
1348 		CDEV_LOG_ERR("Callback is NULL");
1349 		return -EINVAL;
1350 	}
1351 
1352 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1353 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1354 		return -ENODEV;
1355 	}
1356 
1357 	dev = &rte_crypto_devices[dev_id];
1358 	if (qp_id >= dev->data->nb_queue_pairs) {
1359 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1360 		return -ENODEV;
1361 	}
1362 
1363 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1364 	if (dev->enq_cbs == NULL) {
1365 		CDEV_LOG_ERR("Callback not initialized");
1366 		goto cb_err;
1367 	}
1368 
1369 	list = &dev->enq_cbs[qp_id];
1370 	if (list == NULL) {
1371 		CDEV_LOG_ERR("Callback list is NULL");
1372 		goto cb_err;
1373 	}
1374 
1375 	if (list->qsbr == NULL) {
1376 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1377 		goto cb_err;
1378 	}
1379 
1380 	prev_cb = &list->next;
1381 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1382 		curr_cb = *prev_cb;
1383 		if (curr_cb == cb) {
1384 			/* Remove the user cb from the callback list. */
1385 			__atomic_store_n(prev_cb, curr_cb->next,
1386 				__ATOMIC_RELAXED);
1387 			ret = 0;
1388 			break;
1389 		}
1390 	}
1391 
1392 	if (!ret) {
1393 		/* Call sync with invalid thread id as this is part of
1394 		 * control plane API
1395 		 */
1396 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1397 		rte_free(cb);
1398 	}
1399 
1400 cb_err:
1401 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1402 	return ret;
1403 }
1404 
1405 struct rte_cryptodev_cb *
1406 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1407 			       uint16_t qp_id,
1408 			       rte_cryptodev_callback_fn cb_fn,
1409 			       void *cb_arg)
1410 {
1411 	struct rte_cryptodev *dev;
1412 	struct rte_cryptodev_cb_rcu *list;
1413 	struct rte_cryptodev_cb *cb, *tail;
1414 
1415 	if (!cb_fn) {
1416 		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
1417 		rte_errno = EINVAL;
1418 		return NULL;
1419 	}
1420 
1421 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1422 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1423 		rte_errno = ENODEV;
1424 		return NULL;
1425 	}
1426 
1427 	dev = &rte_crypto_devices[dev_id];
1428 	if (qp_id >= dev->data->nb_queue_pairs) {
1429 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1430 		rte_errno = ENODEV;
1431 		return NULL;
1432 	}
1433 
1434 	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
1435 	if (cb == NULL) {
1436 		CDEV_LOG_ERR("Failed to allocate memory for callback on "
1437 			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
1438 		rte_errno = ENOMEM;
1439 		return NULL;
1440 	}
1441 
1442 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1443 
1444 	cb->fn = cb_fn;
1445 	cb->arg = cb_arg;
1446 
1447 	/* Add the callbacks in fifo order. */
1448 	list = &dev->deq_cbs[qp_id];
1449 	tail = list->next;
1450 
1451 	if (tail) {
1452 		while (tail->next)
1453 			tail = tail->next;
1454 		/* Stores to cb->fn and cb->arg should complete before
1455 		 * cb is visible to data plane.
1456 		 */
1457 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
1458 	} else {
1459 		/* Stores to cb->fn and cb->arg should complete before
1460 		 * cb is visible to data plane.
1461 		 */
1462 		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
1463 	}
1464 
1465 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1466 
1467 	return cb;
1468 }
1469 
1470 int
1471 rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1472 				  uint16_t qp_id,
1473 				  struct rte_cryptodev_cb *cb)
1474 {
1475 	struct rte_cryptodev *dev;
1476 	struct rte_cryptodev_cb **prev_cb, *curr_cb;
1477 	struct rte_cryptodev_cb_rcu *list;
1478 	int ret;
1479 
1480 	ret = -EINVAL;
1481 
1482 	if (!cb) {
1483 		CDEV_LOG_ERR("Callback is NULL");
1484 		return -EINVAL;
1485 	}
1486 
1487 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1488 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1489 		return -ENODEV;
1490 	}
1491 
1492 	dev = &rte_crypto_devices[dev_id];
1493 	if (qp_id >= dev->data->nb_queue_pairs) {
1494 		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
1495 		return -ENODEV;
1496 	}
1497 
1498 	rte_spinlock_lock(&rte_cryptodev_callback_lock);
1499 	if (dev->deq_cbs == NULL) {
1500 		CDEV_LOG_ERR("Callback not initialized");
1501 		goto cb_err;
1502 	}
1503 
1504 	list = &dev->deq_cbs[qp_id];
1505 	if (list == NULL) {
1506 		CDEV_LOG_ERR("Callback list is NULL");
1507 		goto cb_err;
1508 	}
1509 
1510 	if (list->qsbr == NULL) {
1511 		CDEV_LOG_ERR("Rcu qsbr is NULL");
1512 		goto cb_err;
1513 	}
1514 
1515 	prev_cb = &list->next;
1516 	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
1517 		curr_cb = *prev_cb;
1518 		if (curr_cb == cb) {
1519 			/* Remove the user cb from the callback list. */
1520 			__atomic_store_n(prev_cb, curr_cb->next,
1521 				__ATOMIC_RELAXED);
1522 			ret = 0;
1523 			break;
1524 		}
1525 	}
1526 
1527 	if (!ret) {
1528 		/* Call sync with invalid thread id as this is part of
1529 		 * control plane API
1530 		 */
1531 		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
1532 		rte_free(cb);
1533 	}
1534 
1535 cb_err:
1536 	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
1537 	return ret;
1538 }
1539 
1540 int
1541 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
1542 {
1543 	struct rte_cryptodev *dev;
1544 
1545 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1546 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1547 		return -ENODEV;
1548 	}
1549 
1550 	if (stats == NULL) {
1551 		CDEV_LOG_ERR("Invalid stats ptr");
1552 		return -EINVAL;
1553 	}
1554 
1555 	dev = &rte_crypto_devices[dev_id];
1556 	memset(stats, 0, sizeof(*stats));
1557 
1558 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1559 	(*dev->dev_ops->stats_get)(dev, stats);
1560 	return 0;
1561 }
1562 
1563 void
1564 rte_cryptodev_stats_reset(uint8_t dev_id)
1565 {
1566 	struct rte_cryptodev *dev;
1567 
1568 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1569 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1570 		return;
1571 	}
1572 
1573 	dev = &rte_crypto_devices[dev_id];
1574 
1575 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1576 	(*dev->dev_ops->stats_reset)(dev);
1577 }
1578 
1579 void
1580 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
1581 {
1582 	struct rte_cryptodev *dev;
1583 
1584 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1585 		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
1586 		return;
1587 	}
1588 
1589 	dev = &rte_crypto_devices[dev_id];
1590 
1591 	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
1592 
1593 	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1594 	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
1595 
1596 	dev_info->driver_name = dev->device->driver->name;
1597 	dev_info->device = dev->device;
1598 }
1599 
1600 int
1601 rte_cryptodev_callback_register(uint8_t dev_id,
1602 			enum rte_cryptodev_event_type event,
1603 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1604 {
1605 	struct rte_cryptodev *dev;
1606 	struct rte_cryptodev_callback *user_cb;
1607 
1608 	if (!cb_fn)
1609 		return -EINVAL;
1610 
1611 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1612 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1613 		return -EINVAL;
1614 	}
1615 
1616 	dev = &rte_crypto_devices[dev_id];
1617 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1618 
1619 	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
1620 		if (user_cb->cb_fn == cb_fn &&
1621 			user_cb->cb_arg == cb_arg &&
1622 			user_cb->event == event) {
1623 			break;
1624 		}
1625 	}
1626 
1627 	/* create a new callback. */
1628 	if (user_cb == NULL) {
1629 		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
1630 				sizeof(struct rte_cryptodev_callback), 0);
1631 		if (user_cb != NULL) {
1632 			user_cb->cb_fn = cb_fn;
1633 			user_cb->cb_arg = cb_arg;
1634 			user_cb->event = event;
1635 			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
1636 		}
1637 	}
1638 
1639 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1640 	return (user_cb == NULL) ? -ENOMEM : 0;
1641 }
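
/*
 * Usage sketch (illustrative): registering an error-event handler. The
 * handler follows the rte_cryptodev_cb_fn signature.
 *
 *	static void
 *	err_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		printf("event %d on dev %u\n", event, dev_id);
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *			err_event_cb, NULL);
 */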
1642 
1643 int
1644 rte_cryptodev_callback_unregister(uint8_t dev_id,
1645 			enum rte_cryptodev_event_type event,
1646 			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
1647 {
1648 	int ret;
1649 	struct rte_cryptodev *dev;
1650 	struct rte_cryptodev_callback *cb, *next;
1651 
1652 	if (!cb_fn)
1653 		return -EINVAL;
1654 
1655 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1656 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1657 		return -EINVAL;
1658 	}
1659 
1660 	dev = &rte_crypto_devices[dev_id];
1661 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1662 
1663 	ret = 0;
1664 	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
1665 
1666 		next = TAILQ_NEXT(cb, next);
1667 
1668 		if (cb->cb_fn != cb_fn || cb->event != event ||
1669 				(cb->cb_arg != (void *)-1 &&
1670 				cb->cb_arg != cb_arg))
1671 			continue;
1672 
1673 		/*
1674 		 * if this callback is not executing right now,
1675 		 * then remove it.
1676 		 */
1677 		if (cb->active == 0) {
1678 			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
1679 			rte_free(cb);
1680 		} else {
1681 			ret = -EAGAIN;
1682 		}
1683 	}
1684 
1685 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1686 	return ret;
1687 }
1688 
1689 void
1690 rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
1691 	enum rte_cryptodev_event_type event)
1692 {
1693 	struct rte_cryptodev_callback *cb_lst;
1694 	struct rte_cryptodev_callback dev_cb;
1695 
1696 	rte_spinlock_lock(&rte_cryptodev_cb_lock);
1697 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
1698 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1699 			continue;
1700 		dev_cb = *cb_lst;
1701 		cb_lst->active = 1;
1702 		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1703 		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1704 						dev_cb.cb_arg);
1705 		rte_spinlock_lock(&rte_cryptodev_cb_lock);
1706 		cb_lst->active = 0;
1707 	}
1708 	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
1709 }
1710 
1711 int
1712 rte_cryptodev_sym_session_init(uint8_t dev_id,
1713 		struct rte_cryptodev_sym_session *sess,
1714 		struct rte_crypto_sym_xform *xforms,
1715 		struct rte_mempool *mp)
1716 {
1717 	struct rte_cryptodev *dev;
1718 	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
1719 			dev_id);
1720 	uint8_t index;
1721 	int ret;
1722 
1723 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1724 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1725 		return -EINVAL;
1726 	}
1727 
1728 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1729 
1730 	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
1731 		return -EINVAL;
1732 
1733 	if (mp->elt_size < sess_priv_sz)
1734 		return -EINVAL;
1735 
1736 	index = dev->driver_id;
1737 	if (index >= sess->nb_drivers)
1738 		return -EINVAL;
1739 
1740 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
1741 
1742 	if (sess->sess_data[index].refcnt == 0) {
1743 		ret = dev->dev_ops->sym_session_configure(dev, xforms,
1744 							sess, mp);
1745 		if (ret < 0) {
1746 			CDEV_LOG_ERR(
1747 				"dev_id %d failed to configure session details",
1748 				dev_id);
1749 			return ret;
1750 		}
1751 	}
1752 
1753 	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
1754 	sess->sess_data[index].refcnt++;
1755 	return 0;
1756 }
1757 
1758 int
1759 rte_cryptodev_asym_session_init(uint8_t dev_id,
1760 		struct rte_cryptodev_asym_session *sess,
1761 		struct rte_crypto_asym_xform *xforms,
1762 		struct rte_mempool *mp)
1763 {
1764 	struct rte_cryptodev *dev;
1765 	uint8_t index;
1766 	int ret;
1767 
1768 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1769 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1770 		return -EINVAL;
1771 	}
1772 
1773 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1774 
1775 	if (sess == NULL || xforms == NULL || dev == NULL)
1776 		return -EINVAL;
1777 
1778 	index = dev->driver_id;
1779 
1780 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
1781 				-ENOTSUP);
1782 
1783 	if (sess->sess_private_data[index] == NULL) {
1784 		ret = dev->dev_ops->asym_session_configure(dev,
1785 							xforms,
1786 							sess, mp);
1787 		if (ret < 0) {
1788 			CDEV_LOG_ERR(
1789 				"dev_id %d failed to configure session details",
1790 				dev_id);
1791 			return ret;
1792 		}
1793 	}
1794 
1795 	rte_cryptodev_trace_asym_session_init(dev_id, sess, xforms, mp);
1796 	return 0;
1797 }
1798 
1799 struct rte_mempool *
1800 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1801 	uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
1802 	int socket_id)
1803 {
1804 	struct rte_mempool *mp;
1805 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1806 	uint32_t obj_sz;
1807 
1808 	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
1809 	if (obj_sz > elt_size)
1810 		CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size,
1811 				obj_sz);
1812 	else
1813 		obj_sz = elt_size;
1814 
1815 	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
1816 			(uint32_t)(sizeof(*pool_priv)),
1817 			NULL, NULL, NULL, NULL,
1818 			socket_id, 0);
1819 	if (mp == NULL) {
1820 		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n",
1821 			__func__, name, rte_errno);
1822 		return NULL;
1823 	}
1824 
1825 	pool_priv = rte_mempool_get_priv(mp);
1826 	if (!pool_priv) {
1827 		CDEV_LOG_ERR("%s(name=%s) failed to get private data\n",
1828 			__func__, name);
1829 		rte_mempool_free(mp);
1830 		return NULL;
1831 	}
1832 
1833 	pool_priv->nb_drivers = nb_drivers;
1834 	pool_priv->user_data_sz = user_data_size;
1835 
1836 	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
1837 		elt_size, cache_size, user_data_size, mp);
1838 	return mp;
1839 }
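
/*
 * Usage sketch (illustrative): creating a session header pool. Passing
 * elt_size == 0 lets the expansion above pick the minimum size; the
 * element count, cache size and user data size are example values.
 *
 *	struct rte_mempool *sess_mp =
 *		rte_cryptodev_sym_session_pool_create("sess_hdr_pool",
 *				1024, 0, 32, 0, rte_socket_id());
 *
 *	if (sess_mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create session pool\n");
 */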
1840 
1841 static unsigned int
1842 rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
1843 {
1844 	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
1845 			sess->user_data_sz;
1846 }
1847 
1848 static uint8_t
1849 rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
1850 {
1851 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1852 
1853 	if (!mp)
1854 		return 0;
1855 
1856 	pool_priv = rte_mempool_get_priv(mp);
1857 
1858 	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
1859 			pool_priv->nb_drivers != nb_drivers ||
1860 			mp->elt_size <
1861 				rte_cryptodev_sym_get_header_session_size()
1862 				+ pool_priv->user_data_sz)
1863 		return 0;
1864 
1865 	return 1;
1866 }
1867 
1868 struct rte_cryptodev_sym_session *
1869 rte_cryptodev_sym_session_create(struct rte_mempool *mp)
1870 {
1871 	struct rte_cryptodev_sym_session *sess;
1872 	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
1873 
1874 	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
1875 		CDEV_LOG_ERR("Invalid mempool\n");
1876 		return NULL;
1877 	}
1878 
1879 	pool_priv = rte_mempool_get_priv(mp);
1880 
1881 	/* Allocate a session structure from the session pool */
1882 	if (rte_mempool_get(mp, (void **)&sess)) {
1883 		CDEV_LOG_ERR("couldn't get object from session mempool");
1884 		return NULL;
1885 	}
1886 
1887 	sess->nb_drivers = pool_priv->nb_drivers;
1888 	sess->user_data_sz = pool_priv->user_data_sz;
1889 	sess->opaque_data = 0;
1890 
1891 	/* Clear device session pointer.
1892 	 * Include the flag indicating presence of user data
1893 	 */
1894 	memset(sess->sess_data, 0,
1895 			rte_cryptodev_sym_session_data_size(sess));
1896 
1897 	rte_cryptodev_trace_sym_session_create(mp, sess);
1898 	return sess;
1899 }
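
/*
 * Usage sketch (illustrative): the full symmetric session lifecycle.
 * xform is assumed to be a populated struct rte_crypto_sym_xform;
 * sess_mp and sess_priv_mp are the pools from the earlier sketches.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	if (sess == NULL || rte_cryptodev_sym_session_init(dev_id, sess,
 *			&xform, sess_priv_mp) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 *
 *	(enqueue rte_crypto_ops referencing sess here)
 *
 *	rte_cryptodev_sym_session_clear(dev_id, sess);
 *	rte_cryptodev_sym_session_free(sess);
 */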
1900 
1901 struct rte_cryptodev_asym_session *
1902 rte_cryptodev_asym_session_create(struct rte_mempool *mp)
1903 {
1904 	struct rte_cryptodev_asym_session *sess;
1905 	unsigned int session_size =
1906 			rte_cryptodev_asym_get_header_session_size();
1907 
1908 	if (!mp) {
1909 		CDEV_LOG_ERR("invalid mempool\n");
1910 		return NULL;
1911 	}
1912 
1913 	/* Verify that the provided mempool's elements are big enough. */
1914 	if (mp->elt_size < session_size) {
1915 		CDEV_LOG_ERR(
1916 			"mempool elements too small to hold session objects");
1917 		return NULL;
1918 	}
1919 
1920 	/* Allocate a session structure from the session pool */
1921 	if (rte_mempool_get(mp, (void **)&sess)) {
1922 		CDEV_LOG_ERR("couldn't get object from session mempool");
1923 		return NULL;
1924 	}
1925 
1926 	/* Clear device session pointer.
1927 	 * Include the flag indicating presence of private data
1928 	 */
1929 	memset(sess, 0, session_size);
1930 
1931 	rte_cryptodev_trace_asym_session_create(mp, sess);
1932 	return sess;
1933 }
1934 
1935 int
1936 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1937 		struct rte_cryptodev_sym_session *sess)
1938 {
1939 	struct rte_cryptodev *dev;
1940 	uint8_t driver_id;
1941 
1942 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1943 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1944 		return -EINVAL;
1945 	}
1946 
1947 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1948 
1949 	if (dev == NULL || sess == NULL)
1950 		return -EINVAL;
1951 
1952 	driver_id = dev->driver_id;
1953 	if (sess->sess_data[driver_id].refcnt == 0)
1954 		return 0;
1955 	if (--sess->sess_data[driver_id].refcnt != 0)
1956 		return -EBUSY;
1957 
1958 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
1959 
1960 	dev->dev_ops->sym_session_clear(dev, sess);
1961 
1962 	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
1963 	return 0;
1964 }
1965 
1966 int
1967 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1968 		struct rte_cryptodev_asym_session *sess)
1969 {
1970 	struct rte_cryptodev *dev;
1971 
1972 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
1973 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
1974 		return -EINVAL;
1975 	}
1976 
1977 	dev = rte_cryptodev_pmd_get_dev(dev_id);
1978 
1979 	if (dev == NULL || sess == NULL)
1980 		return -EINVAL;
1981 
1982 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
1983 
1984 	dev->dev_ops->asym_session_clear(dev, sess);
1985 
1986 	rte_cryptodev_trace_asym_session_clear(dev_id, sess);
1987 	return 0;
1988 }
1989 
1990 int
1991 rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
1992 {
1993 	uint8_t i;
1994 	struct rte_mempool *sess_mp;
1995 
1996 	if (sess == NULL)
1997 		return -EINVAL;
1998 
1999 	/* Check that all device private data has been freed */
2000 	for (i = 0; i < sess->nb_drivers; i++) {
2001 		if (sess->sess_data[i].refcnt != 0)
2002 			return -EBUSY;
2003 	}
2004 
2005 	/* Return session to mempool */
2006 	sess_mp = rte_mempool_from_obj(sess);
2007 	rte_mempool_put(sess_mp, sess);
2008 
2009 	rte_cryptodev_trace_sym_session_free(sess);
2010 	return 0;
2011 }
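
/*
 * Teardown sketch (illustrative): a symmetric session must be cleared
 * on every device it was initialised for before it can be freed;
 * otherwise the refcount check above returns -EBUSY.
 *
 *	rte_cryptodev_sym_session_clear(dev_id, s);
 *	if (rte_cryptodev_sym_session_free(s) != 0)
 *		printf("session still referenced by a driver\n");
 */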
2012 
2013 int
2014 rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
2015 {
2016 	uint8_t i;
2017 	void *sess_priv;
2018 	struct rte_mempool *sess_mp;
2019 
2020 	if (sess == NULL)
2021 		return -EINVAL;
2022 
2023 	/* Check that all device private data has been freed */
2024 	for (i = 0; i < nb_drivers; i++) {
2025 		sess_priv = get_asym_session_private_data(sess, i);
2026 		if (sess_priv != NULL)
2027 			return -EBUSY;
2028 	}
2029 
2030 	/* Return session to mempool */
2031 	sess_mp = rte_mempool_from_obj(sess);
2032 	rte_mempool_put(sess_mp, sess);
2033 
2034 	rte_cryptodev_trace_asym_session_free(sess);
2035 	return 0;
2036 }
2037 
2038 unsigned int
2039 rte_cryptodev_sym_get_header_session_size(void)
2040 {
2041 	/*
2042 	 * The header contains pointers to the private data of all
2043 	 * registered drivers and all the information needed to safely
2044 	 * clear or free the session.
2045 	 */
2046 	struct rte_cryptodev_sym_session s = {0};
2047 
2048 	s.nb_drivers = nb_drivers;
2049 
2050 	return (unsigned int)(sizeof(s) +
2051 			rte_cryptodev_sym_session_data_size(&s));
2052 }
2053 
2054 unsigned int
2055 rte_cryptodev_sym_get_existing_header_session_size(
2056 		struct rte_cryptodev_sym_session *sess)
2057 {
2058 	if (sess == NULL)
2059 		return 0;
2060 
2061 	return (unsigned int)(sizeof(*sess) +
2062 			rte_cryptodev_sym_session_data_size(sess));
2063 }
2064 
2065 unsigned int
2066 rte_cryptodev_asym_get_header_session_size(void)
2067 {
2068 	/*
2069 	 * Header contains pointers to the private data
2070 	 * of all registered drivers, and a flag which
2071 	 * indicates presence of private data
2072 	 */
2073 	return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
2074 }
2075 
2076 unsigned int
2077 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
2078 {
2079 	struct rte_cryptodev *dev;
2080 	unsigned int priv_sess_size;
2081 
2082 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
2083 		return 0;
2084 
2085 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2086 
2087 	if (*dev->dev_ops->sym_session_get_size == NULL)
2088 		return 0;
2089 
2090 	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
2091 
2092 	return priv_sess_size;
2093 }
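
/*
 * Sizing sketch (illustrative): when one private-data mempool serves
 * several devices, its element size is typically the maximum of the
 * per-device private session sizes; invalid or unsupported devices
 * simply report 0 and drop out of the maximum.
 *
 *	unsigned int sz, max_sz = 0;
 *	uint8_t id;
 *
 *	for (id = 0; id < rte_cryptodev_count(); id++) {
 *		sz = rte_cryptodev_sym_get_private_session_size(id);
 *		if (sz > max_sz)
 *			max_sz = sz;
 *	}
 */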
2094 
2095 unsigned int
2096 rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
2097 {
2098 	struct rte_cryptodev *dev;
2099 	unsigned int header_size = sizeof(void *) * nb_drivers;
2100 	unsigned int priv_sess_size;
2101 
2102 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
2103 		return 0;
2104 
2105 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2106 
2107 	if (*dev->dev_ops->asym_session_get_size == NULL)
2108 		return 0;
2109 
2110 	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
2111 	if (priv_sess_size < header_size)
2112 		return header_size;
2113 
2114 	return priv_sess_size;
2116 }
2117 
2118 int
2119 rte_cryptodev_sym_session_set_user_data(
2120 					struct rte_cryptodev_sym_session *sess,
2121 					void *data,
2122 					uint16_t size)
2123 {
2124 	if (sess == NULL)
2125 		return -EINVAL;
2126 
2127 	if (sess->user_data_sz < size)
2128 		return -ENOMEM;
2129 
2130 	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
2131 	return 0;
2132 }
2133 
2134 void *
2135 rte_cryptodev_sym_session_get_user_data(
2136 					struct rte_cryptodev_sym_session *sess)
2137 {
2138 	if (sess == NULL || sess->user_data_sz == 0)
2139 		return NULL;
2140 
2141 	return (void *)(sess->sess_data + sess->nb_drivers);
2142 }
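
/*
 * Usage sketch (illustrative): the user data area sits after the
 * per-driver data slots, so it needs a pool created with a non-zero
 * user data size; struct app_ctx is a hypothetical application type.
 *
 *	struct app_ctx { uint32_t flow_id; } ctx = { .flow_id = 7 };
 *
 *	rte_cryptodev_sym_session_set_user_data(s, &ctx, sizeof(ctx));
 *	struct app_ctx *p = rte_cryptodev_sym_session_get_user_data(s);
 */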
2143 
2144 static inline void
2145 sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
2146 {
2147 	uint32_t i;
2148 	for (i = 0; i < vec->num; i++)
2149 		vec->status[i] = errnum;
2150 }
2151 
2152 uint32_t
2153 rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2154 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2155 	struct rte_crypto_sym_vec *vec)
2156 {
2157 	struct rte_cryptodev *dev;
2158 
2159 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
2160 		sym_crypto_fill_status(vec, EINVAL);
2161 		return 0;
2162 	}
2163 
2164 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2165 
2166 	if (*dev->dev_ops->sym_cpu_process == NULL ||
2167 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2168 		sym_crypto_fill_status(vec, ENOTSUP);
2169 		return 0;
2170 	}
2171 
2172 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2173 }
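
/*
 * Error convention note (sketch): on a bad device or missing CPU
 * crypto support the function above fills vec->status[] with a
 * positive errno and returns 0, so callers compare the return value
 * against vec->num rather than checking for a negative return.
 *
 *	uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id, s,
 *			ofs, &vec);
 *	if (done != vec.num)
 *		(inspect vec.status[i], e.g. EINVAL or ENOTSUP)
 */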
2174 
2175 int
2176 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2177 {
2178 	struct rte_cryptodev *dev;
2179 	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2180 	int32_t priv_size;
2181 
2182 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
2183 		return -EINVAL;
2184 
2185 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2186 
2187 	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2188 		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2189 		return -ENOTSUP;
2190 	}
2191 
2192 	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2193 	if (priv_size < 0)
2194 		return -ENOTSUP;
2195 
2196 	return RTE_ALIGN_CEIL((size + priv_size), 8);
2197 }
2198 
2199 int
2200 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
2201 	struct rte_crypto_raw_dp_ctx *ctx,
2202 	enum rte_crypto_op_sess_type sess_type,
2203 	union rte_cryptodev_session_ctx session_ctx,
2204 	uint8_t is_update)
2205 {
2206 	struct rte_cryptodev *dev;
2207 
2208 	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
2209 		return -EINVAL;
2210 
2211 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2212 	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
2213 			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
2214 		return -ENOTSUP;
2215 
2216 	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
2217 			sess_type, session_ctx, is_update);
2218 }
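
/*
 * Setup sketch (illustrative): the raw data-path context is sized by
 * the query above and may live in any application-provided buffer;
 * dev_id, qp_id and the session s are assumed to exist already.
 *
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = s };
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *
 *	if (sz < 0)
 *		return sz;
 *	ctx = rte_zmalloc(NULL, sz, 8);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 */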
2219 
2220 uint32_t
2221 rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2222 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
2223 	void **user_data, int *enqueue_status)
2224 {
2225 	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
2226 			ofs, user_data, enqueue_status);
2227 }
2228 
2229 int
2230 rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
2231 		uint32_t n)
2232 {
2233 	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2234 }
2235 
2236 uint32_t
2237 rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
2238 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
2239 	uint32_t max_nb_to_dequeue,
2240 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
2241 	void **out_user_data, uint8_t is_user_data_array,
2242 	uint32_t *n_success_jobs, int *status)
2243 {
2244 	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
2245 		get_dequeue_count, max_nb_to_dequeue, post_dequeue,
2246 		out_user_data, is_user_data_array, n_success_jobs, status);
2247 }
2248 
2249 int
2250 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
2251 		uint32_t n)
2252 {
2253 	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
2254 }
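
/*
 * Data-path sketch (illustrative): enqueue and dequeue are two-phase;
 * assuming the documented convention that a driver-written status of 0
 * means the burst is only cached, the *_done() calls above commit n
 * operations to, or reclaim them from, the queue pair. vec, ofs and
 * udata are assumed to be prepared by the application.
 *
 *	int st;
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			udata, &st);
 *
 *	if (n > 0 && st == 0)
 *		rte_cryptodev_raw_enqueue_done(ctx, n);
 */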
2255 
2256 /** Initialise rte_crypto_op mempool element */
2257 static void
2258 rte_crypto_op_init(struct rte_mempool *mempool,
2259 		void *opaque_arg,
2260 		void *_op_data,
2261 		__rte_unused unsigned i)
2262 {
2263 	struct rte_crypto_op *op = _op_data;
2264 	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
2265 
2266 	memset(_op_data, 0, mempool->elt_size);
2267 
2268 	__rte_crypto_op_reset(op, type);
2269 
2270 	op->phys_addr = rte_mem_virt2iova(_op_data);
2271 	op->mempool = mempool;
2272 }
2273 
2275 struct rte_mempool *
2276 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
2277 		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
2278 		int socket_id)
2279 {
2280 	struct rte_crypto_op_pool_private *priv;
2281 
2282 	unsigned int elt_size = sizeof(struct rte_crypto_op) + priv_size;
2284 
2285 	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
2286 		elt_size += sizeof(struct rte_crypto_sym_op);
2287 	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
2288 		elt_size += sizeof(struct rte_crypto_asym_op);
2289 	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
2290 		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
2291 				sizeof(struct rte_crypto_asym_op));
2292 	} else {
2293 		CDEV_LOG_ERR("Invalid op_type");
2294 		return NULL;
2295 	}
2296 
2297 	/* lookup mempool in case already allocated */
2298 	struct rte_mempool *mp = rte_mempool_lookup(name);
2299 
2300 	if (mp != NULL) {
2301 		priv = (struct rte_crypto_op_pool_private *)
2302 				rte_mempool_get_priv(mp);
2303 
2304 		if (mp->elt_size != elt_size ||
2305 				mp->cache_size < cache_size ||
2306 				mp->size < nb_elts ||
2307 				priv->priv_size < priv_size) {
2309 			CDEV_LOG_ERR("Mempool %s already exists but with "
2310 					"incompatible parameters", name);
2311 			return NULL;
2312 		}
2313 		return mp;
2314 	}
2315 
2316 	mp = rte_mempool_create(
2317 			name,
2318 			nb_elts,
2319 			elt_size,
2320 			cache_size,
2321 			sizeof(struct rte_crypto_op_pool_private),
2322 			NULL,
2323 			NULL,
2324 			rte_crypto_op_init,
2325 			&type,
2326 			socket_id,
2327 			0);
2328 
2329 	if (mp == NULL) {
2330 		CDEV_LOG_ERR("Failed to create mempool %s", name);
2331 		return NULL;
2332 	}
2333 
2334 	priv = (struct rte_crypto_op_pool_private *)
2335 			rte_mempool_get_priv(mp);
2336 
2337 	priv->priv_size = priv_size;
2338 	priv->type = type;
2339 
2340 	return mp;
2341 }
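
/*
 * Usage sketch (illustrative): allocating operations from a pool made
 * by the function above; names and sizes are placeholders.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 0, rte_socket_id());
 *	struct rte_crypto_op *ops[32];
 *
 *	if (rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			ops, 32) == 0)
 *		(pool exhausted, back off and retry)
 */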
2342 
2343 int
2344 rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
2345 {
2346 	struct rte_cryptodev *dev = NULL;
2347 	uint32_t i = 0;
2348 
2349 	if (name == NULL)
2350 		return -EINVAL;
2351 
2352 	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
2353 		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
2354 				"%s_%u", dev_name_prefix, i);
2355 
2356 		if (ret < 0)
2357 			return ret;
2358 
2359 		dev = rte_cryptodev_pmd_get_named_dev(name);
2360 		if (!dev)
2361 			return 0;
2362 	}
2363 
2364 	return -1;
2365 }
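
/*
 * PMD-side sketch (illustrative): generating a unique device name at
 * probe time; "crypto_foo" is a hypothetical driver prefix.
 *
 *	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 *
 *	if (rte_cryptodev_pmd_create_dev_name(name, "crypto_foo") != 0)
 *		return -EINVAL;	(all RTE_CRYPTO_MAX_DEVS slots taken)
 */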
2366 
2367 TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
2368 
2369 static struct cryptodev_driver_list cryptodev_driver_list =
2370 	TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
2371 
2372 int
2373 rte_cryptodev_driver_id_get(const char *name)
2374 {
2375 	struct cryptodev_driver *driver;
2376 	const char *driver_name;
2377 
2378 	if (name == NULL) {
2379 		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
2380 		return -1;
2381 	}
2382 
2383 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
2384 		driver_name = driver->driver->name;
2385 		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
2386 			return driver->id;
2387 	}
2388 	return -1;
2389 }
2390 
2391 const char *
2392 rte_cryptodev_name_get(uint8_t dev_id)
2393 {
2394 	struct rte_cryptodev *dev;
2395 
2396 	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
2397 		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
2398 		return NULL;
2399 	}
2400 
2401 	dev = rte_cryptodev_pmd_get_dev(dev_id);
2402 	if (dev == NULL)
2403 		return NULL;
2404 
2405 	return dev->data->name;
2406 }
2407 
2408 const char *
2409 rte_cryptodev_driver_name_get(uint8_t driver_id)
2410 {
2411 	struct cryptodev_driver *driver;
2412 
2413 	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
2414 		if (driver->id == driver_id)
2415 			return driver->driver->name;
2416 	return NULL;
2417 }
2418 
2419 uint8_t
2420 rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
2421 		const struct rte_driver *drv)
2422 {
2423 	crypto_drv->driver = drv;
2424 	crypto_drv->id = nb_drivers;
2425 
2426 	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
2427 
2428 	return nb_drivers++;
2429 }
2430 }
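
/*
 * Registration sketch: PMDs normally do not call the function above
 * directly but go through the RTE_PMD_REGISTER_CRYPTO_DRIVER()
 * constructor macro; foo_crypto_drv, foo_pmd and foo_driver_id are
 * hypothetical driver symbols.
 *
 *	static struct cryptodev_driver foo_crypto_drv;
 *	static uint8_t foo_driver_id;
 *	RTE_PMD_REGISTER_CRYPTO_DRIVER(foo_crypto_drv, foo_pmd.driver,
 *			foo_driver_id);
 */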