/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_common.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

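/*
 * One session mempool per NUMA socket, shared by all crypto devices on
 * that socket. The pools are created lazily, on first use.
 */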
static struct {
	struct rte_mempool *sess_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis",
	[CPERF_IPSEC] = "ipsec",
	[CPERF_ASYM_MODEX] = "modex",
	[CPERF_ASYM_SECP256R1] = "ecdsa_p256r1",
	[CPERF_ASYM_ED25519] = "eddsa_25519",
	[CPERF_ASYM_SM2] = "sm2",
	[CPERF_TLS] = "tls-record"
};

const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};

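/* Create the per-socket asymmetric session pool, unless it already exists. */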
static int
create_asym_op_pool_socket(int32_t socket_id, uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mpool = NULL;

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_sess_pool%d",
			 socket_id);
		mpool = rte_cryptodev_asym_session_pool_create(mp_name,
				nb_sessions, 0, 0, socket_id);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
			       mp_name, socket_id);
			return -ENOMEM;
		}
		session_pool_socket[socket_id].sess_mp = mpool;
	}
	return 0;
}

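/*
 * Create the per-socket symmetric session pool, unless it already exists.
 * The private data size must cover the largest session size reported by
 * any of the crypto devices.
 */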
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].sess_mp == NULL) {

		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%d", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, session_priv_size, 0, 0,
					socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}

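/*
 * Configure and start the crypto devices matching opts->device_type:
 * size the session pools, spread the worker lcores across the devices as
 * queue pairs and set each queue pair up. Returns the number of devices
 * enabled, or a negative errno value on failure.
 */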
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if more are available than worker cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#ifdef RTE_LIB_SECURITY
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_security_session_get_size(
				rte_cryptodev_get_sec_ctx(cdev_id));
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#endif
	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * with 4 cores and 2 crypto devices, 2 queue pairs are set up
	 * per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;
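
	/*
	 * The division above rounds up: e.g. 5 worker cores over 2 devices
	 * give 3 queue pairs per device, so every worker core can be given
	 * its own queue pair.
	 */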

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_CRYPTO_SCHEDULER
		/*
		 * If the multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		int socket_id = rte_cryptodev_socket_id(cdev_id);

		/* Use the first socket if SOCKET_ID_ANY is returned. */
		if (socket_id == SOCKET_ID_ANY)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (cperf_is_asym_test(opts)) {
			if ((cdev_info.feature_flags &
			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) == 0)
				continue;
		}

		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
		};

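		/*
		 * Disable the feature flags the selected op type does not
		 * exercise, so the PMD does not set up resources for them.
		 */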
		switch (opts->op_type) {
		case CPERF_ASYM_SECP256R1:
		case CPERF_ASYM_ED25519:
		case CPERF_ASYM_SM2:
		case CPERF_ASYM_MODEX:
			conf.ff_disable |= (RTE_CRYPTODEV_FF_SECURITY |
					    RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO);
			break;
		case CPERF_CIPHER_ONLY:
		case CPERF_AUTH_ONLY:
		case CPERF_CIPHER_THEN_AUTH:
		case CPERF_AUTH_THEN_CIPHER:
		case CPERF_AEAD:
			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;
			/* Fall through */
		case CPERF_PDCP:
		case CPERF_DOCSIS:
		case CPERF_IPSEC:
		case CPERF_TLS:
			/* Fall through */
		default:
			conf.ff_disable |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
		}

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors,
			.priority = RTE_CRYPTODEV_QP_PRIORITY_HIGHEST
		};

		/*
		 * Device info specifies the minimum headroom and tailroom
		 * required by the crypto PMD. This needs to be honoured by
		 * the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_CRYPTO_SCHEDULER
			uint32_t nb_workers =
				rte_cryptodev_scheduler_workers_get(cdev_id,
								NULL);
			/* Scheduler session header per lcore + 1 session per worker qp */
			sessions_needed = nb_lcores + enabled_cdev_count *
				opts->nb_qps * nb_workers;
#endif
		} else
			sessions_needed = enabled_cdev_count * opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device.
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		if (cperf_is_asym_test(opts))
			ret = create_asym_op_pool_socket(socket_id,
							 sessions_needed);
		else
			ret = fill_session_pool_socket(socket_id, max_sess_size,
						       sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;

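		/*
		 * The queue pair session mempool only applies to symmetric
		 * sessions; asymmetric sessions come from the dedicated
		 * asym pool created above.
		 */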
		if (cperf_is_asym_test(opts))
			qp_conf.mp_session = NULL;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			if ((1 << j) & opts->low_prio_qp_mask)
				qp_conf.priority = RTE_CRYPTODEV_QP_PRIORITY_LOWEST;

			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

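/*
 * Check that every enabled device supports the algorithms and the key,
 * IV, digest and AAD sizes requested on the command line. For the
 * asymmetric tests, also pick the per-algorithm test data matching the
 * advertised capabilities.
 */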
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;
	struct rte_cryptodev_asym_capability_idx asym_cap_idx;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_ASYM_MODEX) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_MODEX;
			asym_capability = rte_cryptodev_asym_capability_get(
				cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			ret = rte_cryptodev_asym_xform_capability_check_modlen(
				asym_capability, opts->modex_data->modulus.len);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_ASYM_SECP256R1) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_ECDSA;
			asym_capability = rte_cryptodev_asym_capability_get(cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			if (!rte_cryptodev_asym_xform_capability_check_optype(asym_capability,
						opts->asym_op_type))
				return -1;

			/* Let the device generate k when it has an internal RNG. */
			if (asym_capability->internal_rng != 0) {
				opts->secp256r1_data->k.data = NULL;
				opts->secp256r1_data->k.length = 0;
			}
		}

		if (opts->op_type == CPERF_ASYM_ED25519) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_EDDSA;
			asym_capability = rte_cryptodev_asym_capability_get(cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			if (!rte_cryptodev_asym_xform_capability_check_optype(asym_capability,
						opts->asym_op_type))
				return -1;
		}

		if (opts->op_type == CPERF_ASYM_SM2) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_SM2;
			asym_capability = rte_cryptodev_asym_capability_get(cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			if (!rte_cryptodev_asym_xform_capability_check_optype(asym_capability,
						opts->asym_op_type))
				return -1;

			/* Hash with SM3 if the device supports it; otherwise pass a precomputed digest. */
			if (rte_cryptodev_asym_xform_capability_check_hash(asym_capability,
						RTE_CRYPTO_AUTH_SM3)) {
				opts->asym_hash_alg = RTE_CRYPTO_AUTH_SM3;
				if (opts->asym_op_type == RTE_CRYPTO_ASYM_OP_SIGN ||
						opts->asym_op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
					opts->sm2_data->message.data = sm2_perf_data.message.data;
					opts->sm2_data->message.length =
							sm2_perf_data.message.length;
					opts->sm2_data->id.data = sm2_perf_data.id.data;
					opts->sm2_data->id.length = sm2_perf_data.id.length;
				}
			} else {
				opts->asym_hash_alg = RTE_CRYPTO_AUTH_NULL;
				if (opts->asym_op_type == RTE_CRYPTO_ASYM_OP_SIGN ||
						opts->asym_op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
					opts->sm2_data->message.data = sm2_perf_data.digest.data;
					opts->sm2_data->message.length =
							sm2_perf_data.digest.length;
					opts->sm2_data->id.data = NULL;
					opts->sm2_data->id.length = 0;
				}
			}
			if (asym_capability->internal_rng != 0) {
				opts->sm2_data->k.data = NULL;
				opts->sm2_data->k.length = 0;
			}
			if (opts->asym_op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
				opts->sm2_data->message.data = sm2_perf_data.message.data;
				opts->sm2_data->message.length = sm2_perf_data.message.length;
				opts->sm2_data->cipher.data = sm2_perf_data.cipher.data;
				opts->sm2_data->cipher.length = sm2_perf_data.cipher.length;
			} else if (opts->asym_op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
				opts->sm2_data->cipher.data = sm2_perf_data.cipher.data;
				opts->sm2_data->cipher.length = sm2_perf_data.cipher.length;
				opts->sm2_data->message.data = sm2_perf_data.message.data;
				opts->sm2_data->message.length = sm2_perf_data.message.length;
			} else if (opts->asym_op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
				opts->sm2_data->sign_r.data = sm2_perf_data.sign_r.data;
				opts->sm2_data->sign_r.length = sm2_perf_data.sign_r.length;
				opts->sm2_data->sign_s.data = sm2_perf_data.sign_s.data;
				opts->sm2_data->sign_s.length = sm2_perf_data.sign_s.length;
			} else if (opts->asym_op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
				opts->sm2_data->sign_r.data = sm2_perf_data.sign_r.data;
				opts->sm2_data->sign_r.length = sm2_perf_data.sign_r.length;
				opts->sm2_data->sign_s.data = sm2_perf_data.sign_s.data;
				opts->sm2_data->sign_s.length = sm2_perf_data.sign_s.length;
			}
		}

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

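/*
 * Verify that a test vector parsed from a file carries every field the
 * selected op type requires, with sizes matching the command-line options.
 */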
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
	uint8_t buffer_size_idx = 0;
	int ret;
	uint32_t lcore_id;
	bool cap_unsupported = false;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"the requested capabilities\n");
		cap_unsupported = true;
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" the specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1,
					"Necessary test vectors are incomplete\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" the specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"the specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

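	/*
	 * Hand one queue pair to each worker lcore, walking through the
	 * queue pairs of one device before moving on to the next device.
	 */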
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;

	void *sess = NULL;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		int socket_id = rte_cryptodev_socket_id(cdev_id);

		/* Use the first socket if SOCKET_ID_ANY is returned. */
		if (socket_id == SOCKET_ID_ANY)
			socket_id = 0;

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns, &sess);

		/*
		 * If sess was NULL, the constructor will have set it to a newly
		 * created session. This means future calls to constructors will
		 * provide this session, sharing it across all qps. If session
		 * sharing is not enabled, reset sess to NULL to prevent this.
		 */
		if (!opts.shared_session)
			sess = NULL;

		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}

		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0) {
			cdev_index++;
			/* If the next qp is on a new cdev, don't share the
			 * session - it shouldn't be shared across different
			 * cdevs.
			 */
			sess = NULL;
		}
		i++;
	}

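	/*
	 * With an IMIX distribution, pre-compute a randomized sequence of
	 * buffer sizes and run the test once over that mix; otherwise sweep
	 * every buffer size in the configured range or list.
	 */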
	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate IMIX buffer size list\n");
			goto err;
		}
		/*
		 * Calculate the accumulated distribution of
		 * probabilities per packet size.
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on the distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate the average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get the first size from the range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get the next size from the range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	if (rte_errno == ENOTSUP || cap_unsupported) {
		RTE_LOG(ERR, USER1, "Unsupported case: errno: %u\n", rte_errno);
		return -ENOTSUP;
	}
	printf("\n");
	return EXIT_FAILURE;
}