/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

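/*
 * Per-NUMA-socket session mempools. Pools are created lazily on the
 * socket of each enabled crypto device and shared by every device and
 * queue pair on that socket.
 */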
static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis",
	[CPERF_ASYM_MODEX] = "modex"
};

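/*
 * Dispatch table mapping each test type to its constructor, runner and
 * destructor. main() selects the entry via opts.test.
 */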
const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};

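/*
 * Create the asymmetric session pools for a socket, with elements sized
 * to the larger of the device's private session size and the common
 * session header size. Pools already created for this socket are reused.
 */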
static int
create_asym_op_pool_socket(uint8_t dev_id, int32_t socket_id,
			   uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mpool = NULL;
	unsigned int session_size =
		RTE_MAX(rte_cryptodev_asym_get_private_session_size(dev_id),
			rte_cryptodev_asym_get_header_session_size());

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_priv_pool%u",
			 socket_id);

		mpool = rte_mempool_create(mp_name, nb_sessions, session_size,
					   0, 0, NULL, NULL, NULL, NULL,
					   socket_id, 0);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
			       mp_name, socket_id);
			return -ENOMEM;
		}
		printf("Allocated pool \"%s\" on socket %d\n", mp_name,
		       socket_id);
		session_pool_socket[socket_id].priv_mp = mpool;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_sess_pool%u",
			 socket_id);
		mpool = rte_mempool_create(mp_name, nb_sessions,
					   session_size, 0, 0, NULL, NULL, NULL,
					   NULL, socket_id, 0);
		if (mpool == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
			       mp_name, socket_id);
			return -ENOMEM;
		}
		session_pool_socket[socket_id].sess_mp = mpool;
	}
	return 0;
}

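/*
 * Create the symmetric session pools for a socket: a raw mempool for the
 * PMD-private session data and a rte_cryptodev session pool for the
 * session headers. Pools already created for this socket are reused.
 */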
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%u", socket_id);

		sess_mp = rte_mempool_create(mp_name,
					nb_sessions,
					session_priv_size,
					0, 0, NULL, NULL, NULL,
					NULL, socket_id,
					0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%u", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}

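/*
 * Discover the crypto devices matching opts->device_type, distribute the
 * worker lcores across them as queue pairs, create the session pools they
 * need, then configure and start each device. Returns the number of
 * enabled devices, or a negative errno on failure.
 */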
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * If more devices are available than worker
	 * cores, use only as many devices as cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#ifdef RTE_LIB_SECURITY
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_security_session_get_size(
				rte_cryptodev_get_sec_ctx(cdev_id));
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#endif
	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * with 4 cores and 2 crypto devices, 2 queue pairs will be set up
	 * per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/*
		 * Range check the socket_id: negative values become large
		 * positive ones because the variable is unsigned.
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (opts->op_type == CPERF_ASYM_MODEX) {
			if ((cdev_info.feature_flags &
			     RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) == 0)
				continue;
		}

		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
		};

		switch (opts->op_type) {
		case CPERF_ASYM_MODEX:
			conf.ff_disable |= (RTE_CRYPTODEV_FF_SECURITY |
					    RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO);
			break;
		case CPERF_CIPHER_ONLY:
		case CPERF_AUTH_ONLY:
		case CPERF_CIPHER_THEN_AUTH:
		case CPERF_AUTH_THEN_CIPHER:
		case CPERF_AEAD:
			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;
			/* Fall through */
		case CPERF_PDCP:
		case CPERF_DOCSIS:
			/* Fall through */
		default:
			conf.ff_disable |= RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
		}

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the minimum headroom and tailroom
		 * required by the crypto PMD. The application must honour
		 * these when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data).
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_workers_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count * opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		if (opts->op_type == CPERF_ASYM_MODEX)
			ret = create_asym_op_pool_socket(cdev_id, socket_id,
							 sessions_needed);
		else
			ret = fill_session_pool_socket(socket_id, max_sess_size,
						       sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		if (opts->op_type == CPERF_ASYM_MODEX) {
			qp_conf.mp_session = NULL;
			qp_conf.mp_session_private = NULL;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

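/*
 * Check that every enabled device supports the algorithms, key, digest,
 * IV and AAD sizes requested on the command line.
 */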
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;
	struct rte_cryptodev_asym_capability_idx asym_cap_idx;
	const struct rte_cryptodev_asymmetric_xform_capability *asym_capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_ASYM_MODEX) {
			asym_cap_idx.type = RTE_CRYPTO_ASYM_XFORM_MODEX;
			asym_capability = rte_cryptodev_asym_capability_get(
				cdev_id, &asym_cap_idx);
			if (asym_capability == NULL)
				return -1;

			ret = rte_cryptodev_asym_xform_capability_check_modlen(
				asym_capability, sizeof(perf_mod_p));
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

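/*
 * Validate that a test vector parsed from file carries every field the
 * selected operation type needs (keys, IVs, plaintext/ciphertext of at
 * least max_buffer_size, digest, AAD).
 */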
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

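/*
 * Entry point: parse EAL and application options, initialize and verify
 * the crypto devices, build one test context per (device, queue pair)
 * and run it on a worker lcore for every requested buffer size.
 *
 * Illustrative invocation (flag values are examples only; see the
 * dpdk-test-crypto-perf documentation for the authoritative option list):
 *
 *   dpdk-test-crypto-perf -l 0-3 --vdev crypto_aesni_mb -- \
 *       --ptest throughput --devtype crypto_aesni_mb \
 *       --optype cipher-only --cipher-algo aes-cbc --cipher-key-sz 16 \
 *       --buffer-sz 64,256,1024 --burst-sz 32 --total-ops 1000000
 */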
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"the requested capabilities\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" the specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1,
					"Test vector is missing required fields\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" the specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for the "
				"specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
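	/*
	 * Assign one (device, queue pair) to each worker lcore: qp_id cycles
	 * through a device's queue pairs and cdev_index advances once all of
	 * that device's queue pairs have been handed out.
	 */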
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz, 0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1, "Failed to allocate IMIX sizes\n");
			goto err;
		}
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
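		/* e.g. weights {4, 3, 3} accumulate to thresholds {4, 7, 10} */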
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];

		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

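		/*
		 * Run the full set of workers once per buffer size, stepping
		 * through the configured range or list after each pass.
		 */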
		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		rte_cryptodev_stop(enabled_cdevs[i]);
		ret = rte_cryptodev_close(enabled_cdevs[i]);
		if (ret)
			RTE_LOG(ERR, USER1,
					"Crypto device close error %d\n", ret);
	}
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}