xref: /dpdk/app/test-crypto-perf/main.c (revision 2a7bb4fdf61e9edfb7adbaecb50e728b82da9e23)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <unistd.h>
7 
8 #include <rte_malloc.h>
9 #include <rte_random.h>
10 #include <rte_eal.h>
11 #include <rte_cryptodev.h>
12 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
13 #include <rte_cryptodev_scheduler.h>
14 #endif
15 
16 #include "cperf.h"
17 #include "cperf_options.h"
18 #include "cperf_test_vector_parsing.h"
19 #include "cperf_test_throughput.h"
20 #include "cperf_test_latency.h"
21 #include "cperf_test_verify.h"
22 #include "cperf_test_pmd_cyclecount.h"
23 
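/* Per-socket pools: one for session headers (sess_mp), one for PMD private session data (priv_mp). */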
24 static struct {
25 	struct rte_mempool *sess_mp;
26 	struct rte_mempool *priv_mp;
27 } session_pool_socket[RTE_MAX_NUMA_NODES];
28 
29 const char *cperf_test_type_strs[] = {
30 	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
31 	[CPERF_TEST_TYPE_LATENCY] = "latency",
32 	[CPERF_TEST_TYPE_VERIFY] = "verify",
33 	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
34 };
35 
36 const char *cperf_op_type_strs[] = {
37 	[CPERF_CIPHER_ONLY] = "cipher-only",
38 	[CPERF_AUTH_ONLY] = "auth-only",
39 	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
40 	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
41 	[CPERF_AEAD] = "aead"
42 };
43 
44 const struct cperf_test cperf_testmap[] = {
45 		[CPERF_TEST_TYPE_THROUGHPUT] = {
46 				cperf_throughput_test_constructor,
47 				cperf_throughput_test_runner,
48 				cperf_throughput_test_destructor
49 		},
50 		[CPERF_TEST_TYPE_LATENCY] = {
51 				cperf_latency_test_constructor,
52 				cperf_latency_test_runner,
53 				cperf_latency_test_destructor
54 		},
55 		[CPERF_TEST_TYPE_VERIFY] = {
56 				cperf_verify_test_constructor,
57 				cperf_verify_test_runner,
58 				cperf_verify_test_destructor
59 		},
60 		[CPERF_TEST_TYPE_PMDCC] = {
61 				cperf_pmd_cyclecount_test_constructor,
62 				cperf_pmd_cyclecount_test_runner,
63 				cperf_pmd_cyclecount_test_destructor
64 		}
65 };
66 
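/*
 * Lazily create, for a given NUMA socket, one mempool for PMD private
 * session data and one for session headers, shared by all crypto
 * devices attached to that socket.
 */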
67 static int
68 fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
69 		uint32_t nb_sessions)
70 {
71 	char mp_name[RTE_MEMPOOL_NAMESIZE];
72 	struct rte_mempool *sess_mp;
73 
74 	if (session_pool_socket[socket_id].priv_mp == NULL) {
75 		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
76 			"priv_sess_mp_%u", socket_id);
77 
78 		sess_mp = rte_mempool_create(mp_name,
79 					nb_sessions,
80 					session_priv_size,
81 					0, 0, NULL, NULL, NULL,
82 					NULL, socket_id,
83 					0);
84 
85 		if (sess_mp == NULL) {
86 			printf("Cannot create pool \"%s\" on socket %d\n",
87 				mp_name, socket_id);
88 			return -ENOMEM;
89 		}
90 
91 		printf("Allocated pool \"%s\" on socket %d\n",
92 			mp_name, socket_id);
93 		session_pool_socket[socket_id].priv_mp = sess_mp;
94 	}
95 
96 	if (session_pool_socket[socket_id].sess_mp == NULL) {
97 
98 		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
99 			"sess_mp_%u", socket_id);
100 
101 		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
102 					nb_sessions, 0, 0, 0, socket_id);
103 
104 		if (sess_mp == NULL) {
105 			printf("Cannot create pool \"%s\" on socket %d\n",
106 				mp_name, socket_id);
107 			return -ENOMEM;
108 		}
109 
110 		printf("Allocated pool \"%s\" on socket %d\n",
111 			mp_name, socket_id);
112 		session_pool_socket[socket_id].sess_mp = sess_mp;
113 	}
114 
115 	return 0;
116 }
117 
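/*
 * Configure and start every enabled crypto device: distribute queue pairs
 * across the worker lcores, honour the PMD's mbuf headroom/tailroom
 * requirements and create the per-socket session pools.
 * Returns the number of initialized devices, or a negative errno on failure.
 */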
118 static int
119 cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
120 {
121 	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
122 	uint32_t sessions_needed = 0;
123 	unsigned int i, j;
124 	int ret;
125 
126 	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
127 			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
128 	if (enabled_cdev_count == 0) {
129 		printf("No crypto devices of type %s available\n",
130 				opts->device_type);
131 		return -EINVAL;
132 	}
133 
134 	nb_lcores = rte_lcore_count() - 1;
135 
136 	if (nb_lcores < 1) {
137 		RTE_LOG(ERR, USER1,
138 			"Number of enabled cores needs to be higher than 1\n");
139 		return -EINVAL;
140 	}
141 
142 	/*
143 	 * Use fewer devices if there are
144 	 * more available than cores.
145 	 */
146 	if (enabled_cdev_count > nb_lcores)
147 		enabled_cdev_count = nb_lcores;
148 
149 	/* Create a mempool shared by all the devices */
150 	uint32_t max_sess_size = 0, sess_size;
151 
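	/* Size the private-data pool for the largest session size of any PMD. */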
152 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
153 		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
154 		if (sess_size > max_sess_size)
155 			max_sess_size = sess_size;
156 	}
157 
158 	/*
159 	 * Calculate the number of queue pairs needed, based on the
160 	 * number of available logical cores and crypto devices.
161 	 * For instance, if there are 4 cores and 2 crypto devices,
162 	 * 2 queue pairs will be set up per device.
163 	 */
164 	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
165 				(nb_lcores / enabled_cdev_count) + 1 :
166 				nb_lcores / enabled_cdev_count;
167 
168 	for (i = 0; i < enabled_cdev_count &&
169 			i < RTE_CRYPTO_MAX_DEVS; i++) {
170 		cdev_id = enabled_cdevs[i];
171 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
172 		/*
173 		 * If multi-core scheduler is used, limit the number
174 		 * of queue pairs to 1, as there is no way to know
175 		 * how many cores are being used by the PMD, and
176 		 * how many will be available for the application.
177 		 */
178 		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
179 				rte_cryptodev_scheduler_mode_get(cdev_id) ==
180 				CDEV_SCHED_MODE_MULTICORE)
181 			opts->nb_qps = 1;
182 #endif
183 
184 		struct rte_cryptodev_info cdev_info;
185 		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
186 
187 		rte_cryptodev_info_get(cdev_id, &cdev_info);
188 		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
189 			printf("Number of needed queue pairs is higher "
190 				"than the maximum number of queue pairs "
191 				"per device.\n");
192 			printf("Lower the number of cores or increase "
193 				"the number of crypto devices\n");
194 			return -EINVAL;
195 		}
196 		struct rte_cryptodev_config conf = {
197 			.nb_queue_pairs = opts->nb_qps,
198 			.socket_id = socket_id
199 		};
200 
201 		struct rte_cryptodev_qp_conf qp_conf = {
202 			.nb_descriptors = opts->nb_descriptors
203 		};
204 
205 		/**
206 		 * Device info specifies the min headroom and tailroom
207 		 * requirements for the crypto PMD. These need to be honoured
208 		 * by the application when creating mbufs.
209 		 */
210 		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
211 			/* Update headroom */
212 			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
213 		}
214 		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
215 			/* Update tailroom */
216 			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
217 		}
218 
219 		/* Update segment size to include headroom & tailroom */
220 		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);
221 
222 		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
223 		/*
224 		 * Two sessions objects are required for each session
225 		 * Two session objects are required for each session
226 		 */
227 		if (!strcmp((const char *)opts->device_type,
228 					"crypto_scheduler")) {
229 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
230 			uint32_t nb_slaves =
231 				rte_cryptodev_scheduler_slaves_get(cdev_id,
232 								NULL);
233 
234 			sessions_needed = enabled_cdev_count *
235 				opts->nb_qps * nb_slaves;
236 #endif
237 		} else
238 			sessions_needed = enabled_cdev_count *
239 						opts->nb_qps;
240 
241 		/*
242 		 * A single session is required per queue pair
243 		 * in each device
244 		 */
245 		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
246 			RTE_LOG(ERR, USER1,
247 				"Device does not support at least "
248 				"%u sessions\n", opts->nb_qps);
249 			return -ENOTSUP;
250 		}
251 
252 		ret = fill_session_pool_socket(socket_id, max_sess_size,
253 				sessions_needed);
254 		if (ret < 0)
255 			return ret;
256 
257 		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
258 		qp_conf.mp_session_private =
259 				session_pool_socket[socket_id].priv_mp;
260 
261 		ret = rte_cryptodev_configure(cdev_id, &conf);
262 		if (ret < 0) {
263 			printf("Failed to configure cryptodev %u\n", cdev_id);
264 			return -EINVAL;
265 		}
266 
267 		for (j = 0; j < opts->nb_qps; j++) {
268 			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
269 				&qp_conf, socket_id);
270 			if (ret < 0) {
271 				printf("Failed to setup queue pair %u on "
272 					"cryptodev %u\n", j, cdev_id);
273 				return -EINVAL;
274 			}
275 		}
276 
277 		ret = rte_cryptodev_start(cdev_id);
278 		if (ret < 0) {
279 			printf("Failed to start device %u: error %d\n",
280 					cdev_id, ret);
281 			return -EPERM;
282 		}
283 	}
284 
285 	return enabled_cdev_count;
286 }
287 
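/*
 * Check that every enabled device supports the requested cipher, auth and/or
 * AEAD algorithm with the configured key, IV, digest and AAD sizes.
 */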
288 static int
289 cperf_verify_devices_capabilities(struct cperf_options *opts,
290 		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
291 {
292 	struct rte_cryptodev_sym_capability_idx cap_idx;
293 	const struct rte_cryptodev_symmetric_capability *capability;
294 
295 	uint8_t i, cdev_id;
296 	int ret;
297 
298 	for (i = 0; i < nb_cryptodevs; i++) {
299 
300 		cdev_id = enabled_cdevs[i];
301 
302 		if (opts->op_type == CPERF_AUTH_ONLY ||
303 				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
304 				opts->op_type == CPERF_AUTH_THEN_CIPHER) {
305 
306 			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
307 			cap_idx.algo.auth = opts->auth_algo;
308 
309 			capability = rte_cryptodev_sym_capability_get(cdev_id,
310 					&cap_idx);
311 			if (capability == NULL)
312 				return -1;
313 
314 			ret = rte_cryptodev_sym_capability_check_auth(
315 					capability,
316 					opts->auth_key_sz,
317 					opts->digest_sz,
318 					opts->auth_iv_sz);
319 			if (ret != 0)
320 				return ret;
321 		}
322 
323 		if (opts->op_type == CPERF_CIPHER_ONLY ||
324 				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
325 				opts->op_type == CPERF_AUTH_THEN_CIPHER) {
326 
327 			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
328 			cap_idx.algo.cipher = opts->cipher_algo;
329 
330 			capability = rte_cryptodev_sym_capability_get(cdev_id,
331 					&cap_idx);
332 			if (capability == NULL)
333 				return -1;
334 
335 			ret = rte_cryptodev_sym_capability_check_cipher(
336 					capability,
337 					opts->cipher_key_sz,
338 					opts->cipher_iv_sz);
339 			if (ret != 0)
340 				return ret;
341 		}
342 
343 		if (opts->op_type == CPERF_AEAD) {
344 
345 			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
346 			cap_idx.algo.aead = opts->aead_algo;
347 
348 			capability = rte_cryptodev_sym_capability_get(cdev_id,
349 					&cap_idx);
350 			if (capability == NULL)
351 				return -1;
352 
353 			ret = rte_cryptodev_sym_capability_check_aead(
354 					capability,
355 					opts->aead_key_sz,
356 					opts->digest_sz,
357 					opts->aead_aad_sz,
358 					opts->aead_iv_sz);
359 			if (ret != 0)
360 				return ret;
361 		}
362 	}
363 
364 	return 0;
365 }
366 
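/*
 * Verify that a test vector read from file provides all the fields
 * required by the selected operation type and buffer sizes.
 */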
367 static int
368 cperf_check_test_vector(struct cperf_options *opts,
369 		struct cperf_test_vector *test_vec)
370 {
371 	if (opts->op_type == CPERF_CIPHER_ONLY) {
372 		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
373 			if (test_vec->plaintext.data == NULL)
374 				return -1;
375 		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
376 			if (test_vec->plaintext.data == NULL)
377 				return -1;
378 			if (test_vec->plaintext.length < opts->max_buffer_size)
379 				return -1;
380 			if (test_vec->ciphertext.data == NULL)
381 				return -1;
382 			if (test_vec->ciphertext.length < opts->max_buffer_size)
383 				return -1;
384 			/* Cipher IV is only required for some algorithms */
385 			if (opts->cipher_iv_sz &&
386 					test_vec->cipher_iv.data == NULL)
387 				return -1;
388 			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
389 				return -1;
390 			if (test_vec->cipher_key.data == NULL)
391 				return -1;
392 			if (test_vec->cipher_key.length != opts->cipher_key_sz)
393 				return -1;
394 		}
395 	} else if (opts->op_type == CPERF_AUTH_ONLY) {
396 		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
397 			if (test_vec->plaintext.data == NULL)
398 				return -1;
399 			if (test_vec->plaintext.length < opts->max_buffer_size)
400 				return -1;
401 			/* Auth key is only required for some algorithms */
402 			if (opts->auth_key_sz &&
403 					test_vec->auth_key.data == NULL)
404 				return -1;
405 			if (test_vec->auth_key.length != opts->auth_key_sz)
406 				return -1;
407 			if (test_vec->auth_iv.length != opts->auth_iv_sz)
408 				return -1;
409 			/* Auth IV is only required for some algorithms */
410 			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
411 				return -1;
412 			if (test_vec->digest.data == NULL)
413 				return -1;
414 			if (test_vec->digest.length < opts->digest_sz)
415 				return -1;
416 		}
417 
418 	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
419 			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
420 		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
421 			if (test_vec->plaintext.data == NULL)
422 				return -1;
423 			if (test_vec->plaintext.length < opts->max_buffer_size)
424 				return -1;
425 		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
426 			if (test_vec->plaintext.data == NULL)
427 				return -1;
428 			if (test_vec->plaintext.length < opts->max_buffer_size)
429 				return -1;
430 			if (test_vec->ciphertext.data == NULL)
431 				return -1;
432 			if (test_vec->ciphertext.length < opts->max_buffer_size)
433 				return -1;
434 			if (test_vec->cipher_iv.data == NULL)
435 				return -1;
436 			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
437 				return -1;
438 			if (test_vec->cipher_key.data == NULL)
439 				return -1;
440 			if (test_vec->cipher_key.length != opts->cipher_key_sz)
441 				return -1;
442 		}
443 		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
444 			if (test_vec->auth_key.data == NULL)
445 				return -1;
446 			if (test_vec->auth_key.length != opts->auth_key_sz)
447 				return -1;
448 			if (test_vec->auth_iv.length != opts->auth_iv_sz)
449 				return -1;
450 			/* Auth IV is only required for some algorithms */
451 			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
452 				return -1;
453 			if (test_vec->digest.data == NULL)
454 				return -1;
455 			if (test_vec->digest.length < opts->digest_sz)
456 				return -1;
457 		}
458 	} else if (opts->op_type == CPERF_AEAD) {
459 		if (test_vec->plaintext.data == NULL)
460 			return -1;
461 		if (test_vec->plaintext.length < opts->max_buffer_size)
462 			return -1;
463 		if (test_vec->ciphertext.data == NULL)
464 			return -1;
465 		if (test_vec->ciphertext.length < opts->max_buffer_size)
466 			return -1;
467 		if (test_vec->aead_key.data == NULL)
468 			return -1;
469 		if (test_vec->aead_key.length != opts->aead_key_sz)
470 			return -1;
471 		if (test_vec->aead_iv.data == NULL)
472 			return -1;
473 		if (test_vec->aead_iv.length != opts->aead_iv_sz)
474 			return -1;
475 		if (test_vec->aad.data == NULL)
476 			return -1;
477 		if (test_vec->aad.length != opts->aead_aad_sz)
478 			return -1;
479 		if (test_vec->digest.data == NULL)
480 			return -1;
481 		if (test_vec->digest.length < opts->digest_sz)
482 			return -1;
483 	}
484 	return 0;
485 }
486 
487 int
488 main(int argc, char **argv)
489 {
490 	struct cperf_options opts = {0};
491 	struct cperf_test_vector *t_vec = NULL;
492 	struct cperf_op_fns op_fns;
493 	void *ctx[RTE_MAX_LCORE] = { };
494 	int nb_cryptodevs = 0;
495 	uint16_t total_nb_qps = 0;
496 	uint8_t cdev_id, i;
497 	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
498 
499 	uint8_t buffer_size_idx = 0;
500 
501 	int ret;
502 	uint32_t lcore_id;
503 
504 	/* Initialise DPDK EAL */
505 	ret = rte_eal_init(argc, argv);
506 	if (ret < 0)
507 		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
508 	argc -= ret;
509 	argv += ret;
510 
511 	cperf_options_default(&opts);
512 
513 	ret = cperf_options_parse(&opts, argc, argv);
514 	if (ret) {
515 		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
516 		goto err;
517 	}
518 
519 	ret = cperf_options_check(&opts);
520 	if (ret) {
521 		RTE_LOG(ERR, USER1,
522 				"Checking one or more user options failed\n");
523 		goto err;
524 	}
525 
526 	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
527 
528 	if (!opts.silent)
529 		cperf_options_dump(&opts);
530 
531 	if (nb_cryptodevs < 1) {
532 		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
533 				"device type\n");
534 		nb_cryptodevs = 0;
535 		goto err;
536 	}
537 
538 	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
539 			nb_cryptodevs);
540 	if (ret) {
541 		RTE_LOG(ERR, USER1, "Crypto device type does not support "
542 				"the requested capabilities\n");
543 		goto err;
544 	}
545 
546 	if (opts.test_file != NULL) {
547 		t_vec = cperf_test_vector_get_from_file(&opts);
548 		if (t_vec == NULL) {
549 			RTE_LOG(ERR, USER1,
550 					"Failed to create test vector for"
551 					" specified file\n");
552 			goto err;
553 		}
554 
555 		if (cperf_check_test_vector(&opts, t_vec)) {
556 			RTE_LOG(ERR, USER1,
557 					"Test vector is missing required fields\n");
558 			goto err;
559 		}
560 	} else {
561 		t_vec = cperf_test_vector_get_dummy(&opts);
562 		if (t_vec == NULL) {
563 			RTE_LOG(ERR, USER1,
564 					"Failed to create test vector for"
565 					" specified algorithms\n");
566 			goto err;
567 		}
568 	}
569 
570 	ret = cperf_get_op_functions(&opts, &op_fns);
571 	if (ret) {
572 		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
573 				"the specified algorithm combination\n");
574 		goto err;
575 	}
576 
577 	if (!opts.silent)
578 		show_test_vector(t_vec);
579 
580 	total_nb_qps = nb_cryptodevs * opts.nb_qps;
581 
582 	i = 0;
583 	uint8_t qp_id = 0, cdev_index = 0;
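	/*
	 * Create one test context per queue pair and hand it to a worker
	 * lcore; all queue pairs of a device are assigned before moving
	 * on to the next device.
	 */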
584 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
585 
586 		if (i == total_nb_qps)
587 			break;
588 
589 		cdev_id = enabled_cdevs[cdev_index];
590 
591 		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
592 
593 		ctx[i] = cperf_testmap[opts.test].constructor(
594 				session_pool_socket[socket_id].sess_mp,
595 				session_pool_socket[socket_id].priv_mp,
596 				cdev_id, qp_id,
597 				&opts, t_vec, &op_fns);
598 		if (ctx[i] == NULL) {
599 			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
600 			goto err;
601 		}
602 		qp_id = (qp_id + 1) % opts.nb_qps;
603 		if (qp_id == 0)
604 			cdev_index++;
605 		i++;
606 	}
607 
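	/*
	 * With an IMIX distribution, build a random sequence of buffer sizes
	 * weighted by the distribution and run the test once; otherwise run
	 * it for every buffer size in the configured list or range.
	 */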
608 	if (opts.imix_distribution_count != 0) {
609 		uint8_t buffer_size_count = opts.buffer_size_count;
610 		uint16_t distribution_total[buffer_size_count];
611 		uint32_t op_idx;
612 		uint32_t test_average_size = 0;
613 		const uint32_t *buffer_size_list = opts.buffer_size_list;
614 		const uint32_t *imix_distribution_list = opts.imix_distribution_list;
615 
616 		opts.imix_buffer_sizes = rte_malloc(NULL,
617 					sizeof(uint32_t) * opts.pool_sz,
618 					0);
619 		/*
620 		 * Calculate the cumulative distribution of
621 		 * probabilities per packet size
622 		 */
623 		distribution_total[0] = imix_distribution_list[0];
624 		for (i = 1; i < buffer_size_count; i++)
625 			distribution_total[i] = imix_distribution_list[i] +
626 				distribution_total[i-1];
627 
628 		/* Generate a random sequence of packet sizes based on the distribution */
629 		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
630 			uint16_t random_number = rte_rand() %
631 				distribution_total[buffer_size_count - 1];
632 			for (i = 0; i < buffer_size_count; i++)
633 				if (random_number < distribution_total[i])
634 					break;
635 
636 			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
637 		}
638 
639 		/* Calculate average buffer size for the IMIX distribution */
640 		for (i = 0; i < buffer_size_count; i++)
641 			test_average_size += buffer_size_list[i] *
642 				imix_distribution_list[i];
643 
644 		opts.test_buffer_size = test_average_size /
645 				distribution_total[buffer_size_count - 1];
646 
647 		i = 0;
648 		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
649 
650 			if (i == total_nb_qps)
651 				break;
652 
653 			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
654 				ctx[i], lcore_id);
655 			i++;
656 		}
657 		i = 0;
658 		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
659 
660 			if (i == total_nb_qps)
661 				break;
662 			rte_eal_wait_lcore(lcore_id);
663 			i++;
664 		}
665 	} else {
666 
667 		/* Get next size from range or list */
668 		if (opts.inc_buffer_size != 0)
669 			opts.test_buffer_size = opts.min_buffer_size;
670 		else
671 			opts.test_buffer_size = opts.buffer_size_list[0];
672 
673 		while (opts.test_buffer_size <= opts.max_buffer_size) {
674 			i = 0;
675 			RTE_LCORE_FOREACH_SLAVE(lcore_id) {
676 
677 				if (i == total_nb_qps)
678 					break;
679 
680 				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
681 					ctx[i], lcore_id);
682 				i++;
683 			}
684 			i = 0;
685 			RTE_LCORE_FOREACH_SLAVE(lcore_id) {
686 
687 				if (i == total_nb_qps)
688 					break;
689 				rte_eal_wait_lcore(lcore_id);
690 				i++;
691 			}
692 
693 			/* Get next size from range or list */
694 			if (opts.inc_buffer_size != 0)
695 				opts.test_buffer_size += opts.inc_buffer_size;
696 			else {
697 				if (++buffer_size_idx == opts.buffer_size_count)
698 					break;
699 				opts.test_buffer_size =
700 					opts.buffer_size_list[buffer_size_idx];
701 			}
702 		}
703 	}
704 
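	/* Tear down the per-lcore test contexts and stop all crypto devices. */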
705 	i = 0;
706 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
707 
708 		if (i == total_nb_qps)
709 			break;
710 
711 		cperf_testmap[opts.test].destructor(ctx[i]);
712 		i++;
713 	}
714 
715 	for (i = 0; i < nb_cryptodevs &&
716 			i < RTE_CRYPTO_MAX_DEVS; i++)
717 		rte_cryptodev_stop(enabled_cdevs[i]);
718 
719 	free_test_vector(t_vec, &opts);
720 
721 	printf("\n");
722 	return EXIT_SUCCESS;
723 
724 err:
725 	i = 0;
726 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
727 		if (i == total_nb_qps)
728 			break;
729 
730 		if (ctx[i] && cperf_testmap[opts.test].destructor)
731 			cperf_testmap[opts.test].destructor(ctx[i]);
732 		i++;
733 	}
734 
735 	for (i = 0; i < nb_cryptodevs &&
736 			i < RTE_CRYPTO_MAX_DEVS; i++)
737 		rte_cryptodev_stop(enabled_cdevs[i]);
738 	rte_free(opts.imix_buffer_sizes);
739 	free_test_vector(t_vec, &opts);
740 
741 	printf("\n");
742 	return EXIT_FAILURE;
743 }
744