/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

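/*
 * One pair of mempools per NUMA socket: sess_mp holds the generic session
 * headers, priv_mp holds the driver-private session data.
 */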
static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp",
	[CPERF_DOCSIS] = "docsis"
};

const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};

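/*
 * Create the session header and private-data mempools for a socket, if they
 * do not exist yet. The pools are shared by all devices on that socket, so
 * this is a no-op on subsequent calls for the same socket.
 */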
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%u", socket_id);

		sess_mp = rte_mempool_create(mp_name,
					nb_sessions,
					session_priv_size,
					0, 0, NULL, NULL, NULL,
					NULL, socket_id,
					0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {

		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%u", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}

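/*
 * Probe and configure the crypto devices of the requested type: size the
 * shared session pools, distribute queue pairs across the available worker
 * lcores, set up each queue pair and start the devices. Returns the number
 * of devices in use, or a negative errno value on failure.
 */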
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/* Use fewer devices if more are available than worker cores. */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
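	/*
	 * When the security library is enabled, security sessions may be
	 * larger than symmetric crypto sessions, so account for them too
	 * when sizing the private-data pool.
	 */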
#ifdef RTE_LIB_SECURITY
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_security_session_get_size(
				rte_cryptodev_get_sec_ctx(cdev_id));
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
#endif
	/*
	 * Calculate the number of queue pairs needed per device, based on
	 * the number of available logical cores and crypto devices. For
	 * instance, with 4 cores and 2 crypto devices, 2 queue pairs are
	 * set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;
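	/* This is the ceiling of nb_lcores / enabled_cdev_count. */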

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_CRYPTO_SCHEDULER
		/*
		 * If the multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/*
		 * Range-check the socket ID: negative values become large
		 * positive ones because an unsigned type is used.
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
			.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO,
		};

		if (opts->op_type != CPERF_PDCP &&
				opts->op_type != CPERF_DOCSIS)
			conf.ff_disable |= RTE_CRYPTODEV_FF_SECURITY;

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the minimum headroom and tailroom
		 * requirements of the crypto PMD. These need to be honoured
		 * by the application when creating mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_CRYPTO_SCHEDULER
			uint32_t nb_workers =
				rte_cryptodev_scheduler_workers_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_workers;
#endif
		} else
			sessions_needed = enabled_cdev_count * opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to set up queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

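/*
 * Check that every enabled device supports the algorithms selected on the
 * command line, with the requested key, IV, digest and AAD sizes.
 */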
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

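/*
 * Validate that a user-supplied test vector contains every field the
 * selected operation type requires, with sizes matching the options.
 */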
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

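/*
 * Illustrative invocation (the full option list is defined alongside
 * cperf_options.h; adjust device and algorithm names to the build):
 *
 *   dpdk-test-crypto-perf -l 0-3 -- --ptest throughput \
 *       --devtype crypto_aesni_mb --optype cipher-only --cipher-algo aes-cbc
 */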
int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"the requested capabilities\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" the specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1,
					"Test vector is missing necessary entries\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" the specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"the specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
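	/*
	 * Build one test context per worker lcore: walk the queue pairs of
	 * each device in turn, moving to the next device once all of its
	 * queue pairs have been assigned.
	 */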
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate the IMIX buffer size list\n");
			goto err;
		}

		/*
		 * Calculate the accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];
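		/*
		 * E.g. weights {20, 40, 40} accumulate to {20, 60, 100}, so a
		 * random number in [0, 100) selects each size with the
		 * intended probability.
		 */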

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];
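		/*
		 * This is the weighted average: e.g. sizes {64, 1518} with
		 * weights {9, 1} give (64 * 9 + 1518) / 10 = 209 after
		 * integer division.
		 */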

		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_WORKER(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_WORKER(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

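	/* All runs are complete: tear down the per-lcore test contexts. */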
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}